LLVM  10.0.0svn
PPCISelLowering.h
1 //===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that PPC uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
15 #define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
16 
17 #include "PPCInstrInfo.h"
25 #include "llvm/IR/Attributes.h"
26 #include "llvm/IR/CallingConv.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/InlineAsm.h"
29 #include "llvm/IR/Metadata.h"
30 #include "llvm/IR/Type.h"
32 #include <utility>
33 
34 namespace llvm {
35 
36  namespace PPCISD {
37 
38  // When adding a NEW PPCISD node please add it to the correct position in
39  // the enum. The order of elements in this enum matters!
40  // Values that are added after this entry:
41  // STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
42  // are considered memory opcodes and are treated differently than entries
43  // that come before it. For example, ADD or MUL should be placed before
44  // the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come
45  // after it.
46  enum NodeType : unsigned {
47  // Start the numbering where the builtin ops and target ops leave off.
49 
50  /// FSEL - Traditional three-operand fsel node.
51  ///
53 
54  /// FCFID - The FCFID instruction, taking an f64 operand and producing
55  /// an f64 value containing the FP representation of the integer that
56  /// was temporarily in the f64 operand.
58 
59  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
60  /// unsigned integers and single-precision outputs.
62 
63  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
64  /// operand, producing an f64 value containing the integer representation
65  /// of that FP value.
67 
68  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
69  /// unsigned integers with round toward zero.
71 
72  /// Floating-point-to-integer conversion instructions
74 
75  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
76  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
78 
79  /// SExtVElems, takes an input vector of a smaller type and sign
80  /// extends to an output vector of a larger type.
82 
83  /// Reciprocal estimate instructions (unary FP ops).
85 
86  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
87  // three v4f32 operands and producing a v4f32 result.
89 
90  /// VPERM - The PPC VPERM Instruction.
91  ///
93 
94  /// XXSPLT - The PPC VSX splat instructions
95  ///
97 
98  /// VECINSERT - The PPC vector insert instruction
99  ///
101 
102  /// XXREVERSE - The PPC VSX reverse instruction
103  ///
105 
106  /// VECSHL - The PPC vector shift left instruction
107  ///
109 
110  /// XXPERMDI - The PPC XXPERMDI instruction
111  ///
113 
114  /// The CMPB instruction (takes two operands of i32 or i64).
116 
117  /// Hi/Lo - These represent the high and low 16-bit parts of a global
118  /// address respectively. These nodes have two operands, the first of
119  /// which must be a TargetGlobalAddress, and the second of which must be a
120  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
121  /// though these are usually folded into other nodes.
122  Hi, Lo,
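     /// [Editor's note, illustrative] A 32-bit global address G is typically
     /// materialized as `lis rD, G@ha` followed by `addi rD, rD, G@l`, i.e. a
     /// Hi node feeding a Lo node; the naive 'lis'/'li' forms mentioned above
     /// are normally folded into such pairs during selection.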
123 
124  /// The following two target-specific nodes are used for calls through
125  /// function pointers in the 64-bit SVR4 ABI.
126 
127  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
128  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
129  /// compute an allocation on the stack.
131 
132  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
133  /// compute an offset from native SP to the address of the most recent
134  /// dynamic alloca.
136 
137  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
138  /// at function entry, used for PIC code.
140 
141  /// These nodes represent PPC shifts.
142  ///
143  /// For scalar types, only the last `n + 1` bits of the shift amounts
144  /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
145  /// for exact behaviors.
146  ///
147  /// For vector types, only the last n bits are used. See vsld.
149 
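     /// [Editor's note, worked example] For an i32 shift, n = log2(32) = 5, so
     /// the low 6 bits of the amount are used; `slw` with an amount in 32..63
     /// therefore produces 0 rather than wrapping the shift count.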
150  /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
151  /// word and shift left immediate.
153 
154  /// The combination of sra[wd]i and addze used to implement signed
155  /// integer division by a power of 2. The first operand is the dividend,
156  /// and the second is the constant shift amount (representing the
157  /// divisor).
159 
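     // [Editor's sketch, not part of the original header] For i32 `x / 4` the
     // selected sequence is roughly:
     //   srawi r4, r3, 2   ; CA is set if x was negative and a 1 was shifted out
     //   addze r4, r4      ; add CA back in so the result rounds toward zero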
160  /// CALL - A direct function call.
161  /// CALL_NOP is a call with the special NOP which follows 64-bit
162  /// SVR4 calls and 32-bit/64-bit AIX calls.
164 
165  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
166  /// MTCTR instruction.
168 
169  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
170  /// BCTRL instruction.
172 
173  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
174  /// instruction and the TOC reload required on SVR4 PPC64.
176 
177  /// Return with a flag operand, matched by 'blr'
179 
180  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
181  /// This copies the bits corresponding to the specified CRREG into the
182  /// resultant GPR. Bits corresponding to other CR regs are undefined.
184 
185  /// Direct move from a VSX register to a GPR
187 
188  /// Direct move from a GPR to a VSX register (algebraic)
190 
191  /// Direct move from a GPR to a VSX register (zero)
193 
194  /// Direct move of 2 consecutive GPR to a VSX register.
196 
197  /// BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and
198  /// EXTRACT_ELEMENT but take f64 arguments instead of i64, as i64 is
199  /// unsupported for this target.
200  /// Merge 2 GPRs to a single SPE register.
202 
203  /// Extract SPE register component, second argument is high or low.
205 
206  /// Extract a subvector from signed integer vector and convert to FP.
207  /// It is primarily used to convert a (widened) illegal integer vector
208  /// type to a legal floating point vector type.
209  /// For example v2i32 -> widened to v4i32 -> v2f64
211 
212  /// Extract a subvector from unsigned integer vector and convert to FP.
213  /// As with SINT_VEC_TO_FP, used for converting illegal types.
215 
216  // FIXME: Remove these once the ANDI glue bug is fixed:
217  /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
218  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
219  /// implement truncation of i32 or i64 to i1.
221 
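     // [Editor's sketch] Truncating i32 %x to i1 can be selected as
     //   andi. r3, r3, 1   ; record form, compares the result against zero in CR0
     // and then reading CR0: GT is set when the masked value is 1, EQ when it
     // is 0, which is exactly what these nodes model.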
222  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
223  // target (returns (Lo, Hi)). It takes a chain operand.
225 
226  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
228 
229  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
231 
232  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
233  /// instructions. For lack of a better number, we use the opcode number
234  /// encoding for the OPC field to identify the compare. For example, 838
235  /// is VCMPGTSH.
237 
238  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
239  /// altivec VCMP*o instructions. For lack of a better number, we use the
240  /// opcode number encoding for the OPC field to identify the compare. For
241  /// example, 838 is VCMPGTSH.
243 
244  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
245  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
246  /// condition register to branch on, OPC is the branch opcode to use (e.g.
247  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
248  /// an optional input flag argument.
250 
251  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
252  /// loops.
254 
255  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
256  /// towards zero. Used only as part of the long double-to-int
257  /// conversion sequence.
259 
260  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
262 
263  /// TC_RETURN - A tail call return.
264  /// operand #0 chain
265  /// operand #1 callee (register or absolute)
266  /// operand #2 stack adjustment
267  /// operand #3 optional in flag
269 
270  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
273 
274  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
275  /// for non-position independent code on PPC32.
277 
278  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
279  /// local dynamic TLS and position independent code on PPC32.
281 
282  /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
283  /// TLS model, produces an ADDIS8 instruction that adds the GOT
284  /// base to sym\@got\@tprel\@ha.
286 
287  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
288  /// TLS model, produces a LD instruction with base register G8RReg
289  /// and offset sym\@got\@tprel\@l. This completes the addition that
290  /// finds the offset of "sym" relative to the thread pointer.
292 
293  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
294  /// model, produces an ADD instruction that adds the contents of
295  /// G8RReg to the thread pointer. Symbol contains a relocation
296  /// sym\@tls which is to be replaced by the thread pointer and
297  /// identifies to the linker that the instruction is part of a
298  /// TLS sequence.
300 
301  /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
302  /// model, produces an ADDIS8 instruction that adds the GOT base
303  /// register to sym\@got\@tlsgd\@ha.
305 
306  /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
307  /// model, produces an ADDI8 instruction that adds G8RReg to
308  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
309  /// ADDIS_TLSGD_L_ADDR until after register assignment.
311 
312  /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
313  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
314  /// ADDIS_TLSGD_L_ADDR until after register assignment.
316 
317  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
318  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
319  /// register assignment.
321 
322  /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
323  /// model, produces an ADDIS8 instruction that adds the GOT base
324  /// register to sym\@got\@tlsld\@ha.
326 
327  /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
328  /// model, produces an ADDI8 instruction that adds G8RReg to
329  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
330  /// ADDIS_TLSLD_L_ADDR until after register assignment.
332 
333  /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
334  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
335  /// ADDIS_TLSLD_L_ADDR until after register assignment.
337 
338  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
339  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
340  /// following register assignment.
342 
343  /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
344  /// model, produces an ADDIS8 instruction that adds X3 to
345  /// sym\@dtprel\@ha.
347 
348  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
349  /// model, produces an ADDI8 instruction that adds G8RReg to
350  /// sym\@got\@dtprel\@l.
352 
353  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
354  /// during instruction selection to optimize a BUILD_VECTOR into
355  /// operations on splats. This is necessary to avoid losing these
356  /// optimizations due to constant folding.
358 
359  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
360  /// operand identifies the operating system entry point.
361  SC,
362 
363  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
365 
366  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
367  /// history rolling buffer entry.
369 
370  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
372 
373  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
374  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
375  /// or stxvd2x instruction. The chain is necessary because the
376  /// sequence replaces a load and needs to provide the same number
377  /// of outputs.
379 
380  /// An SDNode for swaps that are not associated with any loads/stores
381  /// and thereby have no chain.
383 
384  /// An SDNode for Power9 vector absolute value difference.
385  /// operand #0 vector
386  /// operand #1 vector
387  /// operand #2 constant i32 0 or 1, to indicate whether it needs to patch
388  /// the most significant bit for signed i32
389  ///
390  /// Power9 VABSD* instructions are designed to support unsigned integer
391  /// vectors (byte/halfword/word); if we want to use them for signed integer
392  /// vectors, we have to flip their sign bits first. Flipping the sign bits
393  /// of a byte/halfword integer vector would be inefficient, but for a word
394  /// integer vector we can leverage XVNEGSP to do it efficiently, e.g.:
395  /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
396  /// => VABSDUW((XVNEGSP a), (XVNEGSP b))
398 
399  /// QVFPERM = This corresponds to the QPX qvfperm instruction.
401 
402  /// QVGPCI = This corresponds to the QPX qvgpci instruction.
404 
405  /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
407 
408  /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
410 
411  /// QBFLT = Access the underlying QPX floating-point boolean
412  /// representation.
414 
415  /// FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or
416  /// lower (IDX=1) half of v4f32 to v2f64.
418 
419  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
420  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
421  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
422  /// i32.
424 
425  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
426  /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
427  /// then puts it in the bottom bits of the GPRC. Type can be either i16
428  /// or i32.
430 
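     /// [Editor's example, illustrative] A byte-reversed i32 load can be
     /// selected as `lwbrx rD, 0, rA`, which is what LBRX with Type = i32
     /// models; STBRX correspondingly maps to `stwbrx`/`sthbrx`.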
431  /// STFIWX - The STFIWX instruction. The first operand is an input token
432  /// chain, then an f64 value to store, then an address to store it to.
434 
435  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
436  /// load which sign-extends from a 32-bit integer value into the
437  /// destination 64-bit register.
439 
440  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
441  /// load which zero-extends from a 32-bit integer value into the
442  /// destination 64-bit register.
444 
445  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
446  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
447  /// This can be used for converting loaded integers to floating point.
449 
450  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
451  /// chain, then an f64 value to store, then an address to store it to,
452  /// followed by a byte-width for the store.
454 
455  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
456  /// Maps directly to an lxvd2x instruction that will be followed by
457  /// an xxswapd.
459 
460  /// VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
461  /// Maps directly to one of lxvd2x/lxvw4x/lxvh8x/lxvb16x depending on
462  /// the vector type to load vector in big-endian element order.
464 
465  /// VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a
466  /// v2f32 value into the lower half of a VSR register.
468 
469  /// VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory
470  /// instruction such as LXVDSX or LXVWSX.
472 
473  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
474  /// Maps directly to an stxvd2x instruction that will be preceded by
475  /// an xxswapd.
477 
478  /// CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
479  /// Maps directly to one of stxvd2x/stxvw4x/stxvh8x/stxvb16x depending on
480  /// the vector type to store vector in big-endian element order.
482 
483  /// Store scalar integers from VSR.
485 
486  /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
487  /// The 4xf32 load used for v4i1 constants.
489 
490  /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
491  /// except they ensure that the compare input is zero-extended for
492  /// sub-word versions because the atomic loads zero-extend.
494 
495  /// GPRC = TOC_ENTRY GA, TOC
496  /// Loads the entry for GA from the TOC, where the TOC base is given by
497  /// the last operand.
499  };
500 
501  } // end namespace PPCISD
502 
503  /// Define some predicates that are used for node matching.
504  namespace PPC {
505 
506  /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
507  /// VPKUHUM instruction.
508  bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
509  SelectionDAG &DAG);
510 
511  /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
512  /// VPKUWUM instruction.
513  bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
514  SelectionDAG &DAG);
515 
516  /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
517  /// VPKUDUM instruction.
518  bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
519  SelectionDAG &DAG);
520 
521  /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
522  /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
523  bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
524  unsigned ShuffleKind, SelectionDAG &DAG);
525 
526  /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
527  /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
528  bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
529  unsigned ShuffleKind, SelectionDAG &DAG);
530 
531  /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
532  /// a VMRGEW or VMRGOW instruction
533  bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
534  unsigned ShuffleKind, SelectionDAG &DAG);
535  /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
536  /// for a XXSLDWI instruction.
537  bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
538  bool &Swap, bool IsLE);
539 
540  /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
541  /// for a XXBRH instruction.
542  bool isXXBRHShuffleMask(ShuffleVectorSDNode *N);
543 
544  /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
545  /// for a XXBRW instruction.
546  bool isXXBRWShuffleMask(ShuffleVectorSDNode *N);
547 
548  /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
549  /// for a XXBRD instruction.
550  bool isXXBRDShuffleMask(ShuffleVectorSDNode *N);
551 
552  /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
553  /// for a XXBRQ instruction.
554  bool isXXBRQShuffleMask(ShuffleVectorSDNode *N);
555 
556  /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
557  /// for a XXPERMDI instruction.
558  bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
559  bool &Swap, bool IsLE);
560 
561  /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
562  /// shift amount, otherwise return -1.
563  int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
564  SelectionDAG &DAG);
565 
566  /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
567  /// specifies a splat of a single element that is suitable for input to
568  /// VSPLTB/VSPLTH/VSPLTW.
569  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
570 
571  /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
572  /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
573  /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
574  /// vector into the other. This function will also set a couple of
575  /// output parameters for how much the source vector needs to be shifted and
576  /// what byte number needs to be specified for the instruction to put the
577  /// element in the desired location of the target vector.
578  bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
579  unsigned &InsertAtByte, bool &Swap, bool IsLE);
580 
581  /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
582  /// appropriate for PPC mnemonics (which have a big endian bias - namely
583  /// elements are counted from the left of the vector register).
584  unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
585  SelectionDAG &DAG);
586 
587  /// get_VSPLTI_elt - If this is a build_vector of constants which can be
588  /// formed by using a vspltis[bhw] instruction of the specified element
589  /// size, return the constant being splatted. The ByteSize field indicates
590  /// the number of bytes of each element [124] -> [bhw].
591  SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
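  /// [Editor's example, illustrative] A build_vector of sixteen i8 constants
  /// that are all 5 can be materialized with `vspltisb vD, 5`; for such a node,
  /// get_VSPLTI_elt(N, /*ByteSize=*/1, DAG) returns the splatted constant 5.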
592 
593  /// If this is a qvaligni shuffle mask, return the shift
594  /// amount, otherwise return -1.
595  int isQVALIGNIShuffleMask(SDNode *N);
596 
597  } // end namespace PPC
598 
599  class PPCTargetLowering : public TargetLowering {
600  const PPCSubtarget &Subtarget;
601 
602  public:
603  explicit PPCTargetLowering(const PPCTargetMachine &TM,
604  const PPCSubtarget &STI);
605 
606  /// getTargetNodeName() - This method returns the name of a target specific
607  /// DAG node.
608  const char *getTargetNodeName(unsigned Opcode) const override;
609 
610  bool isSelectSupported(SelectSupportKind Kind) const override {
611  // PowerPC does not support scalar condition selects on vectors.
612  return (Kind != SelectSupportKind::ScalarCondVectorVal);
613  }
614 
615  /// getPreferredVectorAction - The code we generate when vector types are
616  /// legalized by promoting the integer element type is often much worse
617  /// than code we generate if we widen the type for applicable vector types.
618  /// The issue with promoting is that the vector is scalarized, individual
619  /// elements promoted and then the vector is rebuilt. So say we load a pair
620  /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
621  /// loads, moves back into VSR's (or memory ops if we don't have moves) and
622  /// then the VPERM for the shuffle. All in all a very slow sequence.
623  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
624  const override {
625  if (VT.getScalarSizeInBits() % 8 == 0)
626  return TypeWidenVector;
627  return TargetLoweringBase::getPreferredVectorAction(VT);
628  }
629 
630  bool useSoftFloat() const override;
631 
632  bool hasSPE() const;
633 
634  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
635  return MVT::i32;
636  }
637 
638  bool isCheapToSpeculateCttz() const override {
639  return true;
640  }
641 
642  bool isCheapToSpeculateCtlz() const override {
643  return true;
644  }
645 
646  bool isCtlzFast() const override {
647  return true;
648  }
649 
650  bool hasAndNotCompare(SDValue) const override {
651  return true;
652  }
653 
654  bool preferIncOfAddToSubOfNot(EVT VT) const override;
655 
656  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
657  return VT.isScalarInteger();
658  }
659 
660  bool supportSplitCSR(MachineFunction *MF) const override {
661  return
662  MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
663  MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
664  }
665 
666  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
667 
668  void insertCopiesSplitCSR(
669  MachineBasicBlock *Entry,
670  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
671 
672  /// getSetCCResultType - Return the ISD::SETCC ValueType
673  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
674  EVT VT) const override;
675 
676  /// Return true if the target always benefits from combining into FMA for a
677  /// given value type. This must typically return false on targets where FMA
678  /// takes more cycles to execute than FADD.
679  bool enableAggressiveFMAFusion(EVT VT) const override;
680 
681  /// getPreIndexedAddressParts - returns true by value, base pointer and
682  /// offset pointer and addressing mode by reference if the node's address
683  /// can be legally represented as pre-indexed load / store address.
684  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
685  SDValue &Offset,
686  ISD::MemIndexedMode &AM,
687  SelectionDAG &DAG) const override;
688 
689  /// SelectAddressEVXRegReg - Given the specified address, check to see if
690  /// it can be more efficiently represented as [r+imm].
691  bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index,
692  SelectionDAG &DAG) const;
693 
694  /// SelectAddressRegReg - Given the specified address, check to see if it
695  /// can be represented as an indexed [r+r] address. If \p EncodingAlignment
696  /// is non-zero, only accept displacements which are not suitable for [r+imm].
697  /// Returns false if it can be represented by [r+imm], which is preferred.
698  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
699  SelectionDAG &DAG,
700  unsigned EncodingAlignment = 0) const;
701 
702  /// SelectAddressRegImm - Returns true if the address N can be represented
703  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
704  /// is not better represented as reg+reg. If \p EncodingAlignment is
705  /// non-zero, only accept displacements suitable for instruction encoding
706  /// requirement, i.e. multiples of 4 for DS form.
707  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
708  SelectionDAG &DAG,
709  unsigned EncodingAlignment) const;
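  /// [Editor's example, assumption based on the comment above] `ld`/`std` use
  /// the DS instruction form, whose 16-bit displacement must be a multiple of
  /// 4; with EncodingAlignment == 4 an address such as base+6 is therefore
  /// rejected here and selection falls back to the indexed [r+r] form (`ldx`).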
710 
711  /// SelectAddressRegRegOnly - Given the specified address, force it to be
712  /// represented as an indexed [r+r] operation.
713  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
714  SelectionDAG &DAG) const;
715 
716  Sched::Preference getSchedulingPreference(SDNode *N) const override;
717 
718  /// LowerOperation - Provide custom lowering hooks for some operations.
719  ///
720  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
721 
722  /// ReplaceNodeResults - Replace the results of node with an illegal result
723  /// type with new values built out of custom code.
724  ///
725  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
726  SelectionDAG &DAG) const override;
727 
728  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
729  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
730 
731  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
732 
733  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
734  SmallVectorImpl<SDNode *> &Created) const override;
735 
736  Register getRegisterByName(const char* RegName, EVT VT,
737  const MachineFunction &MF) const override;
738 
739  void computeKnownBitsForTargetNode(const SDValue Op,
740  KnownBits &Known,
741  const APInt &DemandedElts,
742  const SelectionDAG &DAG,
743  unsigned Depth = 0) const override;
744 
745  Align getPrefLoopAlignment(MachineLoop *ML) const override;
746 
747  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
748  return true;
749  }
750 
751  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
752  AtomicOrdering Ord) const override;
753  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
754  AtomicOrdering Ord) const override;
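  // [Editor's note, hedged] On PPC, acquire/release/seq_cst atomics are
  // normally implemented by bracketing the access with fences: a leading
  // sync/lwsync for release and seq_cst operations, and a trailing lwsync (or
  // a cmp; bne; isync sequence) after acquire loads. These hooks let
  // AtomicExpandPass insert those fences automatically.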
755 
756  MachineBasicBlock *
757  EmitInstrWithCustomInserter(MachineInstr &MI,
758  MachineBasicBlock *MBB) const override;
759  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
760  MachineBasicBlock *MBB,
761  unsigned AtomicSize,
762  unsigned BinOpcode,
763  unsigned CmpOpcode = 0,
764  unsigned CmpPred = 0) const;
765  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
766  MachineBasicBlock *MBB,
767  bool is8bit,
768  unsigned Opcode,
769  unsigned CmpOpcode = 0,
770  unsigned CmpPred = 0) const;
771 
772  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
773  MachineBasicBlock *MBB) const;
774 
775  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
776  MachineBasicBlock *MBB) const;
777 
778  ConstraintType getConstraintType(StringRef Constraint) const override;
779 
780  /// Examine constraint string and operand type and determine a weight value.
781  /// The operand object must already have been set up with the operand type.
782  ConstraintWeight getSingleConstraintMatchWeight(
783  AsmOperandInfo &info, const char *constraint) const override;
784 
785  std::pair<unsigned, const TargetRegisterClass *>
786  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
787  StringRef Constraint, MVT VT) const override;
788 
789  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
790  /// function arguments in the caller parameter area. This is the actual
791  /// alignment, not its logarithm.
792  unsigned getByValTypeAlignment(Type *Ty,
793  const DataLayout &DL) const override;
794 
795  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
796  /// vector. If it is invalid, don't add anything to Ops.
797  void LowerAsmOperandForConstraint(SDValue Op,
798  std::string &Constraint,
799  std::vector<SDValue> &Ops,
800  SelectionDAG &DAG) const override;
801 
802  unsigned
803  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
804  if (ConstraintCode == "es")
805  return InlineAsm::Constraint_es;
806  else if (ConstraintCode == "o")
807  return InlineAsm::Constraint_o;
808  else if (ConstraintCode == "Q")
809  return InlineAsm::Constraint_Q;
810  else if (ConstraintCode == "Z")
811  return InlineAsm::Constraint_Z;
812  else if (ConstraintCode == "Zy")
813  return InlineAsm::Constraint_Zy;
814  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
815  }
816 
817  /// isLegalAddressingMode - Return true if the addressing mode represented
818  /// by AM is legal for this target, for a load/store of the specified type.
819  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
820  Type *Ty, unsigned AS,
821  Instruction *I = nullptr) const override;
822 
823  /// isLegalICmpImmediate - Return true if the specified immediate is legal
824  /// icmp immediate, that is the target has icmp instructions which can
825  /// compare a register against the immediate without having to materialize
826  /// the immediate into a register.
827  bool isLegalICmpImmediate(int64_t Imm) const override;
828 
829  /// isLegalAddImmediate - Return true if the specified immediate is legal
830  /// add immediate, that is the target has add instructions which can
831  /// add a register and the immediate without having to materialize
832  /// the immediate into a register.
833  bool isLegalAddImmediate(int64_t Imm) const override;
834 
835  /// isTruncateFree - Return true if it's free to truncate a value of
836  /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
837  /// register X1 to i32 by referencing its sub-register R1.
838  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
839  bool isTruncateFree(EVT VT1, EVT VT2) const override;
840 
841  bool isZExtFree(SDValue Val, EVT VT2) const override;
842 
843  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
844 
845  /// Returns true if it is beneficial to convert a load of a constant
846  /// to just the constant itself.
847  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
848  Type *Ty) const override;
849 
850  bool convertSelectOfConstantsToMath(EVT VT) const override {
851  return true;
852  }
853 
854  bool isDesirableToTransformToIntegerOp(unsigned Opc,
855  EVT VT) const override {
856  // Only handle float load/store pairs because float (FPR) load/store
857  // instructions take more cycles than integer (GPR) load/store on PPC.
858  if (Opc != ISD::LOAD && Opc != ISD::STORE)
859  return false;
860  if (VT != MVT::f32 && VT != MVT::f64)
861  return false;
862 
863  return true;
864  }
865 
866  // Returns true if the address of the global is stored in a TOC entry.
867  bool isAccessedAsGotIndirect(SDValue N) const;
868 
869  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
870 
871  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
872  const CallInst &I,
873  MachineFunction &MF,
874  unsigned Intrinsic) const override;
875 
876  /// getOptimalMemOpType - Returns the target specific optimal type for load
877  /// and store operations as a result of memset, memcpy, and memmove
878  /// lowering. If DstAlign is zero, the destination alignment can satisfy any
879  /// constraint. Similarly, if SrcAlign is zero there is no need to check it
880  /// against an alignment requirement,
881  /// probably because the source does not need to be loaded. If 'IsMemset' is
882  /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
883  /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
884  /// source is constant so it does not need to be loaded.
885  /// It returns EVT::Other if the type should be determined using generic
886  /// target-independent logic.
887  EVT
888  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
889  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
890  const AttributeList &FuncAttributes) const override;
891 
892  /// Is unaligned memory access allowed for the given type, and is it fast
893  /// relative to software emulation.
894  bool allowsMisalignedMemoryAccesses(
895  EVT VT, unsigned AddrSpace, unsigned Align = 1,
896  MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
897  bool *Fast = nullptr) const override;
898 
899  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
900  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
901  /// expanded to FMAs when this method returns true, otherwise fmuladd is
902  /// expanded to fmul + fadd.
903  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
904 
905  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
906 
907  // Should we expand the build vector with shuffles?
908  bool
909  shouldExpandBuildVectorWithShuffles(EVT VT,
910  unsigned DefinedValues) const override;
911 
912  /// createFastISel - This method returns a target-specific FastISel object,
913  /// or null if the target does not support "fast" instruction selection.
914  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
915  const TargetLibraryInfo *LibInfo) const override;
916 
917  /// Returns true if an argument of type Ty needs to be passed in a
918  /// contiguous block of registers in calling convention CallConv.
919  bool functionArgumentNeedsConsecutiveRegisters(
920  Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
921  // We support any array type as "consecutive" block in the parameter
922  // save area. The element type defines the alignment requirement and
923  // whether the argument should go in GPRs, FPRs, or VRs if available.
924  //
925  // Note that clang uses this capability both to implement the ELFv2
926  // homogeneous float/vector aggregate ABI, and to avoid having to use
927  // "byval" when passing aggregates that might fully fit in registers.
928  return Ty->isArrayTy();
929  }
930 
931  /// If a physical register, this returns the register that receives the
932  /// exception address on entry to an EH pad.
933  unsigned
934  getExceptionPointerRegister(const Constant *PersonalityFn) const override;
935 
936  /// If a physical register, this returns the register that receives the
937  /// exception typeid on entry to a landing pad.
938  unsigned
939  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
940 
941  /// Override to support customized stack guard loading.
942  bool useLoadStackGuardNode() const override;
943  void insertSSPDeclarations(Module &M) const override;
944 
945  bool isFPImmLegal(const APFloat &Imm, EVT VT,
946  bool ForCodeSize) const override;
947 
948  unsigned getJumpTableEncoding() const override;
949  bool isJumpTableRelative() const override;
950  SDValue getPICJumpTableRelocBase(SDValue Table,
951  SelectionDAG &DAG) const override;
952  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
953  unsigned JTI,
954  MCContext &Ctx) const override;
955 
956  private:
957  struct ReuseLoadInfo {
958  SDValue Ptr;
959  SDValue Chain;
960  SDValue ResChain;
961  MachinePointerInfo MPI;
962  bool IsDereferenceable = false;
963  bool IsInvariant = false;
964  unsigned Alignment = 0;
965  AAMDNodes AAInfo;
966  const MDNode *Ranges = nullptr;
967 
968  ReuseLoadInfo() = default;
969 
970  MachineMemOperand::Flags MMOFlags() const {
971  MachineMemOperand::Flags F = MachineMemOperand::MONone;
972  if (IsDereferenceable)
973  F |= MachineMemOperand::MODereferenceable;
974  if (IsInvariant)
975  F |= MachineMemOperand::MOInvariant;
976  return F;
977  }
978  };
979 
980  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
981  // Addrspacecasts are always noops.
982  return true;
983  }
984 
985  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
986  SelectionDAG &DAG,
987  ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
988  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
989  SelectionDAG &DAG) const;
990 
991  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
992  SelectionDAG &DAG, const SDLoc &dl) const;
993  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
994  const SDLoc &dl) const;
995 
996  bool directMoveIsProfitable(const SDValue &Op) const;
997  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
998  const SDLoc &dl) const;
999 
1000  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
1001  const SDLoc &dl) const;
1002 
1003  SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;
1004 
1005  SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
1006  SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
1007 
1008  bool
1009  IsEligibleForTailCallOptimization(SDValue Callee,
1010  CallingConv::ID CalleeCC,
1011  bool isVarArg,
1012  const SmallVectorImpl<ISD::InputArg> &Ins,
1013  SelectionDAG& DAG) const;
1014 
1015  bool
1016  IsEligibleForTailCallOptimization_64SVR4(
1017  SDValue Callee,
1018  CallingConv::ID CalleeCC,
1019  ImmutableCallSite CS,
1020  bool isVarArg,
1021  const SmallVectorImpl<ISD::OutputArg> &Outs,
1022  const SmallVectorImpl<ISD::InputArg> &Ins,
1023  SelectionDAG& DAG) const;
1024 
1025  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
1026  SDValue Chain, SDValue &LROpOut,
1027  SDValue &FPOpOut,
1028  const SDLoc &dl) const;
1029 
1030  SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, SDValue GA) const;
1031 
1032  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
1033  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
1034  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
1035  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
1036  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
1037  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
1038  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
1039  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
1040  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
1042  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
1043  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
1044  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
1045  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
1046  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
1048  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
1049  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
1050  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
1051  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
1052  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
1053  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1054  const SDLoc &dl) const;
1055  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1056  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
1057  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1058  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1059  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
1060  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
1062  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
1064  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1065  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
1066  SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
1067  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
1068  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
1070  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
1071  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
1072  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
1073  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
1074 
1075  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
1076  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
1077 
1078  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1079  CallingConv::ID CallConv, bool isVarArg,
1080  const SmallVectorImpl<ISD::InputArg> &Ins,
1081  const SDLoc &dl, SelectionDAG &DAG,
1082  SmallVectorImpl<SDValue> &InVals) const;
1083  SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
1084  bool isTailCall, bool isVarArg, bool isPatchPoint,
1085  bool hasNest, SelectionDAG &DAG,
1086  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
1087  SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
1088  SDValue &Callee, int SPDiff, unsigned NumBytes,
1089  const SmallVectorImpl<ISD::InputArg> &Ins,
1090  SmallVectorImpl<SDValue> &InVals,
1091  ImmutableCallSite CS) const;
1092 
1093  SDValue
1094  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1095  const SmallVectorImpl<ISD::InputArg> &Ins,
1096  const SDLoc &dl, SelectionDAG &DAG,
1097  SmallVectorImpl<SDValue> &InVals) const override;
1098 
1099  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
1100  SmallVectorImpl<SDValue> &InVals) const override;
1101 
1102  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1103  bool isVarArg,
1104  const SmallVectorImpl<ISD::OutputArg> &Outs,
1105  LLVMContext &Context) const override;
1106 
1107  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1108  const SmallVectorImpl<ISD::OutputArg> &Outs,
1109  const SmallVectorImpl<SDValue> &OutVals,
1110  const SDLoc &dl, SelectionDAG &DAG) const override;
1111 
1112  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
1113  SelectionDAG &DAG, SDValue ArgVal,
1114  const SDLoc &dl) const;
1115 
1116  SDValue LowerFormalArguments_Darwin(
1117  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1118  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1119  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1120  SDValue LowerFormalArguments_64SVR4(
1121  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1122  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1123  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1124  SDValue LowerFormalArguments_32SVR4(
1125  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1126  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1127  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1128 
1129  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
1130  SDValue CallSeqStart,
1131  ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1132  const SDLoc &dl) const;
1133 
1134  SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
1135  CallingConv::ID CallConv, bool isVarArg,
1136  bool isTailCall, bool isPatchPoint,
1137  const SmallVectorImpl<ISD::OutputArg> &Outs,
1138  const SmallVectorImpl<SDValue> &OutVals,
1139  const SmallVectorImpl<ISD::InputArg> &Ins,
1140  const SDLoc &dl, SelectionDAG &DAG,
1141  SmallVectorImpl<SDValue> &InVals,
1142  ImmutableCallSite CS) const;
1143  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
1144  CallingConv::ID CallConv, bool isVarArg,
1145  bool isTailCall, bool isPatchPoint,
1146  const SmallVectorImpl<ISD::OutputArg> &Outs,
1147  const SmallVectorImpl<SDValue> &OutVals,
1148  const SmallVectorImpl<ISD::InputArg> &Ins,
1149  const SDLoc &dl, SelectionDAG &DAG,
1150  SmallVectorImpl<SDValue> &InVals,
1151  ImmutableCallSite CS) const;
1152  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
1153  CallingConv::ID CallConv, bool isVarArg,
1154  bool isTailCall, bool isPatchPoint,
1155  const SmallVectorImpl<ISD::OutputArg> &Outs,
1156  const SmallVectorImpl<SDValue> &OutVals,
1157  const SmallVectorImpl<ISD::InputArg> &Ins,
1158  const SDLoc &dl, SelectionDAG &DAG,
1159  SmallVectorImpl<SDValue> &InVals,
1160  ImmutableCallSite CS) const;
1161  SDValue LowerCall_AIX(SDValue Chain, SDValue Callee,
1162  CallingConv::ID CallConv, bool isVarArg,
1163  bool isTailCall, bool isPatchPoint,
1164  const SmallVectorImpl<ISD::OutputArg> &Outs,
1165  const SmallVectorImpl<SDValue> &OutVals,
1166  const SmallVectorImpl<ISD::InputArg> &Ins,
1167  const SDLoc &dl, SelectionDAG &DAG,
1168  SmallVectorImpl<SDValue> &InVals,
1169  ImmutableCallSite CS) const;
1170 
1171  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
1172  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
1173  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
1174 
1175  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
1176  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
1177  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
1178  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
1179  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
1180  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
1181  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
1182  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
1183  SDValue combineMUL(SDNode *N, DAGCombinerInfo &DCI) const;
1184  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
1185  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
1186  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
1187  SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
1188  SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
1189  SDValue combineVReverseMemOP(ShuffleVectorSDNode *SVN, LSBaseSDNode *LSBase,
1190  DAGCombinerInfo &DCI) const;
1191 
1192  /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
1193  /// SETCC with integer subtraction when (1) there is a legal way of doing it
1194  /// and (2) keeping the result of the comparison in a GPR has a performance benefit.
1195  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;
1196 
1197  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1198  int &RefinementSteps, bool &UseOneConstNR,
1199  bool Reciprocal) const override;
1200  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1201  int &RefinementSteps) const override;
1202  unsigned combineRepeatedFPDivisors() const override;
1203 
1204  SDValue
1205  combineElementTruncationToVectorTruncation(SDNode *N,
1206  DAGCombinerInfo &DCI) const;
1207 
1208  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
1209  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
1210  /// essentially any shuffle of v8i16 vectors that just inserts one element
1211  /// from one vector into the other.
1212  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1213 
1214  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
1215  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
1216  /// essentially v16i8 vector version of VINSERTH.
1217  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1218 
1219  // Return whether the call instruction can potentially be optimized to a
1220  // tail call. This will cause the optimizers to attempt to move, or
1221  // duplicate return instructions to help enable tail call optimizations.
1222  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
1223  bool hasBitPreservingFPLogic(EVT VT) const override;
1224  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
1225  }; // end class PPCTargetLowering
1226 
1227  namespace PPC {
1228 
1229  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
1230  const TargetLibraryInfo *LibInfo);
1231 
1232  } // end namespace PPC
1233 
1234  bool isIntS16Immediate(SDNode *N, int16_t &Imm);
1235  bool isIntS16Immediate(SDValue Op, int16_t &Imm);
1236 
1237 } // end namespace llvm
1238 
1239 #endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:914
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:112
bool isSelectSupported(SelectSupportKind Kind) const override
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
Return with a flag operand, matched by &#39;blr&#39;.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
getPreferredVectorAction - The code we generate when vector types are legalized by promoting the inte...
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
LLVMContext & Context
QVFPERM = This corresponds to the QPX qvfperm instruction.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
GPRC = address of GLOBAL_OFFSET_TABLE.
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Extract SPE register component, second argument is high or low.
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:145
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction...
Function Alias Analysis Results
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:323
bool hasAndNotCompare(SDValue) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
unsigned const TargetRegisterInfo * TRI
Metadata node.
Definition: Metadata.h:863
F(f)
bool convertSelectOfConstantsToMath(EVT VT) const override
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
CALL - A direct function call.
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
Floating-point-to-interger conversion instructions.
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and EXTRACT_ELEMENT but take f64 arguments in...
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getSplatIdxForPPCMnemonics - Return the splat index as a value that is appropriate for PPC mnemonics ...
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
Base class for LoadSDNode and StoreSDNode.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:779
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
This file contains the simple types necessary to represent the attributes associated with functions a...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
Direct move from a GPR to a VSX register (algebraic)
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
QVALIGNI = This corresponds to the QPX qvaligni instruction.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
Context object for machine code objects.
Definition: MCContext.h:65
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
An SDNode for Power9 vector absolute value difference.
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
This contains information for each constraint that we are lowering.
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VMRGL* instruction with the ...
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting load memory instruction such as LXVDSX...
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
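The xxbrw pattern is a byte reversal within each 4-byte word, so the corresponding v16i8 mask is {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}. A hypothetical standalone check of that shape (the real predicate also accepts undef entries):

#include <array>

// Hypothetical sketch: true if the mask reverses the bytes inside every
// 4-byte word, i.e. Mask[i] == (i & ~3) + 3 - (i & 3).
static bool reversesBytesInEachWord(const std::array<int, 16> &Mask) {
  for (int i = 0; i != 16; ++i)
    if (Mask[i] != (i & ~3) + 3 - (i & 3))
      return false;
  return true;
}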
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:19
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
FSEL - Traditional three-operand fsel node.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
static Value * LowerBSWAP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower bswap of V before the specified instruction IP.
unsigned getScalarSizeInBits() const
This is an important base class in LLVM.
Definition: Constant.h:41
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
SExtVElems, takes an input vector of a smaller type and sign extends to an output vector of a larger ...
VECINSERT - The PPC vector insert instruction.
Direct move from a VSX register to a GPR.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:988
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
STFIWX - The STFIWX instruction.
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
Store scalar integers from VSR.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget)
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
QVESPLATI = This corresponds to the QPX qvesplati instruction.
VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
Common code between 32-bit and 64-bit PowerPC targets.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
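In the big-endian, two-operand form, vsldoi selects 16 consecutive bytes starting at some offset from the concatenation of the two sources, so the mask must be {Amt, Amt+1, ..., Amt+15}. A hypothetical sketch of that test (the LLVM helper additionally handles undef entries and the unary and little-endian variants):

#include <array>

// Hypothetical sketch: return the vsldoi shift amount implied by a v16i8
// mask, or -1 if the mask is not a run of 16 consecutive byte indices.
static int vsldoiShiftAmount(const std::array<int, 16> &Mask) {
  int Amt = Mask[0];
  if (Amt < 0 || Amt > 15)
    return -1;
  for (int i = 1; i != 16; ++i)
    if (Mask[i] != Amt + i)
      return -1;
  return Amt;
}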
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
Extended Value Type.
Definition: ValueTypes.h:33
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
This class contains a discriminated union of information about pointers in memory operands...
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:40
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or lower (IDX=1) half of v4f32 to v2f6...
Extract a subvector from unsigned integer vector and convert to FP.
QBFLT = Access the underlying QPX floating-point boolean representation.
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate...
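The semantics are simply "sign-extend the low 32 bits, then shift left by an immediate", which this illustrative one-liner captures (not the DAG node itself, just the arithmetic it performs):

#include <cstdint>

// Illustrative only: the value an extswsli with shift amount Sh produces
// from the low 32 bits of a 64-bit register value.
static int64_t extswsliValue(int64_t Reg, unsigned Sh) {
  uint64_t Extended =
      static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(Reg)));
  return static_cast<int64_t>(Extended << Sh); // shift in unsigned to avoid UB
}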
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
GPRC = TOC_ENTRY GA, TOC - Loads the entry for GA from the TOC, where the TOC base is given by the last...
XXSPLT - The PPC VSX splat instructions.
VECSHL - The PPC vector shift left instruction.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition: Function.h:212
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
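One example of the kind of pairing this hook enables (a sketch of the idea, not the exact DAG combine): two equality compares joined by a logical AND can be folded into a single compare of OR'ed XOR differences.

#include <cstdint>

// (A == B) && (C == D)  ==>  ((A ^ B) | (C ^ D)) == 0
// Illustrative scalar equivalence only; the actual rewrite operates on
// setcc/and/or SelectionDAG nodes when the target opts in via this hook.
static bool bothEqual(uint64_t A, uint64_t B, uint64_t C, uint64_t D) {
  return ((A ^ B) | (C ^ D)) == 0;
}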
Provides information about what library functions are available for the current target.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:643
CHAIN = SC CHAIN, Imm128 - System call.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:921
Represents one node in the SelectionDAG.
VPERM - The PPC VPERM Instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction...
const Function & getFunction() const
Return the LLVM function that this machine code represents.
STXSIX - The STXSI[bh]X instruction.
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Class for arbitrary precision integers.
Definition: APInt.h:69
QVGPCI = This corresponds to the QPX qvgpci instruction.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2...
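The reason the addze is needed: an arithmetic shift right rounds toward negative infinity, while signed division must round toward zero. srawi/sradi record in the carry bit whether a negative dividend had any nonzero bits shifted out, and addze adds that carry back. A hedged standalone sketch of the same computation (assuming the usual arithmetic right shift of negative values and 0 < K < 31):

#include <cstdint>

// Illustrative C equivalent of the srawi/addze idiom for X / 2^K.
static int32_t sdivByPow2(int32_t X, unsigned K) {
  int32_t Shifted = X >> K;                                   // srawi X, K (rounds toward -inf)
  bool Carry = X < 0 && (X & ((int32_t(1) << K) - 1)) != 0;   // CA: negative and bits shifted out
  return Shifted + Carry;                                     // addze: correct toward zero
}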
Flags
Flags values. These may be or'd together.
GPRC = address of _GLOBAL_OFFSET_TABLE_.
Representation of each machine instruction.
Definition: MachineInstr.h:64
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SelectSupportKind
Enum that describes what type of support for selects the target has.
Reciprocal estimate instructions (unary FP ops).
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:643
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
Establish a view to a call site for examination.
Definition: CallSite.h:906
CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
Direct move from a GPR to a VSX register (zero)
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The CMPB instruction (takes two operands of i32 or i64).
The memory access always returns the same value (or traps).
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
TC_RETURN - A tail call return.
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
XXREVERSE - The PPC VSX reverse instruction.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
Direct move of 2 consecutive GPR to a VSX register.
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
These nodes represent PPC shifts.
VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a v2f32 value into the lower ha...
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
Extract a subvector from signed integer vector and convert to FP.
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain...
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
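The underlying range test is just "does the constant survive a round-trip through int16_t", which is what PPC D-form immediates (e.g. addi and load/store displacements) require. A hypothetical standalone version of that test:

#include <cstdint>

// Hypothetical sketch: true if V is representable as a sign-extended 16-bit
// immediate; on success Imm holds the truncated value.
static bool fitsSignedImm16(int64_t V, int16_t &Imm) {
  Imm = static_cast<int16_t>(V);
  return static_cast<int64_t>(Imm) == V;
}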
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VMRGH* instruction with the ...
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
XXPERMDI - The PPC XXPERMDI instruction.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:221
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:951
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1. ...