1 //===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that PPC uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
15 #define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
16 
17 #include "PPC.h"
18 #include "PPCInstrInfo.h"
26 #include "llvm/IR/Attributes.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/Function.h"
29 #include "llvm/IR/InlineAsm.h"
30 #include "llvm/IR/Metadata.h"
31 #include "llvm/IR/Type.h"
33 #include <utility>
34 
35 namespace llvm {
36 
37  namespace PPCISD {
38 
39  // When adding a NEW PPCISD node please add it to the correct position in
40  // the enum. The order of elements in this enum matters!
41  // Values that are added after this entry:
42  // STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE
43  // are considered memory opcodes and are treated differently from entries
44  // that come before it. For example, ADD or MUL should be placed before
45  // the ISD::FIRST_TARGET_MEMORY_OPCODE while a LOAD or STORE should come
46  // after it.
47  enum NodeType : unsigned {
48  // Start the numbering where the builtin ops and target ops leave off.
50 
51  /// FSEL - Traditional three-operand fsel node.
52  ///
54 
55  /// FCFID - The FCFID instruction, taking an f64 operand and producing
56  /// an f64 value containing the FP representation of the integer that
57  /// was temporarily in the f64 operand.
59 
60  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
61  /// unsigned integers and single-precision outputs.
63 
64  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
65  /// operand, producing an f64 value containing the integer representation
66  /// of that FP value.
68 
69  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
70  /// unsigned integers with round toward zero.
72 
73  /// Floating-point-to-integer conversion instructions
75 
76  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
77  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.
79 
80  /// SExtVElems, takes an input vector of a smaller type and sign
81  /// extends to an output vector of a larger type.
83 
84  /// Reciprocal estimate instructions (unary FP ops).
86 
87  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
88  // three v4f32 operands and producing a v4f32 result.
90 
91  /// VPERM - The PPC VPERM Instruction.
92  ///
94 
95  /// XXSPLT - The PPC VSX splat instructions
96  ///
98 
99  /// VECINSERT - The PPC vector insert instruction
100  ///
102 
103  /// XXREVERSE - The PPC VSX reverse instruction
104  ///
106 
107  /// VECSHL - The PPC vector shift left instruction
108  ///
110 
111  /// XXPERMDI - The PPC XXPERMDI instruction
112  ///
114 
115  /// The CMPB instruction (takes two operands of i32 or i64).
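 /// Illustrative note: cmpb compares the operands byte by byte and sets each
 /// result byte to 0xFF where the corresponding bytes are equal and to 0x00
 /// where they differ.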
117 
118  /// Hi/Lo - These represent the high and low 16-bit parts of a global
119  /// address respectively. These nodes have two operands, the first of
120  /// which must be a TargetGlobalAddress, and the second of which must be a
121  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
122  /// though these are usually folded into other nodes.
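 /// As an illustrative example (register choice and relocation spelling are
 /// ABI/assembler dependent), materializing the address G+C typically folds
 /// into:
 ///   lis r3, (G+C)@ha
 ///   addi r3, r3, (G+C)@l
 /// where Hi supplies the @ha half and Lo the @l half.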
123  Hi, Lo,
124 
125  /// The following two target-specific nodes are used for calls through
126  /// function pointers in the 64-bit SVR4 ABI.
127 
128  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
129  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
130  /// compute an allocation on the stack.
132 
133  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
134  /// compute an offset from native SP to the address of the most recent
135  /// dynamic alloca.
137 
138  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
139  /// at function entry, used for PIC code.
141 
142  /// These nodes represent PPC shifts.
143  ///
144  /// For scalar types, only the last `n + 1` bits of the shift amounts
145  /// are used, where n is log2(sizeof(element) * 8). See sld/slw, etc.
146  /// for exact behaviors.
147  ///
148  /// For vector types, only the last n bits are used. See vsld.
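 /// For example, for an i32 shift n is 5, so the low 6 bits of the amount are
 /// significant; slw with an amount in the range 32..63 therefore produces 0
 /// rather than wrapping the amount modulo 32.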
150 
151  /// EXTSWSLI = The PPC extswsli instruction, which does an extend-sign
152  /// word and shift left immediate.
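 /// Illustrative example (register names arbitrary): "extswsli r3, r4, 2"
 /// sign-extends the low 32 bits of r4 to 64 bits and shifts the result left
 /// by 2, folding an extsw/sldi pair into a single instruction.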
154 
155  /// The combination of sra[wd]i and addze used to implement signed
156  /// integer division by a power of 2. The first operand is the dividend,
157  /// and the second is the constant shift amount (representing the
158  /// divisor).
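 /// A short worked example (illustrative; register names are arbitrary):
 /// dividing a signed 32-bit value in r3 by 4 can be emitted as
 ///   srawi r3, r3, 2   // arithmetic shift right; CA is set if 1-bits were
 ///                     // shifted out of a negative value
 ///   addze r3, r3      // add CA, rounding the quotient toward zero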
160 
161  /// CALL - A direct function call.
162  /// CALL_NOP is a call with the special NOP which follows 64-bit
163  /// SVR4 calls.
165 
166  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
167  /// MTCTR instruction.
169 
170  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
171  /// BCTRL instruction.
173 
174  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
175  /// instruction and the TOC reload required on SVR4 PPC64.
177 
178  /// Return with a flag operand, matched by 'blr'
180 
181  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
182  /// This copies the bits corresponding to the specified CRREG into the
183  /// resultant GPR. Bits corresponding to other CR regs are undefined.
185 
186  /// Direct move from a VSX register to a GPR
188 
189  /// Direct move from a GPR to a VSX register (algebraic)
191 
192  /// Direct move from a GPR to a VSX register (zero)
194 
195  /// Direct move of 2 consecutive GPRs to a VSX register.
197 
198  /// Extract a subvector from signed integer vector and convert to FP.
199  /// It is primarily used to convert a (widened) illegal integer vector
200  /// type to a legal floating point vector type.
201  /// For example v2i32 -> widened to v4i32 -> v2f64
203 
204  /// Extract a subvector from unsigned integer vector and convert to FP.
205  /// As with SINT_VEC_TO_FP, used for converting illegal types.
207 
208  // FIXME: Remove these once the ANDI glue bug is fixed:
209  /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
210  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
211  /// implement truncation of i32 or i64 to i1.
213 
214  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
215  // target (returns (Lo, Hi)). It takes a chain operand.
217 
218  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
220 
221  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
223 
224  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
225  /// instructions. For lack of a better number, we use the opcode number
226  /// encoding for the OPC field to identify the compare. For example, 838
227  /// is VCMPGTSH.
229 
230  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
231  /// altivec VCMP*o instructions. For lack of a better number, we use the
232  /// opcode number encoding for the OPC field to identify the compare. For
233  /// example, 838 is VCMPGTSH.
235 
236  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
237  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
238  /// condition register to branch on, OPC is the branch opcode to use (e.g.
239  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
240  /// an optional input flag argument.
242 
243  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
244  /// loops.
246 
247  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
248  /// towards zero. Used only as part of the long double-to-int
249  /// conversion sequence.
251 
252  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
254 
255  /// TC_RETURN - A tail call return.
256  /// operand #0 chain
257  /// operand #1 callee (register or absolute)
258  /// operand #2 stack adjustment
259  /// operand #3 optional in flag
261 
262  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
265 
266  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
267  /// on PPC32.
269 
270  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
271  /// local dynamic TLS on PPC32.
273 
274  /// G8RC = ADDIS_GOT_TPREL_HA %x2, Symbol - Used by the initial-exec
275  /// TLS model, produces an ADDIS8 instruction that adds the GOT
276  /// base to sym\@got\@tprel\@ha.
278 
279  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
280  /// TLS model, produces a LD instruction with base register G8RReg
281  /// and offset sym\@got\@tprel\@l. This completes the addition that
282  /// finds the offset of "sym" relative to the thread pointer.
284 
285  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
286  /// model, produces an ADD instruction that adds the contents of
287  /// G8RReg to the thread pointer. Symbol contains a relocation
288  /// sym\@tls which is to be replaced by the thread pointer and
289  /// identifies to the linker that the instruction is part of a
290  /// TLS sequence.
292 
293  /// G8RC = ADDIS_TLSGD_HA %x2, Symbol - For the general-dynamic TLS
294  /// model, produces an ADDIS8 instruction that adds the GOT base
295  /// register to sym\@got\@tlsgd\@ha.
297 
298  /// %x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
299  /// model, produces an ADDI8 instruction that adds G8RReg to
300  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
301  /// ADDIS_TLSGD_L_ADDR until after register assignment.
303 
304  /// %x3 = GET_TLS_ADDR %x3, Symbol - For the general-dynamic TLS
305  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
306  /// ADDIS_TLSGD_L_ADDR until after register assignment.
308 
309  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
310  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
311  /// register assignment.
313 
314  /// G8RC = ADDIS_TLSLD_HA %x2, Symbol - For the local-dynamic TLS
315  /// model, produces an ADDIS8 instruction that adds the GOT base
316  /// register to sym\@got\@tlsld\@ha.
318 
319  /// %x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
320  /// model, produces an ADDI8 instruction that adds G8RReg to
321  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
322  /// ADDIS_TLSLD_L_ADDR until after register assignment.
324 
325  /// %x3 = GET_TLSLD_ADDR %x3, Symbol - For the local-dynamic TLS
326  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
327  /// ADDIS_TLSLD_L_ADDR until after register assignment.
329 
330  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
331  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
332  /// following register assignment.
334 
335  /// G8RC = ADDIS_DTPREL_HA %x3, Symbol - For the local-dynamic TLS
336  /// model, produces an ADDIS8 instruction that adds X3 to
337  /// sym\@dtprel\@ha.
339 
340  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
341  /// model, produces an ADDI8 instruction that adds G8RReg to
342  /// sym\@got\@dtprel\@l.
344 
345  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
346  /// during instruction selection to optimize a BUILD_VECTOR into
347  /// operations on splats. This is necessary to avoid losing these
348  /// optimizations due to constant folding.
350 
351  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
352  /// operand identifies the operating system entry point.
353  SC,
354 
355  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
357 
358  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
359  /// history rolling buffer entry.
361 
362  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
364 
365  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
366  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
367  /// or stxvd2x instruction. The chain is necessary because the
368  /// sequence replaces a load and needs to provide the same number
369  /// of outputs.
371 
372  /// An SDNode for swaps that are not associated with any loads/stores
373  /// and thereby have no chain.
375 
376  /// An SDNode for Power9 vector absolute value difference.
377  /// operand #0 vector
378  /// operand #1 vector
379  /// operand #2 constant i32 0 or 1, to indicate whether it needs to patch
380  /// the most significant bit for signed i32
381  ///
382  /// Power9 VABSD* instructions are designed to support unsigned integer
383  /// vectors (byte/halfword/word), if we want to make use of them for signed
384  /// integer vectors, we have to flip their sign bits first. Flipping the sign
385  /// bit of a byte/halfword integer vector would be inefficient, but for a word
386  /// integer vector we can leverage XVNEGSP to do it efficiently, e.g.:
387  /// abs(sub(a,b)) => VABSDUW(a+0x80000000, b+0x80000000)
388  /// => VABSDUW((XVNEGSP a), (XVNEGSP b))
390 
391  /// QVFPERM = This corresponds to the QPX qvfperm instruction.
393 
394  /// QVGPCI = This corresponds to the QPX qvgpci instruction.
396 
397  /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
399 
400  /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
402 
403  /// QBFLT = Access the underlying QPX floating-point boolean
404  /// representation.
406 
407  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
408  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
409  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
410  /// i32.
412 
413  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
414  /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
415  /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
416  /// or i32.
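 /// Illustrative note: STBRX and LBRX typically select to the byte-reversed
 /// memory instructions, e.g. sthbrx/stwbrx and lhbrx/lwbrx respectively.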
418 
419  /// STFIWX - The STFIWX instruction. The first operand is an input token
420  /// chain, then an f64 value to store, then an address to store it to.
422 
423  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
424  /// load which sign-extends from a 32-bit integer value into the
425  /// destination 64-bit register.
427 
428  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
429  /// load which zero-extends from a 32-bit integer value into the
430  /// destination 64-bit register.
432 
433  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
434  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
435  /// This can be used for converting loaded integers to floating point.
437 
438  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
439  /// chain, then an f64 value to store, then an address to store it to,
440  /// followed by a byte-width for the store.
442 
443  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
444  /// Maps directly to an lxvd2x instruction that will be followed by
445  /// an xxswapd.
447 
448  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
449  /// Maps directly to an stxvd2x instruction that will be preceded by
450  /// an xxswapd.
452 
453  /// Store scalar integers from VSR.
455 
456  /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
457  /// The 4xf32 load used for v4i1 constants.
459 
460  /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
461  /// except they ensure that the compare input is zero-extended for
462  /// sub-word versions because the atomic loads zero-extend.
464 
465  /// GPRC = TOC_ENTRY GA, TOC
466  /// Loads the entry for GA from the TOC, where the TOC base is given by
467  /// the last operand.
469  };
470 
471  } // end namespace PPCISD
472 
473  /// Define some predicates that are used for node matching.
474  namespace PPC {
475 
476  /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
477  /// VPKUHUM instruction.
478  bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
479  SelectionDAG &DAG);
480 
481  /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
482  /// VPKUWUM instruction.
483  bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
484  SelectionDAG &DAG);
485 
486  /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
487  /// VPKUDUM instruction.
488  bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
489  SelectionDAG &DAG);
490 
491  /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
492  /// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
493  bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
494  unsigned ShuffleKind, SelectionDAG &DAG);
495 
496  /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
497  /// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
498  bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
499  unsigned ShuffleKind, SelectionDAG &DAG);
500 
501  /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
502  /// a VMRGEW or VMRGOW instruction.
503  bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
504  unsigned ShuffleKind, SelectionDAG &DAG);
505  /// isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable
506  /// for a XXSLDWI instruction.
507  bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
508  bool &Swap, bool IsLE);
509 
510  /// isXXBRHShuffleMask - Return true if this is a shuffle mask suitable
511  /// for a XXBRH instruction.
513 
514  /// isXXBRWShuffleMask - Return true if this is a shuffle mask suitable
515  /// for a XXBRW instruction.
517 
518  /// isXXBRDShuffleMask - Return true if this is a shuffle mask suitable
519  /// for a XXBRD instruction.
521 
522  /// isXXBRQShuffleMask - Return true if this is a shuffle mask suitable
523  /// for a XXBRQ instruction.
525 
526  /// isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable
527  /// for a XXPERMDI instruction.
528  bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
529  bool &Swap, bool IsLE);
530 
531  /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
532  /// shift amount, otherwise return -1.
533  int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
534  SelectionDAG &DAG);
535 
536  /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
537  /// specifies a splat of a single element that is suitable for input to
538  /// VSPLTB/VSPLTH/VSPLTW.
539  bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
540 
541  /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
542  /// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
543  /// shuffle of v4f32/v4i32 vectors that just inserts one element from one
544  /// vector into the other. This function will also set a couple of
545  /// output parameters for how much the source vector needs to be shifted and
546  /// what byte number needs to be specified for the instruction to put the
547  /// element in the desired location of the target vector.
548  bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
549  unsigned &InsertAtByte, bool &Swap, bool IsLE);
550 
551  /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
552  /// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
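 /// For example (illustrative): a v16i8 shuffle whose mask repeats the single
 /// index 5 is a splat of element 5 with EltSize == 1, and this helper returns
 /// the corresponding VSPLTB immediate (adjusting the element index for byte
 /// order where necessary).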
553  unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);
554 
555  /// get_VSPLTI_elt - If this is a build_vector of constants which can be
556  /// formed by using a vspltis[bhw] instruction of the specified element
557  /// size, return the constant being splatted. The ByteSize field indicates
558  /// the number of bytes of each element [124] -> [bhw].
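 /// For example (illustrative): a v8i16 build_vector whose lanes are all the
 /// constant 3 fits the signed 5-bit immediate of vspltish, so calling this
 /// with ByteSize == 2 returns the splatted constant 3.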
559  SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
560 
561  /// If this is a qvaligni shuffle mask, return the shift
562  /// amount, otherwise return -1.
564 
565  } // end namespace PPC
566 
568  const PPCSubtarget &Subtarget;
569 
570  public:
571  explicit PPCTargetLowering(const PPCTargetMachine &TM,
572  const PPCSubtarget &STI);
573 
574  /// getTargetNodeName() - This method returns the name of a target specific
575  /// DAG node.
576  const char *getTargetNodeName(unsigned Opcode) const override;
577 
578  bool isSelectSupported(SelectSupportKind Kind) const override {
579  // PowerPC does not support scalar condition selects on vectors.
580  return (Kind != SelectSupportKind::ScalarCondVectorVal);
581  }
582 
583  /// getPreferredVectorAction - The code we generate when vector types are
584  /// legalized by promoting the integer element type is often much worse
585  /// than code we generate if we widen the type for applicable vector types.
586  /// The issue with promoting is that the vector is scalarized, the individual
587  /// elements are promoted, and then the vector is rebuilt. So say we load a pair
588  /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
589  /// loads, moves back into VSRs (or memory ops if we don't have moves) and
590  /// then the VPERM for the shuffle. All in all a very slow sequence.
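 /// So, per the check below, element types whose size is a multiple of 8 bits
 /// (e.g. v16i8, v8i16, v4i32, v2f64) are widened rather than promoted.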
592  const override {
593  if (VT.getScalarSizeInBits() % 8 == 0)
594  return TypeWidenVector;
596  }
597 
598  bool useSoftFloat() const override;
599 
600  bool hasSPE() const;
601 
602  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
603  return MVT::i32;
604  }
605 
606  bool isCheapToSpeculateCttz() const override {
607  return true;
608  }
609 
610  bool isCheapToSpeculateCtlz() const override {
611  return true;
612  }
613 
614  bool isCtlzFast() const override {
615  return true;
616  }
617 
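 // Illustrative note: returning true from hasAndNotCompare lets DAGCombine
 // rewrite (X & Y) == Y into (~X & Y) == 0, which PPC can implement with the
 // andc instruction followed by a compare against zero.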
618  bool hasAndNotCompare(SDValue) const override {
619  return true;
620  }
621 
622  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
623  return VT.isScalarInteger();
624  }
625 
626  bool supportSplitCSR(MachineFunction *MF) const override {
627  return
629  MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
630  }
631 
632  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
633 
634  void insertCopiesSplitCSR(
635  MachineBasicBlock *Entry,
636  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
637 
638  /// getSetCCResultType - Return the ISD::SETCC ValueType
639  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
640  EVT VT) const override;
641 
642  /// Return true if the target always benefits from combining into FMA for a
643  /// given value type. This must typically return false on targets where FMA
644  /// takes more cycles to execute than FADD.
645  bool enableAggressiveFMAFusion(EVT VT) const override;
646 
647  /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
648  /// offset and addressing mode by reference, if this node's address can be
649  /// legally represented as a pre-indexed load / store address.
650  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
651  SDValue &Offset,
653  SelectionDAG &DAG) const override;
654 
655  /// SelectAddressRegReg - Given the specified address, check to see if it
656  /// can be represented as an indexed [r+r] operation. Returns false if it
657  /// can be more efficiently represented with [r+imm].
658  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
659  SelectionDAG &DAG) const;
660 
661  /// SelectAddressRegImm - Returns true if the address N can be represented
662  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
663  /// is not better represented as reg+reg. If Alignment is non-zero, only accept
664  /// displacements that are multiples of that value (e.g. 4 for STD and friends).
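 /// Illustrative example (register names arbitrary): (load (add X, 16)) fits
 /// the [r+imm] form and can become "lwz r3, 16(rX)", whereas (load (add X, Y))
 /// with a non-constant Y needs the indexed form "lwzx r3, rX, rY" handled by
 /// SelectAddressRegReg above.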
665  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
666  SelectionDAG &DAG, unsigned Alignment) const;
667 
668  /// SelectAddressRegRegOnly - Given the specified address, force it to be
669  /// represented as an indexed [r+r] operation.
670  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
671  SelectionDAG &DAG) const;
672 
673  Sched::Preference getSchedulingPreference(SDNode *N) const override;
674 
675  /// LowerOperation - Provide custom lowering hooks for some operations.
676  ///
677  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
678 
679  /// ReplaceNodeResults - Replace the results of node with an illegal result
680  /// type with new values built out of custom code.
681  ///
682  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
683  SelectionDAG &DAG) const override;
684 
685  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
686  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;
687 
688  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
689 
690  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
691  SmallVectorImpl<SDNode *> &Created) const override;
692 
693  unsigned getRegisterByName(const char* RegName, EVT VT,
694  SelectionDAG &DAG) const override;
695 
696  void computeKnownBitsForTargetNode(const SDValue Op,
697  KnownBits &Known,
698  const APInt &DemandedElts,
699  const SelectionDAG &DAG,
700  unsigned Depth = 0) const override;
701 
702  unsigned getPrefLoopAlignment(MachineLoop *ML) const override;
703 
704  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
705  return true;
706  }
707 
708  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
709  AtomicOrdering Ord) const override;
710  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
711  AtomicOrdering Ord) const override;
712 
714  EmitInstrWithCustomInserter(MachineInstr &MI,
715  MachineBasicBlock *MBB) const override;
716  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
717  MachineBasicBlock *MBB,
718  unsigned AtomicSize,
719  unsigned BinOpcode,
720  unsigned CmpOpcode = 0,
721  unsigned CmpPred = 0) const;
722  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
723  MachineBasicBlock *MBB,
724  bool is8bit,
725  unsigned Opcode,
726  unsigned CmpOpcode = 0,
727  unsigned CmpPred = 0) const;
728 
729  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
730  MachineBasicBlock *MBB) const;
731 
732  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
733  MachineBasicBlock *MBB) const;
734 
735  ConstraintType getConstraintType(StringRef Constraint) const override;
736 
737  /// Examine constraint string and operand type and determine a weight value.
738  /// The operand object must already have been set up with the operand type.
739  ConstraintWeight getSingleConstraintMatchWeight(
740  AsmOperandInfo &info, const char *constraint) const override;
741 
742  std::pair<unsigned, const TargetRegisterClass *>
743  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
744  StringRef Constraint, MVT VT) const override;
745 
746  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
747  /// function arguments in the caller parameter area. This is the actual
748  /// alignment, not its logarithm.
749  unsigned getByValTypeAlignment(Type *Ty,
750  const DataLayout &DL) const override;
751 
752  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
753  /// vector. If it is invalid, don't add anything to Ops.
754  void LowerAsmOperandForConstraint(SDValue Op,
755  std::string &Constraint,
756  std::vector<SDValue> &Ops,
757  SelectionDAG &DAG) const override;
758 
759  unsigned
760  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
761  if (ConstraintCode == "es")
763  else if (ConstraintCode == "o")
765  else if (ConstraintCode == "Q")
767  else if (ConstraintCode == "Z")
769  else if (ConstraintCode == "Zy")
771  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
772  }
773 
774  /// isLegalAddressingMode - Return true if the addressing mode represented
775  /// by AM is legal for this target, for a load/store of the specified type.
776  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
777  Type *Ty, unsigned AS,
778  Instruction *I = nullptr) const override;
779 
780  /// isLegalICmpImmediate - Return true if the specified immediate is legal
781  /// icmp immediate, that is the target has icmp instructions which can
782  /// compare a register against the immediate without having to materialize
783  /// the immediate into a register.
784  bool isLegalICmpImmediate(int64_t Imm) const override;
785 
786  /// isLegalAddImmediate - Return true if the specified immediate is legal
787  /// add immediate, that is the target has add instructions which can
788  /// add a register and the immediate without having to materialize
789  /// the immediate into a register.
790  bool isLegalAddImmediate(int64_t Imm) const override;
791 
792  /// isTruncateFree - Return true if it's free to truncate a value of
793  /// type Ty1 to type Ty2. E.g. on PPC it's free to truncate an i64 value in
794  /// register X1 to i32 by referencing its sub-register R1.
795  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
796  bool isTruncateFree(EVT VT1, EVT VT2) const override;
797 
798  bool isZExtFree(SDValue Val, EVT VT2) const override;
799 
800  bool isFPExtFree(EVT DestVT, EVT SrcVT) const override;
801 
802  /// Returns true if it is beneficial to convert a load of a constant
803  /// to just the constant itself.
804  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
805  Type *Ty) const override;
806 
807  bool convertSelectOfConstantsToMath(EVT VT) const override {
808  return true;
809  }
810 
811  // Returns true if the address of the global is stored in a TOC entry.
812  bool isAccessedAsGotIndirect(SDValue N) const;
813 
814  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
815 
816  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
817  const CallInst &I,
818  MachineFunction &MF,
819  unsigned Intrinsic) const override;
820 
821  /// getOptimalMemOpType - Returns the target specific optimal type for load
822  /// and store operations as a result of memset, memcpy, and memmove
823  /// lowering. If DstAlign is zero, that means the destination
824  /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
825  /// means there isn't a need to check it against alignment requirement,
826  /// probably because the source does not need to be loaded. If 'IsMemset' is
827  /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
828  /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
829  /// source is constant so it does not need to be loaded.
830  /// It returns EVT::Other if the type should be determined using generic
831  /// target-independent logic.
832  EVT
833  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
834  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
835  MachineFunction &MF) const override;
836 
837  /// Is unaligned memory access allowed for the given type, and is it fast
838  /// relative to software emulation.
839  bool allowsMisalignedMemoryAccesses(EVT VT,
840  unsigned AddrSpace,
841  unsigned Align = 1,
842  bool *Fast = nullptr) const override;
843 
844  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
845  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
846  /// expanded to FMAs when this method returns true, otherwise fmuladd is
847  /// expanded to fmul + fadd.
848  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
849 
850  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
851 
852  // Should we expand the build vector with shuffles?
853  bool
854  shouldExpandBuildVectorWithShuffles(EVT VT,
855  unsigned DefinedValues) const override;
856 
857  /// createFastISel - This method returns a target-specific FastISel object,
858  /// or null if the target does not support "fast" instruction selection.
860  const TargetLibraryInfo *LibInfo) const override;
861 
862  /// Returns true if an argument of type Ty needs to be passed in a
863  /// contiguous block of registers in calling convention CallConv.
865  Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
866  // We support any array type as "consecutive" block in the parameter
867  // save area. The element type defines the alignment requirement and
868  // whether the argument should go in GPRs, FPRs, or VRs if available.
869  //
870  // Note that clang uses this capability both to implement the ELFv2
871  // homogeneous float/vector aggregate ABI, and to avoid having to use
872  // "byval" when passing aggregates that might fully fit in registers.
873  return Ty->isArrayTy();
874  }
875 
876  /// If a physical register, this returns the register that receives the
877  /// exception address on entry to an EH pad.
878  unsigned
879  getExceptionPointerRegister(const Constant *PersonalityFn) const override;
880 
881  /// If a physical register, this returns the register that receives the
882  /// exception typeid on entry to a landing pad.
883  unsigned
884  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
885 
886  /// Override to support customized stack guard loading.
887  bool useLoadStackGuardNode() const override;
888  void insertSSPDeclarations(Module &M) const override;
889 
890  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
891 
892  unsigned getJumpTableEncoding() const override;
893  bool isJumpTableRelative() const override;
894  SDValue getPICJumpTableRelocBase(SDValue Table,
895  SelectionDAG &DAG) const override;
896  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
897  unsigned JTI,
898  MCContext &Ctx) const override;
899 
900  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
901  CallingConv::ID CC,
902  EVT VT) const override;
903 
904  MVT getRegisterTypeForCallingConv(LLVMContext &Context,
905  CallingConv::ID CC,
906  EVT VT) const override;
907 
908  private:
909  struct ReuseLoadInfo {
910  SDValue Ptr;
911  SDValue Chain;
912  SDValue ResChain;
913  MachinePointerInfo MPI;
914  bool IsDereferenceable = false;
915  bool IsInvariant = false;
916  unsigned Alignment = 0;
917  AAMDNodes AAInfo;
918  const MDNode *Ranges = nullptr;
919 
920  ReuseLoadInfo() = default;
921 
922  MachineMemOperand::Flags MMOFlags() const {
924  if (IsDereferenceable)
926  if (IsInvariant)
928  return F;
929  }
930  };
931 
932  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
933  // Addrspacecasts are always noops.
934  return true;
935  }
936 
937  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
938  SelectionDAG &DAG,
940  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
941  SelectionDAG &DAG) const;
942 
943  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
944  SelectionDAG &DAG, const SDLoc &dl) const;
945  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
946  const SDLoc &dl) const;
947 
948  bool directMoveIsProfitable(const SDValue &Op) const;
949  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
950  const SDLoc &dl) const;
951 
952  SDValue LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
953  const SDLoc &dl) const;
954 
955  SDValue LowerTRUNCATEVector(SDValue Op, SelectionDAG &DAG) const;
956 
957  SDValue getFramePointerFrameIndex(SelectionDAG & DAG) const;
958  SDValue getReturnAddrFrameIndex(SelectionDAG & DAG) const;
959 
960  bool
961  IsEligibleForTailCallOptimization(SDValue Callee,
962  CallingConv::ID CalleeCC,
963  bool isVarArg,
965  SelectionDAG& DAG) const;
966 
967  bool
968  IsEligibleForTailCallOptimization_64SVR4(
969  SDValue Callee,
970  CallingConv::ID CalleeCC,
972  bool isVarArg,
975  SelectionDAG& DAG) const;
976 
977  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
978  SDValue Chain, SDValue &LROpOut,
979  SDValue &FPOpOut,
980  const SDLoc &dl) const;
981 
983  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
984  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
985  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
986  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
987  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
988  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
989  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
990  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
992  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
993  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
994  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
995  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
996  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
998  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
999  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
1000  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
1001  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
1002  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
1003  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
1004  const SDLoc &dl) const;
1005  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
1006  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
1007  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1008  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
1009  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
1010  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
1014  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
1015  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
1016  SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
1017  SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
1018  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
1020  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
1021  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
1022  SDValue LowerABS(SDValue Op, SelectionDAG &DAG) const;
1023 
1024  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
1025  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;
1026 
1027  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
1028  CallingConv::ID CallConv, bool isVarArg,
1029  const SmallVectorImpl<ISD::InputArg> &Ins,
1030  const SDLoc &dl, SelectionDAG &DAG,
1031  SmallVectorImpl<SDValue> &InVals) const;
1032  SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
1033  bool isTailCall, bool isVarArg, bool isPatchPoint,
1034  bool hasNest, SelectionDAG &DAG,
1035  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
1036  SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
1037  SDValue &Callee, int SPDiff, unsigned NumBytes,
1038  const SmallVectorImpl<ISD::InputArg> &Ins,
1039  SmallVectorImpl<SDValue> &InVals,
1040  ImmutableCallSite CS) const;
1041 
1042  SDValue
1043  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1044  const SmallVectorImpl<ISD::InputArg> &Ins,
1045  const SDLoc &dl, SelectionDAG &DAG,
1046  SmallVectorImpl<SDValue> &InVals) const override;
1047 
1049  SmallVectorImpl<SDValue> &InVals) const override;
1050 
1051  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
1052  bool isVarArg,
1053  const SmallVectorImpl<ISD::OutputArg> &Outs,
1054  LLVMContext &Context) const override;
1055 
1056  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1057  const SmallVectorImpl<ISD::OutputArg> &Outs,
1058  const SmallVectorImpl<SDValue> &OutVals,
1059  const SDLoc &dl, SelectionDAG &DAG) const override;
1060 
1061  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
1062  SelectionDAG &DAG, SDValue ArgVal,
1063  const SDLoc &dl) const;
1064 
1065  SDValue LowerFormalArguments_Darwin(
1066  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1067  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1068  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1069  SDValue LowerFormalArguments_64SVR4(
1070  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1071  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1072  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1073  SDValue LowerFormalArguments_32SVR4(
1074  SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1075  const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1076  SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
1077 
1078  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
1079  SDValue CallSeqStart,
1080  ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1081  const SDLoc &dl) const;
1082 
1083  SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
1084  CallingConv::ID CallConv, bool isVarArg,
1085  bool isTailCall, bool isPatchPoint,
1086  const SmallVectorImpl<ISD::OutputArg> &Outs,
1087  const SmallVectorImpl<SDValue> &OutVals,
1088  const SmallVectorImpl<ISD::InputArg> &Ins,
1089  const SDLoc &dl, SelectionDAG &DAG,
1090  SmallVectorImpl<SDValue> &InVals,
1091  ImmutableCallSite CS) const;
1092  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
1093  CallingConv::ID CallConv, bool isVarArg,
1094  bool isTailCall, bool isPatchPoint,
1095  const SmallVectorImpl<ISD::OutputArg> &Outs,
1096  const SmallVectorImpl<SDValue> &OutVals,
1097  const SmallVectorImpl<ISD::InputArg> &Ins,
1098  const SDLoc &dl, SelectionDAG &DAG,
1099  SmallVectorImpl<SDValue> &InVals,
1100  ImmutableCallSite CS) const;
1101  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
1102  CallingConv::ID CallConv, bool isVarArg,
1103  bool isTailCall, bool isPatchPoint,
1104  const SmallVectorImpl<ISD::OutputArg> &Outs,
1105  const SmallVectorImpl<SDValue> &OutVals,
1106  const SmallVectorImpl<ISD::InputArg> &Ins,
1107  const SDLoc &dl, SelectionDAG &DAG,
1108  SmallVectorImpl<SDValue> &InVals,
1109  ImmutableCallSite CS) const;
1110 
1111  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
1112  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
1113  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
1114 
1115  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
1116  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
1117  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
1118  SDValue combineStoreFPToInt(SDNode *N, DAGCombinerInfo &DCI) const;
1119  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;
1120  SDValue combineSHL(SDNode *N, DAGCombinerInfo &DCI) const;
1121  SDValue combineSRA(SDNode *N, DAGCombinerInfo &DCI) const;
1122  SDValue combineSRL(SDNode *N, DAGCombinerInfo &DCI) const;
1123  SDValue combineADD(SDNode *N, DAGCombinerInfo &DCI) const;
1124  SDValue combineTRUNCATE(SDNode *N, DAGCombinerInfo &DCI) const;
1125  SDValue combineSetCC(SDNode *N, DAGCombinerInfo &DCI) const;
1126  SDValue combineABS(SDNode *N, DAGCombinerInfo &DCI) const;
1127  SDValue combineVSelect(SDNode *N, DAGCombinerInfo &DCI) const;
1128 
1129  /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
1130  /// SETCC with integer subtraction when (1) there is a legal way of doing it and
1131  /// (2) keeping the result of the comparison in a GPR has a performance benefit.
1132  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;
1133 
1134  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1135  int &RefinementSteps, bool &UseOneConstNR,
1136  bool Reciprocal) const override;
1137  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
1138  int &RefinementSteps) const override;
1139  unsigned combineRepeatedFPDivisors() const override;
1140 
1141  SDValue
1142  combineElementTruncationToVectorTruncation(SDNode *N,
1143  DAGCombinerInfo &DCI) const;
1144 
1145  /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be
1146  /// handled by the VINSERTH instruction introduced in ISA 3.0. This is
1147  /// essentially any shuffle of v8i16 vectors that just inserts one element
1148  /// from one vector into the other.
1149  SDValue lowerToVINSERTH(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1150 
1151  /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be
1152  /// handled by the VINSERTB instruction introduced in ISA 3.0. This is
1153  /// essentially the v16i8 version of VINSERTH.
1154  SDValue lowerToVINSERTB(ShuffleVectorSDNode *N, SelectionDAG &DAG) const;
1155 
1156  // Return whether the call instruction can potentially be optimized to a
1157  // tail call. This will cause the optimizers to attempt to move, or
1158  // duplicate return instructions to help enable tail call optimizations.
1159  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
1160  bool hasBitPreservingFPLogic(EVT VT) const override;
1161  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
1162  }; // end class PPCTargetLowering
1163 
1164  namespace PPC {
1165 
1167  const TargetLibraryInfo *LibInfo);
1168 
1169  } // end namespace PPC
1170 
1171  bool isIntS16Immediate(SDNode *N, int16_t &Imm);
1172  bool isIntS16Immediate(SDValue Op, int16_t &Imm);
1173 
1174 } // end namespace llvm
1175 
1176 #endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:883
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
bool isSelectSupported(SelectSupportKind Kind) const override
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
Return with a flag operand, matched by &#39;blr&#39;.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
getPreferredVectorAction - The code we generate when vector types are legalized by promoting the inte...
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
LLVMContext & Context
QVFPERM = This corresponds to the QPX qvfperm instruction.
This class represents lattice values for constants.
Definition: AllocatorList.h:23
GPRC = address of GLOBAL_OFFSET_TABLE.
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:64
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:145
This class represents a function call, abstracting a target machine&#39;s calling convention.
This file contains the declarations for metadata subclasses.
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction...
Function Alias Analysis Results
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:320
bool hasAndNotCompare(SDValue) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
unsigned const TargetRegisterInfo * TRI
Metadata node.
Definition: Metadata.h:863
F(f)
bool convertSelectOfConstantsToMath(EVT VT) const override
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
CALL - A direct function call.
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
Floating-point-to-interger conversion instructions.
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:742
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
This file contains the simple types necessary to represent the attributes associated with functions a...
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The memory access is dereferenceable (i.e., doesn&#39;t trap).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
Direct move from a GPR to a VSX register (algebraic)
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
QVALIGNI = This corresponds to the QPX qvaligni instruction.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
AtomicOrdering
Atomic ordering for LLVM&#39;s memory model.
Context object for machine code objects.
Definition: MCContext.h:62
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
This is a fast-path instruction selection class that generates poor code and doesn&#39;t support illegal ...
Definition: FastISel.h:66
An SDNode for Power9 vector absolute value difference.
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
This contains information for each constraint that we are lowering.
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
FSEL - Traditional three-operand fsel node.
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:68
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
static Value * LowerBSWAP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower bswap of V before the specified instruction IP.
unsigned getScalarSizeInBits() const
This is an important base class in LLVM.
Definition: Constant.h:41
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
SExtVElems, takes an input vector of a smaller type and sign extends to an output vector of a larger ...
VECINSERT - The PPC vector insert instruction.
Direct move from a VSX register to a GPR.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:940
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the specified isSplatShuffleMask...
STFIWX - The STFIWX instruction.
FCFID - The FCFID instruction, taking an f64 operand and producing and f64 value containing the FP re...
Store scalar integers from VSR.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
QVESPLATI = This corresponds to the QPX qvesplati instruction.
Common code between 32-bit and 64-bit PowerPC targets.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
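The -1 sentinel gives callers a pattern roughly like this sketch (the ShuffleKind value and surrounding variables are assumed, following the convention of the other PPC shuffle predicates):
// Illustrative only.
int ShiftAmt = PPC::isVSLDOIShuffleMask(SVN, /*ShuffleKind=*/0, DAG);
if (ShiftAmt != -1) {
  // ShiftAmt is the byte shift that would go into the vsldoi immediate field.
}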
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
Extended Value Type.
Definition: ValueTypes.h:33
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
This class contains a discriminated union of information about pointers in memory operands...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
Extract a subvector from an unsigned integer vector and convert to FP.
QBFLT = Access the underlying QPX floating-point boolean representation.
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate...
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
XXSPLT - The PPC VSX splat instructions.
VECSHL - The PPC vector shift left instruction.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Definition: Function.h:212
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:839
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
bool convertSetCCLogicToBitwiseLogic(EVT VT) const override
Use bitwise logic to make pairs of compares more efficient.
Provides information about what library functions are available for the current target.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition: Metadata.h:643
CHAIN = SC CHAIN, Imm128 - System call.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:890
Represents one node in the SelectionDAG.
VPERM - The PPC VPERM Instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction...
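Because the result is reported through the ShiftElts and Swap out-parameters, a caller typically looks like the following sketch (IsLittleEndian is an assumed stand-in for however the caller obtains endianness):
// Illustrative only; ShiftElts and Swap are filled in by the predicate on success.
unsigned ShiftElts;
bool Swap;
if (PPC::isXXPERMDIShuffleMask(SVN, ShiftElts, Swap, /*IsLE=*/IsLittleEndian)) {
  // ShiftElts maps to the XXPERMDI immediate; Swap says the two sources must be exchanged.
}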
const Function & getFunction() const
Return the LLVM function that this machine code represents.
STXSIX - The STXSI[bh]X instruction.
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Class for arbitrary precision integers.
Definition: APInt.h:69
QVGPCI = This corresponds to the QPX qvgpci instruction.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2...
Flags
Flags values. These may be or'd together.
GPRC = address of GLOBAL_OFFSET_TABLE.
Representation of each machine instruction.
Definition: MachineInstr.h:63
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
SelectSupportKind
Enum that describes what type of support for selects the target has.
Reciprocal estimate instructions (unary FP ops).
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
Establish a view to a call site for examination.
Definition: CallSite.h:892
Direct move from a GPR to a VSX register (zero)
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The CMPB instruction (takes two operands of i32 or i64).
The memory access always returns the same value (or traps).
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
TC_RETURN - A tail call return.
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
bool isCtlzFast() const override
Return true if ctlz instruction is fast.
XXREVERSE - The PPC VSX reverse instruction.
LegalizeTypeAction
This enum indicates whether a type is legal for a target, and if not, what action should be used to...
Direct move of 2 consecutive GPRs to a VSX register.
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
These nodes represent PPC shifts.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
Extract a subvector from a signed integer vector and convert to FP.
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain...
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
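A hedged usage sketch in the style of operand-folding code (the node N and the choice of operand are assumptions):
// Illustrative only: fold a constant operand when it fits in a signed 16-bit field.
int16_t Imm;
if (PPC::isIntS16Immediate(N->getOperand(1).getNode(), Imm)) {
  // Imm can be encoded directly in a D-form instruction such as addi.
}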
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VRGH* instruction with the ...
XXPERMDI - The PPC XXPERMDI instruction.
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition: Type.h:220
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:920
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1. ...