//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
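  // A sketch of the expansion for a 64-bit symbol address:
  //   movz x0, #:abs_g3:sym
  //   movk x0, #:abs_g2_nc:sym
  //   movk x0, #:abs_g1_nc:sym
  //   movk x0, #:abs_g0_nc:sym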
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
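  // As a sketch, ADRP and ADDlow typically pair up to materialize a global
  // address in the small code model:
  //   adrp x0, var            // 4KB page containing var
  //   add  x0, x0, :lo12:var  // plus var's low 12 bits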
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL,    // Conditional move instruction.
  CSINV,    // Conditional select invert.
  CSNEG,    // Conditional select negate.
  CSINC,    // Conditional select increment.
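  // Sketch of the underlying instruction semantics (cc is the condition):
  //   csel  Wd, Wn, Wm, cc  =>  Wd = cc ? Wn : Wm
  //   csinv Wd, Wn, Wm, cc  =>  Wd = cc ? Wn : ~Wm
  //   csneg Wd, Wn, Wm, cc  =>  Wd = cc ? Wn : -Wm
  //   csinc Wd, Wn, Wm, cc  =>  Wd = cc ? Wn : Wm + 1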

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Floating point comparison
  FCMP,

  // Floating point max and min instructions.
  FMAX,
  FMIN,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
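  // Roughly: Result = (Mask & Op1) | (~Mask & Op2), evaluated bit by bit,
  // so each result bit may be chosen from a different source.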

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD
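// Illustrative sketch (local names hypothetical): custom lowering code builds
// these nodes exactly like generic ISD opcodes, e.g.
//   SDValue Dup = DAG.getNode(AArch64ISD::DUP, DL, VT, Scalar);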

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
  bool RequireStrictAlign;

public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// computeKnownBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override {
    if (RequireStrictAlign)
      return false;
    // FIXME: True for Cyclone, but not necessarily others.
    if (Fast)
      *Fast = true;
    return true;
  }
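  // Note: RequireStrictAlign reflects the -aarch64-strict-align backend
  // option ("Disallow all unaligned memory accesses"); with it set, every
  // unaligned access is rejected here and expanded during legalization
  // instead.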

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// getFunctionAlignment - Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isShuffleMaskLegal - Return true if the given shuffle mask can be
  /// codegen'd directly, or if it should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAligment) const override;
  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
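  // The factor of 4 matches the hardware: NEON structured load/store
  // instructions (ldN/stN) exist only for N = 2, 3 and 4.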

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store of the
  /// specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
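  // Rationale (sketch): AArch64 has fused multiply-add (fmadd/fmsub, and
  // fmla/fmls for vectors), so a single fused operation is expected to beat
  // a separate fmul + fadd pair whenever the type is natively supported.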

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool hasLoadLinkedStoreConditional() const override;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicRMWExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
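  // Intent (sketch): a signed divide by a power of two is rewritten as a
  // compare, an add of (2^K - 1), a conditional select, and an arithmetic
  // shift right, avoiding a full sdiv.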
  bool combineRepeatedFPDivisors(unsigned NumUsers) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif