AArch64ISelLowering.h (LLVM 4.0.0)

//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
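
  // Editorial note (not part of the original header): ADRP and ADDlow model
  // the usual two-instruction global-address materialisation, roughly
  //   adrp x0, sym             ; page address of sym        (ADRP)
  //   add  x0, x0, :lo12:sym   ; add the low 12 bits of sym (ADDlow)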
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

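  // Editorial note (not part of the original header): these model the AArch64
  // conditional-compare instructions, e.g. "ccmp x0, x1, #0, eq": the compare
  // is performed only if "cc" holds, otherwise NZCV is set to the immediate
  // "falsecc" value, which keeps chained comparisons branchless.
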
  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8, DUPLANE16, DUPLANE32, DUPLANE64,

  // Vector immediate moves
  MOVI, MOVIshift, MOVIedit, MOVImsl,
  FMOV,
  MVNIshift, MVNImsl,

  // Vector immediate ops
  BICi, ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1, ZIP2, UZP1, UZP2, TRN1, TRN2,
  REV16, REV32, REV64,
  EXT,

  // Vector shift by scalar
  VSHL, VLSHR, VASHR,

  // Vector shift by scalar (again)
  SQSHL_I, UQSHL_I, SQSHLU_I, SRSHR_I, URSHR_I,

  // Vector comparisons
  CMEQ, CMGE, CMGT, CMHI, CMHS,
  FCMEQ, FCMGE, FCMGT,

  // Vector zero comparisons
  CMEQz, CMGEz, CMGTz, CMLEz, CMLTz,
  FCMEQz, FCMGEz, FCMGTz, FCMLEz, FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV, UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV, UMINV, SMAXV, UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ, CBNZ, TBZ, TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

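  // Editorial note (not part of the original header): e.g. reinterpreting a
  // v4i32 register as v2i64 does not change the register contents, so a
  // big-endian lowering can use NVCAST where an ISD::BITCAST (which must match
  // memory-layout semantics) would otherwise require extra REV instructions.
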
  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post, LD4post,
  ST2post, ST3post, ST4post,
  LD1x2post, LD1x3post, LD1x4post,
  ST1x2post, ST1x3post, ST1x4post,
  LD1DUPpost, LD2DUPpost, LD3DUPpost, LD4DUPpost,
  LD1LANEpost, LD2LANEpost, LD3LANEpost, LD4LANEpost,
  ST2LANEpost, ST3LANEpost, ST4LANEpost
};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}
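
// Editorial note (not part of the original header): this reflects the
// architectural rule that a W-register write clears bits [63:32] of the
// corresponding X register, e.g.
//   add w0, w1, w2   ; defines w0 and implicitly zeroes the top half of x0
// so a later zero-extension of such a result can be dropped. TRUNCATE,
// EXTRACT_SUBREG and CopyFromReg are excluded because they may merely
// re-label part of a wider value whose high bits are still live.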

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeSet Attr) const override;

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool hasAndNotCompare(SDValue) const override {
    // 'bics'
    return true;
  }
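
  // Editorial note (not part of the original header): returning true here lets
  // (X & Y) == Y be rewritten as (~X & Y) == 0, which maps onto the AArch64
  // "bics" instruction (AND with the complement of an operand, setting flags),
  // so the separate compare folds away.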

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }
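
  // Editorial note (not part of the original header): "Q" is the inline-asm
  // memory constraint for an address held in a single base register with no
  // offset, hence the direct mapping to InlineAsm::Constraint_Q above.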

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif