LLVM  8.0.0svn
AArch64ISelLowering.h
Go to the documentation of this file.
1 //==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that AArch64 uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16 #define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
17 
18 #include "AArch64.h"
22 #include "llvm/IR/CallingConv.h"
23 #include "llvm/IR/Instruction.h"
24 
25 namespace llvm {
26 
27 namespace AArch64ISD {
28 
enum NodeType : unsigned {
  // NOTE(review): this listing appears to have passed through a lossy
  // extraction: several enumerators that the comments below describe (the
  // flag-setting arithmetic nodes, conditional compares, vector shuffle /
  // shift / compare nodes, across-lanes reductions, NVCAST, the post-inc
  // load/store nodes, etc.) are missing. Restore them from the original
  // AArch64ISelLowering.h before using this enum.

  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  ADRP,    // Page address of a TargetGlobalAddress operand.
  ADR,     // ADR
  ADDlow,  // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot, // Load from automatically generated descriptor (e.g. Global
           // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.

  // Conditional compares. Operands: left,right,falsecc,cc,flags

  // Floating point comparison

  // Scalar extract

  // Scalar-to-vector duplication
  DUP,

  // Vector immediate moves

  // Vector immediate ops

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles

  // Vector shift by scalar

  // Vector shift by scalar (again)

  // Vector comparisons

  // Vector zero comparisons

  // Vector across-lanes addition
  // Only the lower result lane is defined.

  // Vector across-lanes min/max
  // Only the lower result lane is defined.

  // Vector bitwise negation

  // Vector bitwise selection

  // Compare-and-branch

  // Tail calls

  // Custom prefetch handling

  // {s|u}int to FP within a FP register.

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.

  // Reciprocal estimates and steps.

  // NEON Load/Store with post-increment base updates
};
220 
221 } // end namespace AArch64ISD
222 
223 namespace {
224 
225 // Any instruction that defines a 32-bit result zeros out the high half of the
226 // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
227 // be copying from a truncate. But any other 32-bit operation will zero-extend
228 // up to 64 bits.
229 // FIXME: X86 also checks for CMOV here. Do we need something similar?
230 static inline bool isDef32(const SDNode &N) {
231  unsigned Opc = N.getOpcode();
232  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
233  Opc != ISD::CopyFromReg;
234 }
235 
236 } // end anonymous namespace
237 
238 class AArch64Subtarget;
240 
242 public:
243  explicit AArch64TargetLowering(const TargetMachine &TM,
244  const AArch64Subtarget &STI);
245 
246  /// Selects the correct CCAssignFn for a given CallingConvention value.
247  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
248 
249  /// Selects the correct CCAssignFn for a given CallingConvention value.
250  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
251 
252  /// Determine which of the bits specified in Mask are known to be either zero
253  /// or one and return them in the KnownZero/KnownOne bitsets.
254  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
255  const APInt &DemandedElts,
256  const SelectionDAG &DAG,
257  unsigned Depth = 0) const override;
258 
259  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
260  TargetLoweringOpt &TLO) const override;
261 
262  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
263 
264  /// Returns true if the target allows unaligned memory accesses of the
265  /// specified type.
266  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
267  unsigned Align = 1,
268  bool *Fast = nullptr) const override;
269 
270  /// Provide custom lowering hooks for some operations.
271  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
272 
273  const char *getTargetNodeName(unsigned Opcode) const override;
274 
275  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
276 
  /// Returns true if a cast between SrcAS and DestAS is a noop.
  /// On AArch64 every addrspacecast is treated as a no-op (no code emitted).
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }
282 
283  /// This method returns a target specific FastISel object, or null if the
284  /// target does not support "fast" ISel.
286  const TargetLibraryInfo *libInfo) const override;
287 
288  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
289 
290  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
291 
292  /// Return true if the given shuffle mask can be codegen'd directly, or if it
293  /// should be stack expanded.
294  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
295 
296  /// Return the ISD::SETCC ValueType.
297  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
298  EVT VT) const override;
299 
300  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
301 
302  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
303  MachineBasicBlock *BB) const;
304 
306  EmitInstrWithCustomInserter(MachineInstr &MI,
307  MachineBasicBlock *MBB) const override;
308 
309  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
310  MachineFunction &MF,
311  unsigned Intrinsic) const override;
312 
313  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
314  EVT NewVT) const override;
315 
316  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
317  bool isTruncateFree(EVT VT1, EVT VT2) const override;
318 
319  bool isProfitableToHoist(Instruction *I) const override;
320 
321  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
322  bool isZExtFree(EVT VT1, EVT VT2) const override;
323  bool isZExtFree(SDValue Val, EVT VT2) const override;
324 
325  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
326 
  /// Maximum interleave factor handled by lowerInterleavedLoad/Store.
  /// NEON structured memory ops support up to 4 elements (ld2-ld4 / st2-st4).
  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
328 
329  bool lowerInterleavedLoad(LoadInst *LI,
331  ArrayRef<unsigned> Indices,
332  unsigned Factor) const override;
333  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
334  unsigned Factor) const override;
335 
336  bool isLegalAddImmediate(int64_t) const override;
337  bool isLegalICmpImmediate(int64_t) const override;
338 
339  bool shouldConsiderGEPOffsetSplit() const override;
340 
341  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
342  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
343  MachineFunction &MF) const override;
344 
345  /// Return true if the addressing mode represented by AM is legal for this
346  /// target, for a load/store of the specified type.
347  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
348  unsigned AS,
349  Instruction *I = nullptr) const override;
350 
351  /// Return the cost of the scaling factor used in the addressing
352  /// mode represented by AM for this target, for a load/store
353  /// of the specified type.
354  /// If the AM is supported, the return value must be >= 0.
355  /// If the AM is not supported, it returns a negative value.
356  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
357  unsigned AS) const override;
358 
359  /// Return true if an FMA operation is faster than a pair of fmul and fadd
360  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
361  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
362  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
363 
364  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
365 
366  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
367  bool isDesirableToCommuteWithShift(const SDNode *N,
368  CombineLevel Level) const override;
369 
370  /// Returns true if it is beneficial to convert a load of a constant
371  /// to just the constant itself.
372  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
373  Type *Ty) const override;
374 
375  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
376  /// with this index.
377  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
378  unsigned Index) const override;
379 
380  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
381  AtomicOrdering Ord) const override;
382  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
383  Value *Addr, AtomicOrdering Ord) const override;
384 
385  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
386 
388  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
389  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
391  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
392 
394  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
395 
396  bool useLoadStackGuardNode() const override;
398  getPreferredVectorAction(EVT VT) const override;
399 
400  /// If the target has a standard location for the stack protector cookie,
401  /// returns the address of that location. Otherwise, returns nullptr.
402  Value *getIRStackGuard(IRBuilder<> &IRB) const override;
403 
404  /// If the target has a standard location for the unsafe stack pointer,
405  /// returns the address of that location. Otherwise, returns nullptr.
406  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
407 
  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad. X0 on AArch64 (the first
  /// integer argument/return register).
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }
415 
  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad. X1 on AArch64 (the second
  /// integer argument register).
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }
423 
424  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
425 
426  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
427  const SelectionDAG &DAG) const override {
428  // Do not merge to float value size (128 bytes) if no implicit
429  // float attribute is set.
430 
431  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
432  Attribute::NoImplicitFloat);
433 
434  if (NoFloat)
435  return (MemVT.getSizeInBits() <= 64);
436  return true;
437  }
438 
  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  /// Always cheap on AArch64 — cttz lowers to a short instruction sequence
  /// (presumably RBIT+CLZ; confirm against the lowering code).
  bool isCheapToSpeculateCttz() const override {
    return true;
  }
442 
  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  /// Always cheap on AArch64, which has a dedicated CLZ instruction.
  bool isCheapToSpeculateCtlz() const override {
    return true;
  }
446 
447  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
448 
  /// Return true if the target should transform (X & Y) ==/!= Y into
  /// (~X & Y) ==/!= 0, i.e. whether an and-not-compare is available for V.
  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }
453 
454  bool hasAndNot(SDValue Y) const override {
455  EVT VT = Y.getValueType();
456 
457  if (!VT.isVector())
458  return hasAndNotCompare(Y);
459 
460  return VT.getSizeInBits() >= 64; // vector 'bic'
461  }
462 
464  unsigned KeptBits) const override {
465  // For vectors, we don't have a preference..
466  if (XVT.isVector())
467  return false;
468 
469  auto VTIsOk = [](EVT VT) -> bool {
470  return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
471  VT == MVT::i64;
472  };
473 
474  // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
475  // XVT will be larger than KeptBitsVT.
476  MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
477  return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
478  }
479 
  /// Return true if it is safe to transform an integer-domain bitwise
  /// operation into the equivalent floating-point operation on VT.
  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }
484 
485  bool supportSplitCSR(MachineFunction *MF) const override {
487  MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
488  }
489  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
490  void insertCopiesSplitCSR(
491  MachineBasicBlock *Entry,
492  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
493 
  /// Return true if the target supports the swifterror attribute.
  bool supportSwiftError() const override {
    return true;
  }
497 
498  /// Enable aggressive FMA fusion on targets that want it.
499  bool enableAggressiveFMAFusion(EVT VT) const override;
500 
501  /// Returns the size of the platform's va_list object.
502  unsigned getVaListSizeInBits(const DataLayout &DL) const override;
503 
504  /// Returns true if \p VecTy is a legal interleaved access type. This
505  /// function checks the vector element type and the overall width of the
506  /// vector.
507  bool isLegalInterleavedAccessType(VectorType *VecTy,
508  const DataLayout &DL) const;
509 
510  /// Returns the number of interleaved accesses that will be generated when
511  /// lowering accesses of the given type.
512  unsigned getNumInterleavedAccesses(VectorType *VecTy,
513  const DataLayout &DL) const;
514 
515  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
516 
517  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
518  CallingConv::ID CallConv,
519  bool isVarArg) const override;
520 private:
521  /// Keep a pointer to the AArch64Subtarget around so that we can
522  /// make the right decision when generating code for different targets.
523  const AArch64Subtarget *Subtarget;
524 
525  bool isExtFreeImpl(const Instruction *Ext) const override;
526 
527  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
528  void addDRTypeForNEON(MVT VT);
529  void addQRTypeForNEON(MVT VT);
530 
531  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
532  bool isVarArg,
534  const SDLoc &DL, SelectionDAG &DAG,
535  SmallVectorImpl<SDValue> &InVals) const override;
536 
537  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
538  SmallVectorImpl<SDValue> &InVals) const override;
539 
540  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
541  CallingConv::ID CallConv, bool isVarArg,
543  const SDLoc &DL, SelectionDAG &DAG,
544  SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
545  SDValue ThisVal) const;
546 
547  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
548 
549  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
550 
551  bool isEligibleForTailCallOptimization(
552  SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
554  const SmallVectorImpl<SDValue> &OutVals,
555  const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
556 
557  /// Finds the incoming stack arguments which overlap the given fixed stack
558  /// object and incorporates their load into the current chain. This prevents
559  /// an upcoming store from clobbering the stack argument before it's used.
560  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
561  MachineFrameInfo &MFI, int ClobberedFI) const;
562 
563  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
564 
565  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
566  SDValue &Chain) const;
567 
568  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
569  bool isVarArg,
571  LLVMContext &Context) const override;
572 
573  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
575  const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
576  SelectionDAG &DAG) const override;
577 
578  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
579  unsigned Flag) const;
580  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
581  unsigned Flag) const;
582  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
583  unsigned Flag) const;
584  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
585  unsigned Flag) const;
586  template <class NodeTy>
587  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
588  template <class NodeTy>
589  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
590  template <class NodeTy>
591  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
592  template <class NodeTy>
593  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
594  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
595  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
596  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
597  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
598  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
599  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
600  SelectionDAG &DAG) const;
601  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
602  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
603  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
604  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
605  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
607  SDValue TVal, SDValue FVal, const SDLoc &dl,
608  SelectionDAG &DAG) const;
609  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
610  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
611  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
612  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
613  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
614  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
615  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
616  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
617  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
618  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
620  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
624  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
627  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
628  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
629  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
630  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
631  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
632  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
633  RTLIB::Libcall Call) const;
634  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
635  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
636  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
637  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
638  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
639  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
640  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
642  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
643  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
644  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
645  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
647  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
648  SDValue &Size,
649  SelectionDAG &DAG) const;
650 
651  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
652  SmallVectorImpl<SDNode *> &Created) const override;
653  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
654  int &ExtraSteps, bool &UseOneConst,
655  bool Reciprocal) const override;
656  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
657  int &ExtraSteps) const override;
658  unsigned combineRepeatedFPDivisors() const override;
659 
660  ConstraintType getConstraintType(StringRef Constraint) const override;
661  unsigned getRegisterByName(const char* RegName, EVT VT,
662  SelectionDAG &DAG) const override;
663 
664  /// Examine constraint string and operand type and determine a weight value.
665  /// The operand object must already have been set up with the operand type.
667  getSingleConstraintMatchWeight(AsmOperandInfo &info,
668  const char *constraint) const override;
669 
670  std::pair<unsigned, const TargetRegisterClass *>
671  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
672  StringRef Constraint, MVT VT) const override;
673 
674  const char *LowerXConstraint(EVT ConstraintVT) const override;
675 
676  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
677  std::vector<SDValue> &Ops,
678  SelectionDAG &DAG) const override;
679 
680  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
681  if (ConstraintCode == "Q")
683  // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
684  // followed by llvm_unreachable so we'll leave them unimplemented in
685  // the backend for now.
686  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
687  }
688 
689  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
690  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
691  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
692  ISD::MemIndexedMode &AM, bool &IsInc,
693  SelectionDAG &DAG) const;
694  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
696  SelectionDAG &DAG) const override;
697  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
698  SDValue &Offset, ISD::MemIndexedMode &AM,
699  SelectionDAG &DAG) const override;
700 
701  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
702  SelectionDAG &DAG) const override;
703 
704  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
705 
706  void finalizeLowering(MachineFunction &MF) const override;
707 };
708 
709 namespace AArch64 {
711  const TargetLibraryInfo *libInfo);
712 } // end namespace AArch64
713 
714 } // end namespace llvm
715 
716 #endif
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
static MVT getIntegerVT(unsigned BitWidth)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate physical registers.
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:846
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:518
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:146
This class represents a function call, abstracting a target machine's calling convention.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:321
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:43
unsigned const TargetRegisterInfo * TRI
An instruction for reading from memory.
Definition: Instructions.h:168
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:681
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:42
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:743
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
AtomicOrdering
Atomic ordering for LLVM's memory model.
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:67
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:292
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:395
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
This contains information for each constraint that we are lowering.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
An instruction for storing to memory.
Definition: Instructions.h:310
Natural vector cast.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:928
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
amdgpu Simplify well known AMD library false Value * Callee
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:118
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:69
This is an important base class in LLVM.
Definition: Constant.h:42
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:903
CombineLevel
Definition: DAGCombine.h:16
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
lazy value info
Extended Value Type.
Definition: ValueTypes.h:34
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
static Value * LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower ctpop of V before the specified instruction IP.
CCState - This class holds information needed while lowering arguments and return values...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:213
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:222
Provides information about what library functions are available for the current target.
AddressSpace
Definition: NVPTXBaseInfo.h:22
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:853
Represents one node in the SelectionDAG.
static bool Enabled
Definition: Statistic.cpp:51
const Function & getFunction() const
Return the LLVM function that this machine code represents.
bool hasBitPreservingFPLogic(EVT VT) const override
Return true if it is safe to transform an integer-domain bitwise operation into the equivalent floati...
Class to represent vector types.
Definition: DerivedTypes.h:393
Class for arbitrary precision integers.
Definition: APInt.h:70
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const SelectionDAG &DAG) const override
Returns true if it's reasonable to merge stores to MemVT size.
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
Flags
Flags values. These may be or'd together.
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
Representation of each machine instruction.
Definition: MachineInstr.h:64
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F, const Loop &L)
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:151
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
uint32_t Size
Definition: Profile.cpp:47
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:175
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
LLVM Value Representation.
Definition: Value.h:73
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:59
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:49
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:454
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:883