LLVM  10.0.0svn
AArch64ISelLowering.h
Go to the documentation of this file.
1 //==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that AArch64 uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
15 #define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16 
17 #include "AArch64.h"
21 #include "llvm/IR/CallingConv.h"
22 #include "llvm/IR/Instruction.h"
23 
24 namespace llvm {
25 
26 namespace AArch64ISD {
27 
28 enum NodeType : unsigned {
30  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
31  CALL, // Function call.
32 
33  // Produces the full sequence of instructions for getting the thread pointer
34  // offset of a variable into X0, using the TLSDesc model.
36  ADRP, // Page address of a TargetGlobalAddress operand.
37  ADR, // ADR
38  ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
39  LOADgot, // Load from automatically generated descriptor (e.g. Global
40  // Offset Table, TLS record).
41  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
42  BRCOND, // Conditional branch instruction; "b.cond".
44  FCSEL, // Conditional move instruction.
45  CSINV, // Conditional select invert.
46  CSNEG, // Conditional select negate.
47  CSINC, // Conditional select increment.
48 
49  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
50  // ELF.
52  ADC,
53  SBC, // adc, sbc instructions
54 
55  // Arithmetic instructions which write flags.
61 
62  // Conditional compares. Operands: left,right,falsecc,cc,flags
66 
67  // Floating point comparison
69 
70  // Scalar extract
72 
73  // Scalar-to-vector duplication
74  DUP,
79 
80  // Vector immediate moves
88 
89  // Vector immediate ops
92 
93  // Vector bit select: similar to ISD::VSELECT but not all bits within an
94  // element must be identical.
95  BSL,
96 
97  // Vector arithmetic negation
98  NEG,
99 
100  // Vector shuffles
111 
112  // Vector shift by scalar
116 
117  // Vector shift by scalar (again)
123 
124  // Vector comparisons
133 
134  // Vector zero comparisons
145 
146  // Vector across-lanes addition
147  // Only the lower result lane is defined.
150 
151  // Vector across-lanes min/max
152  // Only the lower result lane is defined.
157 
158  // Vector bitwise negation
160 
161  // Vector bitwise selection
163 
164  // Compare-and-branch
169 
170  // Tail calls
172 
173  // Custom prefetch handling
175 
176  // {s|u}int to FP within a FP register.
179 
180  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
181  /// world w.r.t vectors; which causes additional REV instructions to be
182  /// generated to compensate for the byte-swapping. But sometimes we do
183  /// need to re-interpret the data in SIMD vector registers in big-endian
184  /// mode without emitting such REV instructions.
186 
189 
190  // Reciprocal estimates and steps.
193 
194  // NEON Load/Store with post-increment base updates
218 
223 
224 };
225 
226 } // end namespace AArch64ISD
227 
228 namespace {
229 
230 // Any instruction that defines a 32-bit result zeros out the high half of the
231 // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
232 // be copying from a truncate. But any other 32-bit operation will zero-extend
233 // up to 64 bits.
234 // FIXME: X86 also checks for CMOV here. Do we need something similar?
235 static inline bool isDef32(const SDNode &N) {
236  unsigned Opc = N.getOpcode();
237  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
238  Opc != ISD::CopyFromReg;
239 }
240 
241 } // end anonymous namespace
242 
243 class AArch64Subtarget;
245 
247 public:
248  explicit AArch64TargetLowering(const TargetMachine &TM,
249  const AArch64Subtarget &STI);
250 
251  /// Selects the correct CCAssignFn for a given CallingConvention value.
252  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
253 
254  /// Selects the correct CCAssignFn for a given CallingConvention value.
255  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
256 
257  /// Determine which of the bits specified in Mask are known to be either zero
258  /// or one and return them in the KnownZero/KnownOne bitsets.
259  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
260  const APInt &DemandedElts,
261  const SelectionDAG &DAG,
262  unsigned Depth = 0) const override;
263 
264  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
265  TargetLoweringOpt &TLO) const override;
266 
267  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
268 
269  /// Returns true if the target allows unaligned memory accesses of the
270  /// specified type.
271  bool allowsMisalignedMemoryAccesses(
272  EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
274  bool *Fast = nullptr) const override;
275 
276  /// Provide custom lowering hooks for some operations.
277  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
278 
279  const char *getTargetNodeName(unsigned Opcode) const override;
280 
281  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
282 
283  /// Returns true if a cast between SrcAS and DestAS is a noop.
284  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
285  // Addrspacecasts are always noops.
286  return true;
287  }
288 
289  /// This method returns a target specific FastISel object, or null if the
290  /// target does not support "fast" ISel.
292  const TargetLibraryInfo *libInfo) const override;
293 
294  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
295 
296  bool isFPImmLegal(const APFloat &Imm, EVT VT,
297  bool ForCodeSize) const override;
298 
299  /// Return true if the given shuffle mask can be codegen'd directly, or if it
300  /// should be stack expanded.
301  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
302 
303  /// Return the ISD::SETCC ValueType.
304  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
305  EVT VT) const override;
306 
307  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
308 
309  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
310  MachineBasicBlock *BB) const;
311 
312  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
313  MachineBasicBlock *BB) const;
314 
315  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
316  MachineBasicBlock *BB) const;
317 
319  EmitInstrWithCustomInserter(MachineInstr &MI,
320  MachineBasicBlock *MBB) const override;
321 
322  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
323  MachineFunction &MF,
324  unsigned Intrinsic) const override;
325 
326  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
327  EVT NewVT) const override;
328 
329  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
330  bool isTruncateFree(EVT VT1, EVT VT2) const override;
331 
332  bool isProfitableToHoist(Instruction *I) const override;
333 
334  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
335  bool isZExtFree(EVT VT1, EVT VT2) const override;
336  bool isZExtFree(SDValue Val, EVT VT2) const override;
337 
338  bool shouldSinkOperands(Instruction *I,
339  SmallVectorImpl<Use *> &Ops) const override;
340 
341  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
342 
// Factor 4 is the widest interleaved access this target lowers directly.
unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
344 
345  bool lowerInterleavedLoad(LoadInst *LI,
347  ArrayRef<unsigned> Indices,
348  unsigned Factor) const override;
349  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
350  unsigned Factor) const override;
351 
352  bool isLegalAddImmediate(int64_t) const override;
353  bool isLegalICmpImmediate(int64_t) const override;
354 
355  bool shouldConsiderGEPOffsetSplit() const override;
356 
357  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
358  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
359  const AttributeList &FuncAttributes) const override;
360 
361  /// Return true if the addressing mode represented by AM is legal for this
362  /// target, for a load/store of the specified type.
363  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
364  unsigned AS,
365  Instruction *I = nullptr) const override;
366 
367  /// Return the cost of the scaling factor used in the addressing
368  /// mode represented by AM for this target, for a load/store
369  /// of the specified type.
370  /// If the AM is supported, the return value must be >= 0.
371  /// If the AM is not supported, it returns a negative value.
372  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
373  unsigned AS) const override;
374 
375  /// Return true if an FMA operation is faster than a pair of fmul and fadd
376  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
377  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
378  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
379 
380  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
381 
382  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
383  bool isDesirableToCommuteWithShift(const SDNode *N,
384  CombineLevel Level) const override;
385 
386  /// Returns true if it is beneficial to convert a load of a constant
387  /// to just the constant itself.
388  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
389  Type *Ty) const override;
390 
391  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
392  /// with this index.
393  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
394  unsigned Index) const override;
395 
396  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
397  AtomicOrdering Ord) const override;
398  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
399  Value *Addr, AtomicOrdering Ord) const override;
400 
401  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
402 
404  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
405  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
407  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
408 
410  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
411 
412  bool useLoadStackGuardNode() const override;
414  getPreferredVectorAction(MVT VT) const override;
415 
416  /// If the target has a standard location for the stack protector cookie,
417  /// returns the address of that location. Otherwise, returns nullptr.
418  Value *getIRStackGuard(IRBuilder<> &IRB) const override;
419 
420  void insertSSPDeclarations(Module &M) const override;
421  Value *getSDagStackGuard(const Module &M) const override;
422  Function *getSSPStackGuardCheck(const Module &M) const override;
423 
424  /// If the target has a standard location for the unsafe stack pointer,
425  /// returns the address of that location. Otherwise, returns nullptr.
426  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
427 
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
/// Note: the personality function is not consulted; X0 is returned
/// unconditionally.
unsigned
getExceptionPointerRegister(const Constant *PersonalityFn) const override {
  // FIXME: This is a guess. Has this been defined yet?
  return AArch64::X0;
}
435 
/// If a physical register, this returns the register that receives the
/// exception typeid on entry to a landing pad.
/// Note: the personality function is not consulted; X1 is returned
/// unconditionally.
unsigned
getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
  // FIXME: This is a guess. Has this been defined yet?
  return AArch64::X1;
}
443 
444  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
445 
/// Returns true if it is reasonable to merge stores to MemVT size.
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                      const SelectionDAG &DAG) const override {
  // Do not merge to float value size (128 bits) if no implicit
  // float attribute is set: merged stores wider than 64 bits would
  // otherwise require FP/SIMD registers.

  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
      Attribute::NoImplicitFloat);

  if (NoFloat)
    return (MemVT.getSizeInBits() <= 64);
  return true;
}
458 
// Count-trailing-zeros is cheap enough on this target to speculate
// unconditionally.
bool isCheapToSpeculateCttz() const override {
  return true;
}
462 
// Count-leading-zeros is cheap enough on this target to speculate
// unconditionally.
bool isCheapToSpeculateCtlz() const override {
  return true;
}
466 
467  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
468 
// Return true if the target prefers the (~X & Y) == 0 form of a masked
// compare for scalar integers (lowered to "bics" on AArch64).
bool hasAndNotCompare(SDValue V) const override {
  // We can use bics for any scalar.
  return V.getValueType().isScalarInteger();
}
473 
474  bool hasAndNot(SDValue Y) const override {
475  EVT VT = Y.getValueType();
476 
477  if (!VT.isVector())
478  return hasAndNotCompare(Y);
479 
480  return VT.getSizeInBits() >= 64; // vector 'bic'
481  }
482 
483  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
485  return false;
486  return true;
487  }
488 
490  unsigned KeptBits) const override {
491  // For vectors, we don't have a preference..
492  if (XVT.isVector())
493  return false;
494 
495  auto VTIsOk = [](EVT VT) -> bool {
496  return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
497  VT == MVT::i64;
498  };
499 
500  // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
501  // XVT will be larger than KeptBitsVT.
502  MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
503  return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
504  }
505 
506  bool preferIncOfAddToSubOfNot(EVT VT) const override;
507 
/// Return true if it is safe to perform an integer-domain bitwise operation
/// on this floating-point type without changing the value's bits.
bool hasBitPreservingFPLogic(EVT VT) const override {
  // FIXME: Is this always true? It should be true for vectors at least.
  return VT == MVT::f32 || VT == MVT::f64;
}
512 
513  bool supportSplitCSR(MachineFunction *MF) const override {
515  MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
516  }
517  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
518  void insertCopiesSplitCSR(
519  MachineBasicBlock *Entry,
520  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
521 
/// Return true if the target supports the swifterror attribute.
bool supportSwiftError() const override {
  return true;
}
525 
526  /// Enable aggressive FMA fusion on targets that want it.
527  bool enableAggressiveFMAFusion(EVT VT) const override;
528 
529  /// Returns the size of the platform's va_list object.
530  unsigned getVaListSizeInBits(const DataLayout &DL) const override;
531 
532  /// Returns true if \p VecTy is a legal interleaved access type. This
533  /// function checks the vector element type and the overall width of the
534  /// vector.
535  bool isLegalInterleavedAccessType(VectorType *VecTy,
536  const DataLayout &DL) const;
537 
538  /// Returns the number of interleaved accesses that will be generated when
539  /// lowering accesses of the given type.
540  unsigned getNumInterleavedAccesses(VectorType *VecTy,
541  const DataLayout &DL) const;
542 
543  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
544 
545  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
546  CallingConv::ID CallConv,
547  bool isVarArg) const override;
548  /// Used for exception handling on Win64.
549  bool needsFixedCatchObjects() const override;
550 private:
551  /// Keep a pointer to the AArch64Subtarget around so that we can
552  /// make the right decision when generating code for different targets.
553  const AArch64Subtarget *Subtarget;
554 
555  bool isExtFreeImpl(const Instruction *Ext) const override;
556 
557  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
558  void addDRTypeForNEON(MVT VT);
559  void addQRTypeForNEON(MVT VT);
560 
561  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
562  bool isVarArg,
564  const SDLoc &DL, SelectionDAG &DAG,
565  SmallVectorImpl<SDValue> &InVals) const override;
566 
567  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
568  SmallVectorImpl<SDValue> &InVals) const override;
569 
570  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
571  CallingConv::ID CallConv, bool isVarArg,
573  const SDLoc &DL, SelectionDAG &DAG,
574  SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
575  SDValue ThisVal) const;
576 
577  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
578 
579  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
580 
581  bool isEligibleForTailCallOptimization(
582  SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
584  const SmallVectorImpl<SDValue> &OutVals,
585  const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
586 
587  /// Finds the incoming stack arguments which overlap the given fixed stack
588  /// object and incorporates their load into the current chain. This prevents
589  /// an upcoming store from clobbering the stack argument before it's used.
590  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
591  MachineFrameInfo &MFI, int ClobberedFI) const;
592 
593  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
594 
595  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
596  SDValue &Chain) const;
597 
598  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
599  bool isVarArg,
601  LLVMContext &Context) const override;
602 
603  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
605  const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
606  SelectionDAG &DAG) const override;
607 
609  unsigned Flag) const;
611  unsigned Flag) const;
613  unsigned Flag) const;
615  unsigned Flag) const;
616  template <class NodeTy>
617  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
618  template <class NodeTy>
619  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
620  template <class NodeTy>
621  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
622  template <class NodeTy>
623  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
624  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
625  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
626  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
627  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
628  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
629  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
630  SelectionDAG &DAG) const;
631  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
632  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
633  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
634  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
635  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
637  SDValue TVal, SDValue FVal, const SDLoc &dl,
638  SelectionDAG &DAG) const;
639  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
640  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
641  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
642  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
643  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
644  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
645  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
646  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
647  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
648  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
649  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
650  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
652  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
653  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
656  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
659  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
660  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
661  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
662  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
663  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
664  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
665  RTLIB::Libcall Call) const;
666  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
667  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
668  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
670  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
671  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
672  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
674  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
675  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
676  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
677  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
679  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
680  SDValue &Size,
681  SelectionDAG &DAG) const;
682 
683  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
684  SmallVectorImpl<SDNode *> &Created) const override;
685  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
686  int &ExtraSteps, bool &UseOneConst,
687  bool Reciprocal) const override;
688  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
689  int &ExtraSteps) const override;
690  unsigned combineRepeatedFPDivisors() const override;
691 
692  ConstraintType getConstraintType(StringRef Constraint) const override;
693  unsigned getRegisterByName(const char* RegName, EVT VT,
694  SelectionDAG &DAG) const override;
695 
696  /// Examine constraint string and operand type and determine a weight value.
697  /// The operand object must already have been set up with the operand type.
699  getSingleConstraintMatchWeight(AsmOperandInfo &info,
700  const char *constraint) const override;
701 
702  std::pair<unsigned, const TargetRegisterClass *>
703  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
704  StringRef Constraint, MVT VT) const override;
705 
706  const char *LowerXConstraint(EVT ConstraintVT) const override;
707 
708  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
709  std::vector<SDValue> &Ops,
710  SelectionDAG &DAG) const override;
711 
712  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
713  if (ConstraintCode == "Q")
715  // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
716  // followed by llvm_unreachable so we'll leave them unimplemented in
717  // the backend for now.
718  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
719  }
720 
721  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
722  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
723  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
724  ISD::MemIndexedMode &AM, bool &IsInc,
725  SelectionDAG &DAG) const;
726  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
728  SelectionDAG &DAG) const override;
729  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
730  SDValue &Offset, ISD::MemIndexedMode &AM,
731  SelectionDAG &DAG) const override;
732 
733  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
734  SelectionDAG &DAG) const override;
735 
736  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
737 
738  void finalizeLowering(MachineFunction &MF) const override;
739 };
740 
741 namespace AArch64 {
743  const TargetLibraryInfo *libInfo);
744 } // end namespace AArch64
745 
746 } // end namespace llvm
747 
748 #endif
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
static MVT getIntegerVT(unsigned BitWidth)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:913
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:110
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
This class represents lattice values for constants.
Definition: AllocatorList.h:23
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:65
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:530
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:145
This class represents a function call, abstracting a target machine's calling convention.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:323
unsigned const TargetRegisterInfo * TRI
An instruction for reading from memory.
Definition: Instructions.h:167
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:693
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:779
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
AtomicOrdering
Atomic ordering for LLVM's memory model.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:404
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
This contains information for each constraint that we are lowering.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
An instruction for storing to memory.
Definition: Instructions.h:320
Natural vector cast.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:995
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:117
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
This is an important base class in LLVM.
Definition: Constant.h:41
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:970
CombineLevel
Definition: DAGCombine.h:15
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
lazy value info
Extended Value Type.
Definition: ValueTypes.h:33
static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
static Value * LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower ctpop of V before the specified instruction IP.
CCState - This class holds information needed while lowering arguments and return values...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:212
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
Provides information about what library functions are available for the current target.
AddressSpace
Definition: NVPTXBaseInfo.h:21
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:920
Represents one node in the SelectionDAG.
static bool Enabled
Definition: Statistic.cpp:50
const Function & getFunction() const
Return the LLVM function that this machine code represents.
bool hasBitPreservingFPLogic(EVT VT) const override
Return true if it is safe to transform an integer-domain bitwise operation into the equivalent floati...
Class to represent vector types.
Definition: DerivedTypes.h:427
Class for arbitrary precision integers.
Definition: APInt.h:69
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const SelectionDAG &DAG) const override
Returns if it's reasonable to merge stores to MemVT size.
amdgpu Simplify well known AMD library false FunctionCallee Callee
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
Flags
Flags values. These may be or'd together.
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
Representation of each machine instruction.
Definition: MachineInstr.h:64
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F, const Loop &L)
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:150
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
Definition: Function.h:619
uint32_t Size
Definition: Profile.cpp:46
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:174
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override
Return true if SHIFT instructions should be expanded to SHIFT_PARTS instructions, and false if a libr...
LLVM Value Representation.
Definition: Value.h:72
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we transform the IR-optimal check for whether given truncation down into KeptBits would be trun...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:498
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:950