//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADR,      // ADR
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar extract
  EXTR,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by scalar (again)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t vectors; which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions. (An illustrative use is
  /// sketched just after this namespace.)
  NVCAST,

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD
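
// A hypothetical helper (illustration only, not part of the LLVM API) showing
// the kind of use AArch64ISD::NVCAST is intended for: re-interpret a value in
// a SIMD register with a different vector type while keeping the in-register
// lane layout, where ISD::BITCAST on a big-endian target would additionally
// imply a byte swap (and hence extra REV instructions).
static inline SDValue getNaturalCast(SelectionDAG &DAG, const SDLoc &DL,
                                     EVT ResVT, SDValue Vec) {
  // Same shape as DAG.getNode(ISD::BITCAST, DL, ResVT, Vec), but with the
  // AArch64-specific opcode that carries no byte-swap semantics.
  return DAG.getNode(AArch64ISD::NVCAST, DL, ResVT, Vec);
}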

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace
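
// A hypothetical query (illustration only, not part of this header) built on
// isDef32: a zero-extend from i32 to i64 of such a value needs no explicit
// masking, because the defining 32-bit instruction has already cleared bits
// [63:32] of the register.
static inline bool zextOfDef32IsFree(const SDNode &ZExt) {
  if (ZExt.getOpcode() != ISD::ZERO_EXTEND)
    return false;
  SDValue Src = ZExt.getOperand(0);
  return Src.getValueType() == MVT::i32 && isDef32(*Src.getNode());
}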

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.

    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().optForMinSize())
      return false;
    return true;
  }

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }
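
  // Illustrative example (hypothetical, for exposition): with KeptBits == 8,
  // the IR-canonical test for whether truncating i64 %x to i8 is lossless is
  // roughly (add %x, 128) u< 256. Returning true above lets the DAG combiner
  // rewrite it into the compare-against-sign-extension form,
  // (sext (trunc %x to i8) to i64) == %x, which AArch64 can select as a
  // compare with a sign-extended register operand (e.g. "cmp x0, w0, sxtb")
  // instead of a shift pair.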

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;
private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    //        followed by llvm_unreachable so we'll leave them unimplemented in
    //        the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif