LLVM  10.0.0svn
AArch64ISelLowering.h
Go to the documentation of this file.
1 //==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that AArch64 uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
15 #define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
16 
17 #include "AArch64.h"
21 #include "llvm/IR/CallingConv.h"
22 #include "llvm/IR/Instruction.h"
23 
24 namespace llvm {
25 
26 namespace AArch64ISD {
27 
28 enum NodeType : unsigned {
30  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
31  CALL, // Function call.
32 
33  // Produces the full sequence of instructions for getting the thread pointer
34  // offset of a variable into X0, using the TLSDesc model.
36  ADRP, // Page address of a TargetGlobalAddress operand.
37  ADR, // ADR
38  ADDlow, // Add the low 12 bits of a TargetGlobalAddress operand.
39  LOADgot, // Load from automatically generated descriptor (e.g. Global
40  // Offset Table, TLS record).
41  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
42  BRCOND, // Conditional branch instruction; "b.cond".
44  FCSEL, // Conditional move instruction.
45  CSINV, // Conditional select invert.
46  CSNEG, // Conditional select negate.
47  CSINC, // Conditional select increment.
48 
49  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
50  // ELF.
52  ADC,
53  SBC, // adc, sbc instructions
54 
55  // Arithmetic instructions which write flags.
61 
62  // Conditional compares. Operands: left,right,falsecc,cc,flags
66 
67  // Floating point comparison
69 
70  // Scalar extract
72 
73  // Scalar-to-vector duplication
74  DUP,
79 
80  // Vector immedate moves
88 
89  // Vector immediate ops
92 
93  // Vector bit select: similar to ISD::VSELECT but not all bits within an
94  // element must be identical.
95  BSL,
96 
97  // Vector arithmetic negation
98  NEG,
99 
100  // Vector shuffles
111 
112  // Vector shift by scalar
116 
117  // Vector shift by scalar (again)
123 
124  // Vector comparisons
133 
134  // Vector zero comparisons
145 
146  // Vector across-lanes addition
147  // Only the lower result lane is defined.
150 
151  // Vector across-lanes min/max
152  // Only the lower result lane is defined.
157 
158  // Vector bitwise negation
160 
161  // Vector bitwise selection
163 
164  // Compare-and-branch
169 
170  // Tail calls
172 
173  // Custom prefetch handling
175 
176  // {s|u}int to FP within a FP register.
179 
180  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
181  /// world w.r.t vectors; which causes additional REV instructions to be
182  /// generated to compensate for the byte-swapping. But sometimes we do
183  /// need to re-interpret the data in SIMD vector registers in big-endian
184  /// mode without emitting such REV instructions.
186 
189 
190  // Reciprocal estimates and steps.
193 
198 
199  // NEON Load/Store with post-increment base updates
223 
228 
229 };
230 
231 } // end namespace AArch64ISD
232 
233 namespace {
234 
235 // Any instruction that defines a 32-bit result zeros out the high half of the
236 // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
237 // be copying from a truncate. But any other 32-bit operation will zero-extend
238 // up to 64 bits.
239 // FIXME: X86 also checks for CMOV here. Do we need something similar?
240 static inline bool isDef32(const SDNode &N) {
241  unsigned Opc = N.getOpcode();
242  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
243  Opc != ISD::CopyFromReg;
244 }
245 
246 } // end anonymous namespace
247 
248 class AArch64Subtarget;
250 
252 public:
253  explicit AArch64TargetLowering(const TargetMachine &TM,
254  const AArch64Subtarget &STI);
255 
256  /// Selects the correct CCAssignFn for a given CallingConvention value.
257  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
258 
259  /// Selects the correct CCAssignFn for a given CallingConvention value.
260  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;
261 
262  /// Determine which of the bits specified in Mask are known to be either zero
263  /// or one and return them in the KnownZero/KnownOne bitsets.
264  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
265  const APInt &DemandedElts,
266  const SelectionDAG &DAG,
267  unsigned Depth = 0) const override;
268 
269  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const override {
270  // Returning i64 unconditionally here (i.e. even for ILP32) means that the
271  // *DAG* representation of pointers will always be 64-bits. They will be
272  // truncated and extended when transferred to memory, but the 64-bit DAG
273  // allows us to use AArch64's addressing modes much more easily.
274  return MVT::getIntegerVT(64);
275  }
276 
277  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
278  TargetLoweringOpt &TLO) const override;
279 
280  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;
281 
282  /// Returns true if the target allows unaligned memory accesses of the
283  /// specified type.
284  bool allowsMisalignedMemoryAccesses(
285  EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
287  bool *Fast = nullptr) const override;
288  /// LLT variant.
289  bool allowsMisalignedMemoryAccesses(
290  LLT Ty, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
291  bool *Fast = nullptr) const override;
292 
293  /// Provide custom lowering hooks for some operations.
294  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
295 
296  const char *getTargetNodeName(unsigned Opcode) const override;
297 
298  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
299 
300  /// Returns true if a cast between SrcAS and DestAS is a noop.
301  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
302  // Addrspacecasts are always noops.
303  return true;
304  }
305 
306  /// This method returns a target specific FastISel object, or null if the
307  /// target does not support "fast" ISel.
309  const TargetLibraryInfo *libInfo) const override;
310 
311  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
312 
313  bool isFPImmLegal(const APFloat &Imm, EVT VT,
314  bool ForCodeSize) const override;
315 
316  /// Return true if the given shuffle mask can be codegen'd directly, or if it
317  /// should be stack expanded.
318  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
319 
320  /// Return the ISD::SETCC ValueType.
321  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
322  EVT VT) const override;
323 
324  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
325 
326  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
327  MachineBasicBlock *BB) const;
328 
329  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
330  MachineBasicBlock *BB) const;
331 
332  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
333  MachineBasicBlock *BB) const;
334 
336  EmitInstrWithCustomInserter(MachineInstr &MI,
337  MachineBasicBlock *MBB) const override;
338 
339  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
340  MachineFunction &MF,
341  unsigned Intrinsic) const override;
342 
343  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
344  EVT NewVT) const override;
345 
346  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
347  bool isTruncateFree(EVT VT1, EVT VT2) const override;
348 
349  bool isProfitableToHoist(Instruction *I) const override;
350 
351  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
352  bool isZExtFree(EVT VT1, EVT VT2) const override;
353  bool isZExtFree(SDValue Val, EVT VT2) const override;
354 
355  bool shouldSinkOperands(Instruction *I,
356  SmallVectorImpl<Use *> &Ops) const override;
357 
358  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;
359 
360  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
361 
362  bool lowerInterleavedLoad(LoadInst *LI,
364  ArrayRef<unsigned> Indices,
365  unsigned Factor) const override;
366  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
367  unsigned Factor) const override;
368 
369  bool isLegalAddImmediate(int64_t) const override;
370  bool isLegalICmpImmediate(int64_t) const override;
371 
372  bool shouldConsiderGEPOffsetSplit() const override;
373 
374  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
375  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
376  const AttributeList &FuncAttributes) const override;
377 
378  LLT getOptimalMemOpLLT(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
379  bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
380  const AttributeList &FuncAttributes) const override;
381 
382  /// Return true if the addressing mode represented by AM is legal for this
383  /// target, for a load/store of the specified type.
384  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
385  unsigned AS,
386  Instruction *I = nullptr) const override;
387 
388  /// Return the cost of the scaling factor used in the addressing
389  /// mode represented by AM for this target, for a load/store
390  /// of the specified type.
391  /// If the AM is supported, the return value must be >= 0.
392  /// If the AM is not supported, it returns a negative value.
393  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
394  unsigned AS) const override;
395 
396  /// Return true if an FMA operation is faster than a pair of fmul and fadd
397  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
398  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
399  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
400 
401  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
402 
403  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
404  bool isDesirableToCommuteWithShift(const SDNode *N,
405  CombineLevel Level) const override;
406 
407  /// Returns true if it is beneficial to convert a load of a constant
408  /// to just the constant itself.
409  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
410  Type *Ty) const override;
411 
412  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
413  /// with this index.
414  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
415  unsigned Index) const override;
416 
417  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
418  AtomicOrdering Ord) const override;
419  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
420  Value *Addr, AtomicOrdering Ord) const override;
421 
422  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
423 
425  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
426  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
428  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
429 
431  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
432 
433  bool useLoadStackGuardNode() const override;
435  getPreferredVectorAction(MVT VT) const override;
436 
437  /// If the target has a standard location for the stack protector cookie,
438  /// returns the address of that location. Otherwise, returns nullptr.
439  Value *getIRStackGuard(IRBuilder<> &IRB) const override;
440 
441  void insertSSPDeclarations(Module &M) const override;
442  Value *getSDagStackGuard(const Module &M) const override;
443  Function *getSSPStackGuardCheck(const Module &M) const override;
444 
445  /// If the target has a standard location for the unsafe stack pointer,
446  /// returns the address of that location. Otherwise, returns nullptr.
447  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;
448 
449  /// If a physical register, this returns the register that receives the
450  /// exception address on entry to an EH pad.
451  unsigned
452  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
453  // FIXME: This is a guess. Has this been defined yet?
454  return AArch64::X0;
455  }
456 
457  /// If a physical register, this returns the register that receives the
458  /// exception typeid on entry to a landing pad.
459  unsigned
460  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
461  // FIXME: This is a guess. Has this been defined yet?
462  return AArch64::X1;
463  }
464 
465  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
466 
467  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
468  const SelectionDAG &DAG) const override {
469  // Do not merge to float value size (128 bytes) if no implicit
470  // float attribute is set.
471 
472  bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
473  Attribute::NoImplicitFloat);
474 
475  if (NoFloat)
476  return (MemVT.getSizeInBits() <= 64);
477  return true;
478  }
479 
480  bool isCheapToSpeculateCttz() const override {
481  return true;
482  }
483 
484  bool isCheapToSpeculateCtlz() const override {
485  return true;
486  }
487 
488  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;
489 
490  bool hasAndNotCompare(SDValue V) const override {
491  // We can use bics for any scalar.
492  return V.getValueType().isScalarInteger();
493  }
494 
495  bool hasAndNot(SDValue Y) const override {
496  EVT VT = Y.getValueType();
497 
498  if (!VT.isVector())
499  return hasAndNotCompare(Y);
500 
501  return VT.getSizeInBits() >= 64; // vector 'bic'
502  }
503 
504  bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
506  unsigned OldShiftOpcode, unsigned NewShiftOpcode,
507  SelectionDAG &DAG) const override;
508 
509  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
510 
512  unsigned KeptBits) const override {
513  // For vectors, we don't have a preference..
514  if (XVT.isVector())
515  return false;
516 
517  auto VTIsOk = [](EVT VT) -> bool {
518  return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
519  VT == MVT::i64;
520  };
521 
522  // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
523  // XVT will be larger than KeptBitsVT.
524  MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
525  return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
526  }
527 
528  bool preferIncOfAddToSubOfNot(EVT VT) const override;
529 
530  bool hasBitPreservingFPLogic(EVT VT) const override {
531  // FIXME: Is this always true? It should be true for vectors at least.
532  return VT == MVT::f32 || VT == MVT::f64;
533  }
534 
535  bool supportSplitCSR(MachineFunction *MF) const override {
537  MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
538  }
539  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
540  void insertCopiesSplitCSR(
541  MachineBasicBlock *Entry,
542  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
543 
544  bool supportSwiftError() const override {
545  return true;
546  }
547 
548  /// Enable aggressive FMA fusion on targets that want it.
549  bool enableAggressiveFMAFusion(EVT VT) const override;
550 
551  /// Returns the size of the platform's va_list object.
552  unsigned getVaListSizeInBits(const DataLayout &DL) const override;
553 
554  /// Returns true if \p VecTy is a legal interleaved access type. This
555  /// function checks the vector element type and the overall width of the
556  /// vector.
557  bool isLegalInterleavedAccessType(VectorType *VecTy,
558  const DataLayout &DL) const;
559 
560  /// Returns the number of interleaved accesses that will be generated when
561  /// lowering accesses of the given type.
562  unsigned getNumInterleavedAccesses(VectorType *VecTy,
563  const DataLayout &DL) const;
564 
565  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
566 
567  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
568  CallingConv::ID CallConv,
569  bool isVarArg) const override;
570  /// Used for exception handling on Win64.
571  bool needsFixedCatchObjects() const override;
572 private:
573  /// Keep a pointer to the AArch64Subtarget around so that we can
574  /// make the right decision when generating code for different targets.
575  const AArch64Subtarget *Subtarget;
576 
577  bool isExtFreeImpl(const Instruction *Ext) const override;
578 
579  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
580  void addDRTypeForNEON(MVT VT);
581  void addQRTypeForNEON(MVT VT);
582 
583  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
584  bool isVarArg,
586  const SDLoc &DL, SelectionDAG &DAG,
587  SmallVectorImpl<SDValue> &InVals) const override;
588 
589  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
590  SmallVectorImpl<SDValue> &InVals) const override;
591 
592  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
593  CallingConv::ID CallConv, bool isVarArg,
595  const SDLoc &DL, SelectionDAG &DAG,
596  SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
597  SDValue ThisVal) const;
598 
599  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
600 
601  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
602 
603  bool isEligibleForTailCallOptimization(
604  SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
606  const SmallVectorImpl<SDValue> &OutVals,
607  const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
608 
609  /// Finds the incoming stack arguments which overlap the given fixed stack
610  /// object and incorporates their load into the current chain. This prevents
611  /// an upcoming store from clobbering the stack argument before it's used.
612  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
613  MachineFrameInfo &MFI, int ClobberedFI) const;
614 
615  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;
616 
617  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
618  SDValue &Chain) const;
619 
620  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
621  bool isVarArg,
623  LLVMContext &Context) const override;
624 
625  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
627  const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
628  SelectionDAG &DAG) const override;
629 
631  unsigned Flag) const;
633  unsigned Flag) const;
635  unsigned Flag) const;
637  unsigned Flag) const;
638  template <class NodeTy>
639  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
640  template <class NodeTy>
641  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
642  template <class NodeTy>
643  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
644  template <class NodeTy>
645  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
646  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
647  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
648  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
649  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
650  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
651  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
652  SelectionDAG &DAG) const;
653  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
654  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
655  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
656  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
657  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
659  SDValue TVal, SDValue FVal, const SDLoc &dl,
660  SelectionDAG &DAG) const;
661  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
662  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
663  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
664  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
665  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
666  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
667  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
668  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
669  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
670  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
671  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
672  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
674  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
675  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
678  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
680  SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
682  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
683  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
684  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
685  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
686  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
687  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
688  RTLIB::Libcall Call) const;
689  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
690  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
691  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
693  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
694  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
695  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
697  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
698  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
699  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
700  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
702  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
703  SDValue &Size,
704  SelectionDAG &DAG) const;
705 
706  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
707  SmallVectorImpl<SDNode *> &Created) const override;
708  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
709  int &ExtraSteps, bool &UseOneConst,
710  bool Reciprocal) const override;
711  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
712  int &ExtraSteps) const override;
713  unsigned combineRepeatedFPDivisors() const override;
714 
715  ConstraintType getConstraintType(StringRef Constraint) const override;
716  Register getRegisterByName(const char* RegName, EVT VT,
717  const MachineFunction &MF) const override;
718 
719  /// Examine constraint string and operand type and determine a weight value.
720  /// The operand object must already have been set up with the operand type.
722  getSingleConstraintMatchWeight(AsmOperandInfo &info,
723  const char *constraint) const override;
724 
725  std::pair<unsigned, const TargetRegisterClass *>
726  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
727  StringRef Constraint, MVT VT) const override;
728 
729  const char *LowerXConstraint(EVT ConstraintVT) const override;
730 
731  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
732  std::vector<SDValue> &Ops,
733  SelectionDAG &DAG) const override;
734 
735  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
736  if (ConstraintCode == "Q")
738  // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
739  // followed by llvm_unreachable so we'll leave them unimplemented in
740  // the backend for now.
741  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
742  }
743 
744  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
745  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
746  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
747  ISD::MemIndexedMode &AM, bool &IsInc,
748  SelectionDAG &DAG) const;
749  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
751  SelectionDAG &DAG) const override;
752  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
753  SDValue &Offset, ISD::MemIndexedMode &AM,
754  SelectionDAG &DAG) const override;
755 
756  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
757  SelectionDAG &DAG) const override;
758 
759  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
760 
761  void finalizeLowering(MachineFunction &MF) const override;
762 };
763 
764 namespace AArch64 {
766  const TargetLibraryInfo *libInfo);
767 } // end namespace AArch64
768 
769 } // end namespace llvm
770 
771 #endif
static MVT getIntegerVT(unsigned BitWidth)
static SDValue LowerCallResult(SDValue Chain, SDValue InFlag, const SmallVectorImpl< CCValAssign > &RVLocs, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals)
LowerCallResult - Lower the result values of a call into the appropriate copies out of appropriate ph...
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:921
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
EVT getValueType() const
Return the ValueType of the referenced return value.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
LLVMContext & Context
bool supportSplitCSR(MachineFunction *MF) const override
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
This class represents lattice values for constants.
Definition: AllocatorList.h:23
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:66
An instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:536
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:145
This class represents a function call, abstracting a target machine's calling convention.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:323
unsigned const TargetRegisterInfo * TRI
An instruction for reading from memory.
Definition: Instructions.h:169
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:699
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
bool hasAndNotCompare(SDValue V) const override
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) ...
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:779
static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, const SparcSubtarget *Subtarget)
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
AtomicOrdering
Atomic ordering for LLVM's memory model.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:66
unsigned getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:291
MachineFunction & getMachineFunction() const
Definition: SelectionDAG.h:414
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
This contains information for each constraint that we are lowering.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:32
An instruction for storing to memory.
Definition: Instructions.h:325
Natural vector cast.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:1020
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
Analysis containing CSE Info
Definition: CSEInfo.cpp:20
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Definition: MCRegister.h:19
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:131
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
Machine Value Type.
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:64
This is an important base class in LLVM.
Definition: Constant.h:41
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:995
CombineLevel
Definition: DAGCombine.h:15
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG, const SparcSubtarget *Subtarget)
lazy value info
Extended Value Type.
Definition: ValueTypes.h:33
static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty, SelectionDAG &DAG, unsigned Flags)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:40
static Value * LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower ctpop of V before the specified instruction IP.
CCState - This class holds information needed while lowering arguments and return values...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:212
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:221
Provides information about what library functions are available for the current target.
AddressSpace
Definition: NVPTXBaseInfo.h:21
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:928
Represents one node in the SelectionDAG.
static bool Enabled
Definition: Statistic.cpp:50
const Function & getFunction() const
Return the LLVM function that this machine code represents.
bool hasBitPreservingFPLogic(EVT VT) const override
Return true if it is safe to transform an integer-domain bitwise operation into the equivalent floati...
Class to represent vector types.
Definition: DerivedTypes.h:432
Class for arbitrary precision integers.
Definition: APInt.h:69
bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT, const SelectionDAG &DAG) const override
Returns if it&#39;s reasonable to merge stores to MemVT size.
amdgpu Simplify well known AMD library false FunctionCallee Callee
static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG)
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
Flags
Flags values. These may be or&#39;d together.
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG, const SparcTargetLowering &TLI, bool hasHardQuad)
Representation of each machine instruction.
Definition: MachineInstr.h:63
static unsigned getScalingFactorCost(const TargetTransformInfo &TTI, const LSRUse &LU, const Formula &F, const Loop &L)
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:150
#define I(x, y, z)
Definition: MD5.cpp:58
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
uint32_t Size
Definition: Profile.cpp:46
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition: ISDOpcodes.h:174
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
LLVM Value Representation.
Definition: Value.h:74
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG)
bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const override
Should we tranform the IR-optimal check for whether given truncation down into KeptBits would be trun...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:65
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:48
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:513
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG)
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This file describes how to lower LLVM code to machine code.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:958