LLVM  4.0.0
ARMISelLowering.h
Go to the documentation of this file.
1 //===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that ARM uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
16 #define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
17 
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringRef.h"
27 #include "llvm/IR/CallingConv.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InlineAsm.h"
30 #include "llvm/Support/CodeGen.h"
32 #include <utility>
33 
34 namespace llvm {
35 
36 class ARMSubtarget;
37 class InstrItineraryData;
38 
39  namespace ARMISD {
40 
41  // ARM Specific DAG Nodes
42  enum NodeType : unsigned {
43  // Start the numbering where the builtin ops and target ops leave off.
45 
46  Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
47  // TargetExternalSymbol, and TargetGlobalAddress.
48  WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
49  // PIC mode.
50  WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
51 
52  // Add pseudo op to model memcpy for struct byval.
54 
55  CALL, // Function call.
56  CALL_PRED, // Function call that's predicable.
57  CALL_NOLINK, // Function call with branch not branch-and-link.
58  BRCOND, // Conditional branch.
59  BR_JT, // Jumptable branch.
60  BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
61  RET_FLAG, // Return with a flag operand.
62  INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.
63 
64  PIC_ADD, // Add with a PC operand and a PIC label.
65 
66  CMP, // ARM compare instructions.
67  CMN, // ARM CMN instructions.
68  CMPZ, // ARM compare that sets only Z flag.
69  CMPFP, // ARM VFP compare instruction, sets FPSCR.
70  CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
71  FMSTAT, // ARM fmstat instruction.
72 
73  CMOV, // ARM conditional move instructions.
74 
75  SSAT, // Signed saturation
76 
78 
79  SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
80  SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
81  RRX, // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
82 
83  ADDC, // Add with carry
84  ADDE, // Add using carry
85  SUBC, // Sub with carry
86  SUBE, // Sub using carry
87 
88  VMOVRRD, // double to two gprs.
89  VMOVDRR, // Two gprs to double.
90 
91  EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
92  EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
93  EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.
94 
95  TC_RETURN, // Tail call return pseudo.
96 
98 
99  DYN_ALLOC, // Dynamic allocation on the stack.
100 
101  MEMBARRIER_MCR, // Memory barrier (MCR)
102 
103  PRELOAD, // Preload
104 
105  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
106  WIN__DBZCHK, // Windows' divide by zero check
107 
108  VCEQ, // Vector compare equal.
109  VCEQZ, // Vector compare equal to zero.
110  VCGE, // Vector compare greater than or equal.
111  VCGEZ, // Vector compare greater than or equal to zero.
112  VCLEZ, // Vector compare less than or equal to zero.
113  VCGEU, // Vector compare unsigned greater than or equal.
114  VCGT, // Vector compare greater than.
115  VCGTZ, // Vector compare greater than zero.
116  VCLTZ, // Vector compare less than zero.
117  VCGTU, // Vector compare unsigned greater than.
118  VTST, // Vector test bits.
119 
120  // Vector shift by immediate:
121  VSHL, // ...left
122  VSHRs, // ...right (signed)
123  VSHRu, // ...right (unsigned)
124 
125  // Vector rounding shift by immediate:
126  VRSHRs, // ...right (signed)
127  VRSHRu, // ...right (unsigned)
128  VRSHRN, // ...right narrow
129 
130  // Vector saturating shift by immediate:
131  VQSHLs, // ...left (signed)
132  VQSHLu, // ...left (unsigned)
133  VQSHLsu, // ...left (signed to unsigned)
134  VQSHRNs, // ...right narrow (signed)
135  VQSHRNu, // ...right narrow (unsigned)
136  VQSHRNsu, // ...right narrow (signed to unsigned)
137 
138  // Vector saturating rounding shift by immediate:
139  VQRSHRNs, // ...right narrow (signed)
140  VQRSHRNu, // ...right narrow (unsigned)
141  VQRSHRNsu, // ...right narrow (signed to unsigned)
142 
143  // Vector shift and insert:
144  VSLI, // ...left
145  VSRI, // ...right
146 
147  // Vector get lane (VMOV scalar to ARM core register)
148  // (These are used for 8- and 16-bit element types only.)
149  VGETLANEu, // zero-extend vector extract element
150  VGETLANEs, // sign-extend vector extract element
151 
152  // Vector move immediate and move negated immediate:
155 
156  // Vector move f32 immediate:
158 
159  // Vector duplicate:
162 
163  // Vector shuffles:
164  VEXT, // extract
165  VREV64, // reverse elements within 64-bit doublewords
166  VREV32, // reverse elements within 32-bit words
167  VREV16, // reverse elements within 16-bit halfwords
168  VZIP, // zip (interleave)
169  VUZP, // unzip (deinterleave)
170  VTRN, // transpose
171  VTBL1, // 1-register shuffle with mask
172  VTBL2, // 2-register shuffle with mask
173 
174  // Vector multiply long:
175  VMULLs, // ...signed
176  VMULLu, // ...unsigned
177 
178  UMLAL, // 64bit Unsigned Accumulate Multiply
179  SMLAL, // 64bit Signed Accumulate Multiply
180  UMAAL, // 64-bit Unsigned Accumulate Accumulate Multiply
181 
182  // Operands of the standard BUILD_VECTOR node are not legalized, which
183  // is fine if BUILD_VECTORs are always lowered to shuffles or other
184  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
185  // operands need to be legalized. Define an ARM-specific version of
186  // BUILD_VECTOR for this purpose.
188 
189  // Bit-field insert
191 
192  // Vector OR with immediate
194  // Vector AND with NOT of immediate
196 
197  // Vector bitwise select
199 
200  // Pseudo-instruction representing a memory copy using ldm/stm
201  // instructions.
203 
204  // Vector load N-element structure to all lanes:
209 
210  // NEON loads with post-increment base updates:
222 
223  // NEON stores with post-increment base updates:
231  };
232 
233  } // end namespace ARMISD
234 
235  /// Define some predicates that are used for node matching.
236  namespace ARM {
237 
 /// Returns true when \p v is the bitwise inversion of a contiguous run of
 /// set bits — presumably used to match ARM BFI/BFC bit-field-insert/clear
 /// patterns (see the "Bit-field insert" node above); verify against the
 /// definition in ARMISelLowering.cpp.
238  bool isBitFieldInvertedMask(unsigned v);
239 
240  } // end namespace ARM
241 
242  //===--------------------------------------------------------------------===//
243  // ARMTargetLowering - ARM Implementation of the TargetLowering interface
244 
246  public:
247  explicit ARMTargetLowering(const TargetMachine &TM,
248  const ARMSubtarget &STI);
249 
250  unsigned getJumpTableEncoding() const override;
251  bool useSoftFloat() const override;
252 
253  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
254 
255  /// ReplaceNodeResults - Replace the results of node with an illegal result
256  /// type with new values built out of custom code.
257  ///
259  SelectionDAG &DAG) const override;
260 
261  const char *getTargetNodeName(unsigned Opcode) const override;
262 
263  bool isSelectSupported(SelectSupportKind Kind) const override {
264  // ARM does not support scalar condition selects on vectors.
265  return (Kind != ScalarCondVectorVal);
266  }
267 
268  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
270  EVT VT) const override;
271 
274  MachineBasicBlock *MBB) const override;
275 
277  SDNode *Node) const override;
278 
282  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
283 
284  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;
285 
286  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
287  /// unaligned memory accesses of the specified type. Returns whether it
288  /// is "fast" by reference in the second argument.
289  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
290  unsigned Align,
291  bool *Fast) const override;
292 
293  EVT getOptimalMemOpType(uint64_t Size,
294  unsigned DstAlign, unsigned SrcAlign,
295  bool IsMemset, bool ZeroMemset,
296  bool MemcpyStrSrc,
297  MachineFunction &MF) const override;
298 
300  bool isZExtFree(SDValue Val, EVT VT2) const override;
301 
302  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
303 
304  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
305 
306 
307  /// isLegalAddressingMode - Return true if the addressing mode represented
308  /// by AM is legal for this target, for a load/store of the specified type.
309  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
310  Type *Ty, unsigned AS) const override;
311 
312  /// getScalingFactorCost - Return the cost of the scaling used in
313  /// addressing mode represented by AM.
314  /// If the AM is supported, the return value must be >= 0.
315  /// If the AM is not supported, the return value must be negative.
316  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
317  unsigned AS) const override;
318 
319  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
320 
321  /// isLegalICmpImmediate - Return true if the specified immediate is legal
322  /// icmp immediate, that is the target has icmp instructions which can
323  /// compare a register against the immediate without having to materialize
324  /// the immediate into a register.
325  bool isLegalICmpImmediate(int64_t Imm) const override;
326 
327  /// isLegalAddImmediate - Return true if the specified immediate is legal
328  /// add immediate, that is the target has add instructions which can
329  /// add a register and the immediate without having to materialize
330  /// the immediate into a register.
331  bool isLegalAddImmediate(int64_t Imm) const override;
332 
333  /// getPreIndexedAddressParts - returns true by value, base pointer and
334  /// offset pointer and addressing mode by reference if the node's address
335  /// can be legally represented as pre-indexed load / store address.
338  SelectionDAG &DAG) const override;
339 
340  /// getPostIndexedAddressParts - returns true by value, base pointer and
341  /// offset pointer and addressing mode by reference if this node can be
342  /// combined with a load / store to form a post-indexed load / store.
343  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
345  SelectionDAG &DAG) const override;
346 
347  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
348  APInt &KnownOne,
349  const SelectionDAG &DAG,
350  unsigned Depth) const override;
351 
352 
353  bool ExpandInlineAsm(CallInst *CI) const override;
354 
355  ConstraintType getConstraintType(StringRef Constraint) const override;
356 
357  /// Examine constraint string and operand type and determine a weight value.
358  /// The operand object must already have been set up with the operand type.
360  AsmOperandInfo &info, const char *constraint) const override;
361 
362  std::pair<unsigned, const TargetRegisterClass *>
364  StringRef Constraint, MVT VT) const override;
365 
366  const char *LowerXConstraint(EVT ConstraintVT) const override;
367 
368  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
369  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
370  /// true it means one of the asm constraint of the inline asm instruction
371  /// being processed is 'm'.
372  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
373  std::vector<SDValue> &Ops,
374  SelectionDAG &DAG) const override;
375 
376  unsigned
377  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
378  if (ConstraintCode == "Q")
380  else if (ConstraintCode == "o")
382  else if (ConstraintCode.size() == 2) {
383  if (ConstraintCode[0] == 'U') {
384  switch(ConstraintCode[1]) {
385  default:
386  break;
387  case 'm':
389  case 'n':
391  case 'q':
393  case 's':
395  case 't':
397  case 'v':
399  case 'y':
401  }
402  }
403  }
404  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
405  }
406 
407  const ARMSubtarget* getSubtarget() const {
408  return Subtarget;
409  }
410 
411  /// getRegClassFor - Return the register class that should be used for the
412  /// specified value type.
413  const TargetRegisterClass *getRegClassFor(MVT VT) const override;
414 
415  /// Returns true if a cast between SrcAS and DestAS is a noop.
416  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
417  // Addrspacecasts are always noops.
418  return true;
419  }
420 
421  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
422  unsigned &PrefAlign) const override;
423 
424  /// createFastISel - This method returns a target specific FastISel object,
425  /// or null if the target does not support "fast" ISel.
427  const TargetLibraryInfo *libInfo) const override;
428 
430 
431  bool
432  isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
433  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
434 
435  /// isFPImmLegal - Returns true if the target can instruction select the
436  /// specified FP immediate natively. If false, the legalizer will
437  /// materialize the FP immediate as a load from a constant pool.
438  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
439 
440  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
441  const CallInst &I,
442  unsigned Intrinsic) const override;
443 
444  /// \brief Returns true if it is beneficial to convert a load of a constant
445  /// to just the constant itself.
447  Type *Ty) const override;
448 
449  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
450  /// with this index.
451  bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const override;
452 
453  /// \brief Returns true if an argument of type Ty needs to be passed in a
454  /// contiguous block of registers in calling convention CallConv.
456  Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;
457 
458  /// If a physical register, this returns the register that receives the
459  /// exception address on entry to an EH pad.
460  unsigned
461  getExceptionPointerRegister(const Constant *PersonalityFn) const override;
462 
463  /// If a physical register, this returns the register that receives the
464  /// exception typeid on entry to a landing pad.
465  unsigned
466  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
467 
468  Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
469  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
470  AtomicOrdering Ord) const override;
472  Value *Addr, AtomicOrdering Ord) const override;
473 
474  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;
475 
477  bool IsStore, bool IsLoad) const override;
479  bool IsStore, bool IsLoad) const override;
480 
481  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
482 
485  ArrayRef<unsigned> Indices,
486  unsigned Factor) const override;
488  unsigned Factor) const override;
489 
490  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
492  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
493  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
495  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
496  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
497 
498  bool useLoadStackGuardNode() const override;
499 
500  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
501  unsigned &Cost) const override;
502 
503  bool isCheapToSpeculateCttz() const override;
504  bool isCheapToSpeculateCtlz() const override;
505 
506  bool supportSwiftError() const override {
507  return true;
508  }
509 
510  bool hasStandaloneRem(EVT VT) const override {
511  return HasStandaloneRem;
512  }
513 
514  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
515  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;
516 
517  protected:
518  std::pair<const TargetRegisterClass *, uint8_t>
520  MVT VT) const override;
521 
522  private:
523  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
524  /// make the right decision when generating code for different targets.
525  const ARMSubtarget *Subtarget;
526 
527  const TargetRegisterInfo *RegInfo;
528 
529  const InstrItineraryData *Itins;
530 
531  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
532  ///
533  unsigned ARMPCLabelIndex;
534 
535  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
536  // check.
537  bool InsertFencesForAtomic;
538 
539  bool HasStandaloneRem = true;
540 
541  void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
542  void addDRTypeForNEON(MVT VT);
543  void addQRTypeForNEON(MVT VT);
544  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
545 
546  typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
547 
548  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
549  SDValue &Arg, RegsToPassVector &RegsToPass,
550  CCValAssign &VA, CCValAssign &NextVA,
551  SDValue &StackPtr,
552  SmallVectorImpl<SDValue> &MemOpChains,
553  ISD::ArgFlagsTy Flags) const;
554  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
555  SDValue &Root, SelectionDAG &DAG,
556  const SDLoc &dl) const;
557 
558  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
559  bool isVarArg) const;
560  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
561  bool isVarArg) const;
562  SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
563  const SDLoc &dl, SelectionDAG &DAG,
564  const CCValAssign &VA,
565  ISD::ArgFlagsTy Flags) const;
566  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
567  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
568  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
569  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
570  const ARMSubtarget *Subtarget) const;
571  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
572  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
573  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
574  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
575  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
576  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
577  SelectionDAG &DAG) const;
578  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
579  SelectionDAG &DAG,
580  TLSModel::Model model) const;
581  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
582  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
583  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
584  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
585  SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
586  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
587  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
588  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
589  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
590  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
591  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
592  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
593  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
594  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
595  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
596  const ARMSubtarget *ST) const;
597  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
598  const ARMSubtarget *ST) const;
599  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
600  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
601  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
602  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
603  SmallVectorImpl<SDValue> &Results) const;
604  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
605  SDValue &Chain) const;
606  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
607  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
608  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
609  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
610  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
611  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
612 
613  unsigned getRegisterByName(const char* RegName, EVT VT,
614  SelectionDAG &DAG) const override;
615 
616  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
617  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
618  /// expanded to FMAs when this method returns true, otherwise fmuladd is
619  /// expanded to fmul + fadd.
620  ///
621  /// ARM supports both fused and unfused multiply-add operations; we already
622  /// lower a pair of fmul and fadd to the latter so it's not clear that there
623  /// would be a gain or that the gain would be worthwhile enough to risk
624  /// correctness bugs.
625  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }
626 
627  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
628 
629  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
630  CallingConv::ID CallConv, bool isVarArg,
631  const SmallVectorImpl<ISD::InputArg> &Ins,
632  const SDLoc &dl, SelectionDAG &DAG,
633  SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
634  SDValue ThisVal) const;
635 
636  bool supportSplitCSR(MachineFunction *MF) const override {
637  return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
638  MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
639  }
640 
641  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
642  void insertCopiesSplitCSR(
643  MachineBasicBlock *Entry,
644  const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
645 
646  SDValue
647  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
648  const SmallVectorImpl<ISD::InputArg> &Ins,
649  const SDLoc &dl, SelectionDAG &DAG,
650  SmallVectorImpl<SDValue> &InVals) const override;
651 
652  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
653  SDValue &Chain, const Value *OrigArg,
654  unsigned InRegsParamRecordIdx, int ArgOffset,
655  unsigned ArgSize) const;
656 
657  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
658  const SDLoc &dl, SDValue &Chain,
659  unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
660  bool ForceMutable = false) const;
661 
662  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
663  SmallVectorImpl<SDValue> &InVals) const override;
664 
665  /// HandleByVal - Target-specific cleanup for ByVal support.
666  void HandleByVal(CCState *, unsigned &, unsigned) const override;
667 
668  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
669  /// for tail call optimization. Targets which want to do tail call
670  /// optimization should implement this function.
671  bool IsEligibleForTailCallOptimization(SDValue Callee,
672  CallingConv::ID CalleeCC,
673  bool isVarArg,
674  bool isCalleeStructRet,
675  bool isCallerStructRet,
676  const SmallVectorImpl<ISD::OutputArg> &Outs,
677  const SmallVectorImpl<SDValue> &OutVals,
678  const SmallVectorImpl<ISD::InputArg> &Ins,
679  SelectionDAG& DAG) const;
680 
681  bool CanLowerReturn(CallingConv::ID CallConv,
682  MachineFunction &MF, bool isVarArg,
683  const SmallVectorImpl<ISD::OutputArg> &Outs,
684  LLVMContext &Context) const override;
685 
686  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
687  const SmallVectorImpl<ISD::OutputArg> &Outs,
688  const SmallVectorImpl<SDValue> &OutVals,
689  const SDLoc &dl, SelectionDAG &DAG) const override;
690 
691  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
692 
693  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
694 
695  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
696  SDValue ARMcc, SDValue CCR, SDValue Cmp,
697  SelectionDAG &DAG) const;
698  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
699  SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
700  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
701  const SDLoc &dl) const;
702  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;
703 
704  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
705 
706  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
707  MachineBasicBlock *DispatchBB, int FI) const;
708 
709  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;
710 
711  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;
712 
713  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
714  MachineBasicBlock *MBB) const;
715 
716  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
717  MachineBasicBlock *MBB) const;
718  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
719  MachineBasicBlock *MBB) const;
720  };
721 
726  };
727 
728  namespace ARM {
729 
 /// Free-function factory for the ARM-specific FastISel object; returns
 /// null if the target does not support "fast" instruction selection
 /// (mirrors the ARMTargetLowering::createFastISel override declared above).
730  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
731  const TargetLibraryInfo *libInfo);
732 
733  } // end namespace ARM
734 
735 } // end namespace llvm
736 
737 #endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:762
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:102
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the value to ToTy in the result register.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
LLVMContext & Context
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:504
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
This class represents a function call, abstracting a target machine's calling convention.
bool hasStandaloneRem(EVT VT) const override
Return true if the target can handle a standalone remainder operation.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
Function Alias Analysis Results
This instruction constructs a fixed permutation of two input vectors.
An instruction for reading from memory.
Definition: Instructions.h:164
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:669
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail position.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
lazy value info
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
struct fuzzer::@269 Flags
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:588
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
AtomicOrdering
Atomic ordering for LLVM's memory model.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:31
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
MachineBasicBlock * MBB
Itinerary data supplied by a subtarget to be used by a target.
An instruction for storing to memory.
Definition: Instructions.h:300
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:842
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:135
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
bool useSoftFloat() const override
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
MVT - Machine Value Type.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:48
This is an important base class in LLVM.
Definition: Constant.h:42
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
uint32_t Offset
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM...
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
EVT - Extended Value Type.
Definition: ValueTypes.h:31
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const ARMSubtarget * getSubtarget() const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:166
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:843
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:769
Represents one node in the SelectionDAG.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
Class for arbitrary precision integers.
Definition: APInt.h:77
Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AddrMode
ARM Addressing Modes.
Definition: ARMBaseInfo.h:235
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
const TargetRegisterClass * getRegClassFor(MVT VT) const override
getRegClassFor - Return the register class that should be used for the specified value type...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
Representation of each machine instruction.
Definition: MachineInstr.h:52
SelectSupportKind
Enum that describes what type of support for selects the target has.
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns true if the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass into a ...
const unsigned Kind
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
bool isSelectSupported(SelectSupportKind Kind) const override
bool isShuffleMaskLegal(const SmallVectorImpl< int > &M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
LLVM Value Representation.
Definition: Value.h:71
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target...
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
Primary interface to the complete machine description for the target machine.
IRTranslator LLVM IR -> MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:47
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
bool isBitFieldInvertedMask(unsigned v)
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Fast - This calling convention attempts to make calls as fast as possible (e.g. by passing things in registers).
Definition: CallingConv.h:42
Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
This file describes how to lower LLVM code to machine code.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:799