LLVM  3.7.0
ARMISelLowering.h
Go to the documentation of this file.
1 //===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines the interfaces that ARM uses to lower LLVM code into a
11 // selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
16 #define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
17 
22 #include <vector>
23 
24 namespace llvm {
25  class ARMConstantPoolValue;
26  class ARMSubtarget;
27 
28  namespace ARMISD {
29  // ARM Specific DAG Nodes
30  enum NodeType : unsigned {
31  // Start the numbering where the builtin ops and target ops leave off.
33 
34  Wrapper, // Wrapper - A wrapper node for TargetConstantPool,
35  // TargetExternalSymbol, and TargetGlobalAddress.
36  WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
37  // PIC mode.
38  WrapperJT, // WrapperJT - A wrapper node for TargetJumpTable
39 
40  // Add pseudo op to model memcpy for struct byval.
42 
43  CALL, // Function call.
44  CALL_PRED, // Function call that's predicable.
45  CALL_NOLINK, // Function call with branch not branch-and-link.
46  tCALL, // Thumb function call.
47  BRCOND, // Conditional branch.
48  BR_JT, // Jumptable branch.
49  BR2_JT, // Jumptable branch (2 level - jumptable entry is a jump).
50  RET_FLAG, // Return with a flag operand.
51  INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.
52 
53  PIC_ADD, // Add with a PC operand and a PIC label.
54 
55  CMP, // ARM compare instructions.
56  CMN, // ARM CMN instructions.
57  CMPZ, // ARM compare that sets only Z flag.
58  CMPFP, // ARM VFP compare instruction, sets FPSCR.
59  CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
60  FMSTAT, // ARM fmstat instruction.
61 
62  CMOV, // ARM conditional move instructions.
63 
65 
66  RBIT, // ARM bitreverse instruction
67 
68  SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
69  SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
70  RRX, // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
71 
72  ADDC, // Add with carry
73  ADDE, // Add using carry
74  SUBC, // Sub with carry
75  SUBE, // Sub using carry
76 
77  VMOVRRD, // double to two gprs.
78  VMOVDRR, // Two gprs to double.
79 
80  EH_SJLJ_SETJMP, // SjLj exception handling setjmp.
81  EH_SJLJ_LONGJMP, // SjLj exception handling longjmp.
82 
83  TC_RETURN, // Tail call return pseudo.
84 
86 
87  DYN_ALLOC, // Dynamic allocation on the stack.
88 
89  MEMBARRIER_MCR, // Memory barrier (MCR)
90 
91  PRELOAD, // Preload
92 
93  WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
94 
95  VCEQ, // Vector compare equal.
96  VCEQZ, // Vector compare equal to zero.
97  VCGE, // Vector compare greater than or equal.
98  VCGEZ, // Vector compare greater than or equal to zero.
99  VCLEZ, // Vector compare less than or equal to zero.
100  VCGEU, // Vector compare unsigned greater than or equal.
101  VCGT, // Vector compare greater than.
102  VCGTZ, // Vector compare greater than zero.
103  VCLTZ, // Vector compare less than zero.
104  VCGTU, // Vector compare unsigned greater than.
105  VTST, // Vector test bits.
106 
107  // Vector shift by immediate:
108  VSHL, // ...left
109  VSHRs, // ...right (signed)
110  VSHRu, // ...right (unsigned)
111 
112  // Vector rounding shift by immediate:
113  VRSHRs, // ...right (signed)
114  VRSHRu, // ...right (unsigned)
115  VRSHRN, // ...right narrow
116 
117  // Vector saturating shift by immediate:
118  VQSHLs, // ...left (signed)
119  VQSHLu, // ...left (unsigned)
120  VQSHLsu, // ...left (signed to unsigned)
121  VQSHRNs, // ...right narrow (signed)
122  VQSHRNu, // ...right narrow (unsigned)
123  VQSHRNsu, // ...right narrow (signed to unsigned)
124 
125  // Vector saturating rounding shift by immediate:
126  VQRSHRNs, // ...right narrow (signed)
127  VQRSHRNu, // ...right narrow (unsigned)
128  VQRSHRNsu, // ...right narrow (signed to unsigned)
129 
130  // Vector shift and insert:
131  VSLI, // ...left
132  VSRI, // ...right
133 
134  // Vector get lane (VMOV scalar to ARM core register)
135  // (These are used for 8- and 16-bit element types only.)
136  VGETLANEu, // zero-extend vector extract element
137  VGETLANEs, // sign-extend vector extract element
138 
139  // Vector move immediate and move negated immediate:
142 
143  // Vector move f32 immediate:
145 
146  // Vector duplicate:
149 
150  // Vector shuffles:
151  VEXT, // extract
152  VREV64, // reverse elements within 64-bit doublewords
153  VREV32, // reverse elements within 32-bit words
154  VREV16, // reverse elements within 16-bit halfwords
155  VZIP, // zip (interleave)
156  VUZP, // unzip (deinterleave)
157  VTRN, // transpose
158  VTBL1, // 1-register shuffle with mask
159  VTBL2, // 2-register shuffle with mask
160 
161  // Vector multiply long:
162  VMULLs, // ...signed
163  VMULLu, // ...unsigned
164 
165  UMLAL, // 64bit Unsigned Accumulate Multiply
166  SMLAL, // 64bit Signed Accumulate Multiply
167 
168  // Operands of the standard BUILD_VECTOR node are not legalized, which
169  // is fine if BUILD_VECTORs are always lowered to shuffles or other
170  // operations, but for ARM some BUILD_VECTORs are legal as-is and their
171  // operands need to be legalized. Define an ARM-specific version of
172  // BUILD_VECTOR for this purpose.
174 
175  // Floating-point max and min:
180 
181  // Bit-field insert
183 
184  // Vector OR with immediate
186  // Vector AND with NOT of immediate
188 
189  // Vector bitwise select
191 
192  // Vector load N-element structure to all lanes:
196 
197  // NEON loads with post-increment base updates:
208 
209  // NEON stores with post-increment base updates:
217  };
218  }
219 
220  /// Define some predicates that are used for node matching.
221  namespace ARM {
// Predicate consulted during instruction selection. From the name and the
// "node matching" comment above, v is expected to be an inverted bit-field
// mask (i.e. ~v is a single contiguous run of set bits, the form BFI/BFC
// patterns consume) — TODO(review): confirm against the definition in
// ARMISelLowering.cpp; the body is not visible in this listing.
222  bool isBitFieldInvertedMask(unsigned v);
223  }
224 
225  //===--------------------------------------------------------------------===//
226  // ARMTargetLowering - ARM Implementation of the TargetLowering interface
227 
229  public:
230  explicit ARMTargetLowering(const TargetMachine &TM,
231  const ARMSubtarget &STI);
232 
233  unsigned getJumpTableEncoding() const override;
234  bool useSoftFloat() const override;
235 
236  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
237 
238  /// ReplaceNodeResults - Replace the results of node with an illegal result
239  /// type with new values built out of custom code.
240  ///
242  SelectionDAG &DAG) const override;
243 
244  const char *getTargetNodeName(unsigned Opcode) const override;
245 
246  bool isSelectSupported(SelectSupportKind Kind) const override {
247  // ARM does not support scalar condition selects on vectors.
248  return (Kind != ScalarCondVectorVal);
249  }
250 
251  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
253  EVT VT) const override;
254 
257  MachineBasicBlock *MBB) const override;
258 
260  SDNode *Node) const override;
261 
263  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
264 
265  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;
266 
267  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
268  /// unaligned memory accesses of the specified type. Returns whether it
269  /// is "fast" by reference in the second argument.
270  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
271  unsigned Align,
272  bool *Fast) const override;
273 
274  EVT getOptimalMemOpType(uint64_t Size,
275  unsigned DstAlign, unsigned SrcAlign,
276  bool IsMemset, bool ZeroMemset,
277  bool MemcpyStrSrc,
278  MachineFunction &MF) const override;
279 
281  bool isZExtFree(SDValue Val, EVT VT2) const override;
282 
283  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;
284 
285  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
286 
287 
288  /// isLegalAddressingMode - Return true if the addressing mode represented
289  /// by AM is legal for this target, for a load/store of the specified type.
290  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
291  Type *Ty, unsigned AS) const override;
292  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
293 
294  /// isLegalICmpImmediate - Return true if the specified immediate is legal
295  /// icmp immediate, that is the target has icmp instructions which can
296  /// compare a register against the immediate without having to materialize
297  /// the immediate into a register.
298  bool isLegalICmpImmediate(int64_t Imm) const override;
299 
300  /// isLegalAddImmediate - Return true if the specified immediate is legal
301  /// add immediate, that is the target has add instructions which can
302  /// add a register and the immediate without having to materialize
303  /// the immediate into a register.
304  bool isLegalAddImmediate(int64_t Imm) const override;
305 
306  /// getPreIndexedAddressParts - returns true by value, base pointer and
307  /// offset pointer and addressing mode by reference if the node's address
308  /// can be legally represented as pre-indexed load / store address.
309  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
311  SelectionDAG &DAG) const override;
312 
313  /// getPostIndexedAddressParts - returns true by value, base pointer and
314  /// offset pointer and addressing mode by reference if this node can be
315  /// combined with a load / store to form a post-indexed load / store.
316  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
317  SDValue &Offset, ISD::MemIndexedMode &AM,
318  SelectionDAG &DAG) const override;
319 
320  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
321  APInt &KnownOne,
322  const SelectionDAG &DAG,
323  unsigned Depth) const override;
324 
325 
326  bool ExpandInlineAsm(CallInst *CI) const override;
327 
328  ConstraintType getConstraintType(StringRef Constraint) const override;
329 
330  /// Examine constraint string and operand type and determine a weight value.
331  /// The operand object must already have been set up with the operand type.
333  AsmOperandInfo &info, const char *constraint) const override;
334 
335  std::pair<unsigned, const TargetRegisterClass *>
337  StringRef Constraint, MVT VT) const override;
338 
339  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
340  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
341  /// true it means one of the asm constraint of the inline asm instruction
342  /// being processed is 'm'.
343  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
344  std::vector<SDValue> &Ops,
345  SelectionDAG &DAG) const override;
346 
347  unsigned
// Maps ARM inline-asm memory constraint strings ("Q", "Um", "Un", "Uq",
// "Us", "Ut", "Uv", "Uy") to InlineAsm constraint codes, deferring any
// other string to the base TargetLowering implementation.
//
// NOTE(review): this rendering of the header is missing lines — the
// embedded doxygen line numbers jump (350, 357, 359, 361, 363, 365, 367,
// 369 are absent). Those stripped lines are the per-branch
// `return InlineAsm::Constraint_*;` statements (Constraint_Q for "Q",
// Constraint_Um for 'Um', and so on). As shown, the `if`/`case` bodies
// fall through incorrectly; restore the returns before compiling.
348  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
349  if (ConstraintCode == "Q")
351  else if (ConstraintCode.size() == 2) {
352  if (ConstraintCode[0] == 'U') {
353  switch(ConstraintCode[1]) {
354  default:
355  break;
356  case 'm':
358  case 'n':
360  case 'q':
362  case 's':
364  case 't':
366  case 'v':
368  case 'y':
370  }
371  }
372  }
// Unrecognized constraints are handled by the generic implementation.
373  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
374  }
375 
376  const ARMSubtarget* getSubtarget() const {
377  return Subtarget;
378  }
379 
380  /// getRegClassFor - Return the register class that should be used for the
381  /// specified value type.
382  const TargetRegisterClass *getRegClassFor(MVT VT) const override;
383 
384  /// Returns true if a cast between SrcAS and DestAS is a noop.
385  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
386  // Addrspacecasts are always noops.
387  return true;
388  }
389 
390  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
391  unsigned &PrefAlign) const override;
392 
393  /// createFastISel - This method returns a target specific FastISel object,
394  /// or null if the target does not support "fast" ISel.
396  const TargetLibraryInfo *libInfo) const override;
397 
399 
400  bool
401  isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;
402  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
403 
404  /// isFPImmLegal - Returns true if the target can instruction select the
405  /// specified FP immediate natively. If false, the legalizer will
406  /// materialize the FP immediate as a load from a constant pool.
407  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
408 
409  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
410  const CallInst &I,
411  unsigned Intrinsic) const override;
412 
413  /// \brief Returns true if it is beneficial to convert a load of a constant
414  /// to just the constant itself.
416  Type *Ty) const override;
417 
418  /// \brief Returns true if an argument of type Ty needs to be passed in a
419  /// contiguous block of registers in calling convention CallConv.
421  Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;
422 
423  bool hasLoadLinkedStoreConditional() const override;
424  Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
425  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
426  AtomicOrdering Ord) const override;
428  Value *Addr, AtomicOrdering Ord) const override;
429 
431  bool IsStore, bool IsLoad) const override;
433  bool IsStore, bool IsLoad) const override;
434 
435  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
436 
439  ArrayRef<unsigned> Indices,
440  unsigned Factor) const override;
442  unsigned Factor) const override;
443 
444  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
445  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
447  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
448 
449  bool useLoadStackGuardNode() const override;
450 
451  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
452  unsigned &Cost) const override;
453 
454  protected:
455  std::pair<const TargetRegisterClass *, uint8_t>
457  MVT VT) const override;
458 
459  private:
460  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
461  /// make the right decision when generating code for different targets.
462  const ARMSubtarget *Subtarget;
463 
464  const TargetRegisterInfo *RegInfo;
465 
466  const InstrItineraryData *Itins;
467 
468  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
469  ///
470  unsigned ARMPCLabelIndex;
471 
472  void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
473  void addDRTypeForNEON(MVT VT);
474  void addQRTypeForNEON(MVT VT);
475  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG, SDValue &ARMcc) const;
476 
477  typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
478  void PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
479  SDValue Chain, SDValue &Arg,
480  RegsToPassVector &RegsToPass,
481  CCValAssign &VA, CCValAssign &NextVA,
482  SDValue &StackPtr,
483  SmallVectorImpl<SDValue> &MemOpChains,
484  ISD::ArgFlagsTy Flags) const;
485  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
486  SDValue &Root, SelectionDAG &DAG,
487  SDLoc dl) const;
488 
489  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
490  bool isVarArg) const;
491  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
492  bool isVarArg) const;
493  SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
494  SDLoc dl, SelectionDAG &DAG,
495  const CCValAssign &VA,
496  ISD::ArgFlagsTy Flags) const;
497  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
498  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
499  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
500  const ARMSubtarget *Subtarget) const;
501  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
502  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
503  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
504  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
505  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
506  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
507  SelectionDAG &DAG) const;
508  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
509  SelectionDAG &DAG,
510  TLSModel::Model model) const;
511  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
512  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
513  SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const;
514  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
515  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
516  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
517  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
518  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
519  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
520  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
521  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
522  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
523  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
524  const ARMSubtarget *ST) const;
525  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
526  const ARMSubtarget *ST) const;
527  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
528  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
529  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
530  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
531  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
532  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
533  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
534 
535  unsigned getRegisterByName(const char* RegName, EVT VT,
536  SelectionDAG &DAG) const override;
537 
538  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
539  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
540  /// expanded to FMAs when this method returns true, otherwise fmuladd is
541  /// expanded to fmul + fadd.
542  ///
543  /// ARM supports both fused and unfused multiply-add operations; we already
544  /// lower a pair of fmul and fadd to the latter so it's not clear that there
545  /// would be a gain or that the gain would be worthwhile enough to risk
546  /// correctness bugs.
547  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }
548 
549  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;
550 
551  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
552  CallingConv::ID CallConv, bool isVarArg,
553  const SmallVectorImpl<ISD::InputArg> &Ins,
554  SDLoc dl, SelectionDAG &DAG,
555  SmallVectorImpl<SDValue> &InVals,
556  bool isThisReturn, SDValue ThisVal) const;
557 
558  SDValue
559  LowerFormalArguments(SDValue Chain,
560  CallingConv::ID CallConv, bool isVarArg,
561  const SmallVectorImpl<ISD::InputArg> &Ins,
562  SDLoc dl, SelectionDAG &DAG,
563  SmallVectorImpl<SDValue> &InVals) const override;
564 
565  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
566  SDLoc dl, SDValue &Chain,
567  const Value *OrigArg,
568  unsigned InRegsParamRecordIdx,
569  int ArgOffset,
570  unsigned ArgSize) const;
571 
572  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
573  SDLoc dl, SDValue &Chain,
574  unsigned ArgOffset,
575  unsigned TotalArgRegsSaveSize,
576  bool ForceMutable = false) const;
577 
578  SDValue
579  LowerCall(TargetLowering::CallLoweringInfo &CLI,
580  SmallVectorImpl<SDValue> &InVals) const override;
581 
582  /// HandleByVal - Target-specific cleanup for ByVal support.
583  void HandleByVal(CCState *, unsigned &, unsigned) const override;
584 
585  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
586  /// for tail call optimization. Targets which want to do tail call
587  /// optimization should implement this function.
588  bool IsEligibleForTailCallOptimization(SDValue Callee,
589  CallingConv::ID CalleeCC,
590  bool isVarArg,
591  bool isCalleeStructRet,
592  bool isCallerStructRet,
593  const SmallVectorImpl<ISD::OutputArg> &Outs,
594  const SmallVectorImpl<SDValue> &OutVals,
595  const SmallVectorImpl<ISD::InputArg> &Ins,
596  SelectionDAG& DAG) const;
597 
598  bool CanLowerReturn(CallingConv::ID CallConv,
599  MachineFunction &MF, bool isVarArg,
600  const SmallVectorImpl<ISD::OutputArg> &Outs,
601  LLVMContext &Context) const override;
602 
603  SDValue
604  LowerReturn(SDValue Chain,
605  CallingConv::ID CallConv, bool isVarArg,
606  const SmallVectorImpl<ISD::OutputArg> &Outs,
607  const SmallVectorImpl<SDValue> &OutVals,
608  SDLoc dl, SelectionDAG &DAG) const override;
609 
610  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
611 
612  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
613 
614  SDValue getCMOV(SDLoc dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
615  SDValue ARMcc, SDValue CCR, SDValue Cmp,
616  SelectionDAG &DAG) const;
617  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
618  SDValue &ARMcc, SelectionDAG &DAG, SDLoc dl) const;
619  SDValue getVFPCmp(SDValue LHS, SDValue RHS,
620  SelectionDAG &DAG, SDLoc dl) const;
621  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;
622 
623  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
624 
625  void SetupEntryBlockForSjLj(MachineInstr *MI,
626  MachineBasicBlock *MBB,
627  MachineBasicBlock *DispatchBB, int FI) const;
628 
629  void EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const;
630 
631  bool RemapAddSubWithFlags(MachineInstr *MI, MachineBasicBlock *BB) const;
632 
633  MachineBasicBlock *EmitStructByval(MachineInstr *MI,
634  MachineBasicBlock *MBB) const;
635 
636  MachineBasicBlock *EmitLowered__chkstk(MachineInstr *MI,
637  MachineBasicBlock *MBB) const;
638  };
639 
644  };
645 
646  namespace ARM {
647  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
648  const TargetLibraryInfo *libInfo);
649  }
650 }
651 
652 #endif // ARMISELLOWERING_H
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:724
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:104
TargetLoweringBase::AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
size_t size() const
size - Get the string size.
Definition: StringRef.h:113
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
CallInst - This class represents a function call, abstracting a target machine's calling convention...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
ShuffleVectorInst - This instruction constructs a fixed permutation of two input vectors.
virtual bool isZExtFree(Type *, Type *) const
Return true if any actual instruction that defines a value of type Ty1 implicitly zero-extends the value to Ty2 in the result register.
LoadInst - an instruction for reading from memory.
Definition: Instructions.h:177
AtomicRMWInst - an instruction that atomically reads a memory location, combines it with another valu...
Definition: Instructions.h:674
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from Ty1 to Ty2 is permitted when deciding whether a call is in tail posi...
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition: CallingConv.h:24
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
AtomicRMWExpansionKind
Enum that specifies what a AtomicRMWInst is expanded to, if at all.
lazy value info
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APInt.h:33
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:517
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
AtomicOrdering
Definition: Instructions.h:38
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns true if the given (atomic) load should be expanded by the IR-level AtomicExpand pass into a l...
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:30
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
void AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: ArrayRef.h:31
Itinerary data supplied by a subtarget to be used by a target.
StoreInst - an instruction for storing to memory.
Definition: Instructions.h:316
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:804
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition: APFloat.h:122
bool useSoftFloat() const override
MVT - Machine Value Type.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:41
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
EVT - Extended Value Type.
Definition: ValueTypes.h:31
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
const ARMSubtarget * getSubtarget() const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:179
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
static const int FIRST_TARGET_MEMORY_OPCODE
FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations which do not reference a specific me...
Definition: ISDOpcodes.h:731
Represents one node in the SelectionDAG.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
Class for arbitrary precision integers.
Definition: APInt.h:73
Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AddrMode
ARM Addressing Modes.
Definition: ARMBaseInfo.h:235
const TargetRegisterClass * getRegClassFor(MVT VT) const override
getRegClassFor - Return the register class that should be used for the specified value type...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
Representation of each machine instruction.
Definition: MachineInstr.h:51
SelectSupportKind
Enum that describes what type of support for selects the target has.
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool hasLoadLinkedStoreConditional() const override
True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional and expand AtomicCmpXchgInst...
const ARM::ArchExtKind Kind
bool isSelectSupported(SelectSupportKind Kind) const override
bool isShuffleMaskLegal(const SmallVectorImpl< int > &M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
LLVM Value Representation.
Definition: Value.h:69
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target...
Primary interface to the complete machine description for the target machine.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:40
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
bool isBitFieldInvertedMask(unsigned v)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
This file describes how to lower LLVM code to machine code.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:761