LLVM  3.7.0
TargetLowering.h
Go to the documentation of this file.
1 //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// \file
11 /// This file describes how to lower LLVM code to machine code. This has three
12 /// main components:
13 ///
14 /// 1. Which ValueTypes are natively supported by the target.
15 /// 2. Which operations are supported for supported ValueTypes.
16 /// 3. Cost thresholds for alternative implementations of certain operations.
17 ///
18 /// In addition it has a few other components, like information about FP
19 /// immediates.
20 ///
21 //===----------------------------------------------------------------------===//
22 
23 #ifndef LLVM_TARGET_TARGETLOWERING_H
24 #define LLVM_TARGET_TARGETLOWERING_H
25 
26 #include "llvm/ADT/DenseMap.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/InlineAsm.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/MC/MCRegisterInfo.h"
39 #include <climits>
40 #include <map>
41 #include <vector>
42 
43 namespace llvm {
44  class CallInst;
45  class CCState;
46  class FastISel;
47  class FunctionLoweringInfo;
48  class ImmutableCallSite;
49  class IntrinsicInst;
50  class MachineBasicBlock;
51  class MachineFunction;
52  class MachineInstr;
53  class MachineJumpTableInfo;
54  class MachineLoop;
55  class Mangler;
56  class MCContext;
57  class MCExpr;
58  class MCSymbol;
59  template<typename T> class SmallVectorImpl;
60  class DataLayout;
61  class TargetRegisterClass;
62  class TargetLibraryInfo;
63  class TargetLoweringObjectFile;
64  class Value;
65 
66  namespace Sched {
67  enum Preference {
68  None, // No preference
69  Source, // Follow source order.
70  RegPressure, // Scheduling for lowest register pressure.
71  Hybrid, // Scheduling for both latency and register pressure.
72  ILP, // Scheduling for ILP in low register pressure mode.
73  VLIW // Scheduling for VLIW targets.
74  };
75  }
76 
77 /// This base class for TargetLowering contains the SelectionDAG-independent
78 /// parts that can be used from the rest of CodeGen.
80  TargetLoweringBase(const TargetLoweringBase&) = delete;
81  void operator=(const TargetLoweringBase&) = delete;
82 
83 public:
84  /// This enum indicates whether operations are valid for a target, and if not,
85  /// what action should be used to make them valid.
87  Legal, // The target natively supports this operation.
88  Promote, // This operation should be executed in a larger type.
89  Expand, // Try to expand this to other ops, otherwise use a libcall.
90  Custom // Use the LowerOperation hook to implement custom lowering.
91  };
92 
93  /// This enum indicates whether a types are legal for a target, and if not,
94  /// what action should be used to make them valid.
96  TypeLegal, // The target natively supports this type.
97  TypePromoteInteger, // Replace this integer with a larger one.
98  TypeExpandInteger, // Split this integer into two of half the size.
99  TypeSoftenFloat, // Convert this float to a same size integer type.
100  TypeExpandFloat, // Split this float into two of half the size.
101  TypeScalarizeVector, // Replace this one-element vector with its element.
102  TypeSplitVector, // Split this vector into two of half the size.
103  TypeWidenVector, // This vector should be widened into a larger vector.
104  TypePromoteFloat // Replace this float with a larger one.
105  };
106 
107  /// LegalizeKind holds the legalization kind that needs to happen to EVT
108  /// in order to type-legalize it.
109  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
110 
111  /// Enum that describes how the target represents true/false values.
113  UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
114  ZeroOrOneBooleanContent, // All bits zero except for bit 0.
115  ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
116  };
117 
118  /// Enum that describes what type of support for selects the target has.
120  ScalarValSelect, // The target supports scalar selects (ex: cmov).
121  ScalarCondVectorVal, // The target supports selects with a scalar condition
122  // and vector values (ex: cmov).
123  VectorMaskSelect // The target supports vector selects with a vector
124  // mask (ex: x86 blends).
125  };
126 
127  /// Enum that specifies what a AtomicRMWInst is expanded to, if at all. Exists
128  /// because different targets have different levels of support for these
129  /// atomic RMW instructions, and also have different options w.r.t. what they
130  /// should expand to.
132  None, // Don't expand the instruction.
133  LLSC, // Expand the instruction into loadlinked/storeconditional; used
134  // by ARM/AArch64. Implies `hasLoadLinkedStoreConditional`
135  // returns true.
136  CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
137  };
138 
140  switch (Content) {
142  // Extend by adding rubbish bits.
143  return ISD::ANY_EXTEND;
145  // Extend by adding zero bits.
146  return ISD::ZERO_EXTEND;
148  // Extend by copying the sign bit.
149  return ISD::SIGN_EXTEND;
150  }
151  llvm_unreachable("Invalid content kind");
152  }
153 
154  /// NOTE: The TargetMachine owns TLOF.
155  explicit TargetLoweringBase(const TargetMachine &TM);
156  virtual ~TargetLoweringBase() {}
157 
158 protected:
159  /// \brief Initialize all of the actions to default values.
160  void initActions();
161 
162 public:
163  const TargetMachine &getTargetMachine() const { return TM; }
164 
165  virtual bool useSoftFloat() const { return false; }
166 
167  /// Return the pointer type for the given address space, defaults to
168  /// the pointer type from the data layout.
169  /// FIXME: The default needs to be removed once all the code is updated.
170  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
172  }
173 
174  /// EVT is not used in-tree, but is used by out-of-tree target.
175  /// A documentation for this function would be nice...
176  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
177 
178  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
179 
180  /// Returns the type to be used for the index operand of:
181  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
182  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
183  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
184  return getPointerTy(DL);
185  }
186 
187  /// Return true if the select operation is expensive for this target.
188  bool isSelectExpensive() const { return SelectIsExpensive; }
189 
190  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
191  return true;
192  }
193 
194  /// Return true if multiple condition registers are available.
196  return HasMultipleConditionRegisters;
197  }
198 
199  /// Return true if the target has BitExtract instructions.
200  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
201 
202  /// Return the preferred vector type legalization action.
205  // The default action for one element vectors is to scalarize
206  if (VT.getVectorNumElements() == 1)
207  return TypeScalarizeVector;
208  // The default action for other vectors is to promote
209  return TypePromoteInteger;
210  }
211 
212  // There are two general methods for expanding a BUILD_VECTOR node:
213  // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
214  // them together.
215  // 2. Build the vector on the stack and then load it.
216  // If this function returns true, then method (1) will be used, subject to
217  // the constraint that all of the necessary shuffles are legal (as determined
218  // by isShuffleMaskLegal). If this function returns false, then method (2) is
219  // always used. The vector type, and the number of defined values, are
220  // provided.
221  virtual bool
223  unsigned DefinedValues) const {
224  return DefinedValues < 3;
225  }
226 
227  /// Return true if integer divide is usually cheaper than a sequence of
228  /// several shifts, adds, and multiplies for this target.
229  bool isIntDivCheap() const { return IntDivIsCheap; }
230 
231  /// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x)
232  bool isFsqrtCheap() const {
233  return FsqrtIsCheap;
234  }
235 
236  /// Returns true if target has indicated at least one type should be bypassed.
237  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
238 
239  /// Returns map of slow types for division or remainder with corresponding
240  /// fast types
242  return BypassSlowDivWidths;
243  }
244 
245  /// Return true if pow2 sdiv is cheaper than a chain of sra/srl/add/sra.
246  bool isPow2SDivCheap() const { return Pow2SDivIsCheap; }
247 
248  /// Return true if Flow Control is an expensive operation that should be
249  /// avoided.
250  bool isJumpExpensive() const { return JumpIsExpensive; }
251 
252  /// Return true if selects are only cheaper than branches if the branch is
253  /// unlikely to be predicted right.
256  }
257 
258  /// isLoadBitCastBeneficial() - Return true if the following transform
259  /// is beneficial.
260  /// fold (conv (load x)) -> (load (conv*)x)
261  /// On architectures that don't natively support some vector loads
262  /// efficiently, casting the load to a smaller vector of larger types and
263  /// loading is more efficient, however, this can be undone by optimizations in
264  /// dag combiner.
265  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
266  EVT /* Bitcast */) const {
267  return true;
268  }
269 
270  /// Return true if it is expected to be cheaper to do a store of a non-zero
271  /// vector constant with the given size and type for the address space than to
272  /// store the individual scalar element constants.
273  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
274  unsigned NumElem,
275  unsigned AddrSpace) const {
276  return false;
277  }
278 
279  /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
280  virtual bool isCheapToSpeculateCttz() const {
281  return false;
282  }
283 
284  /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
285  virtual bool isCheapToSpeculateCtlz() const {
286  return false;
287  }
288 
289  /// \brief Return if the target supports combining a
290  /// chain like:
291  /// \code
292  /// %andResult = and %val1, #imm-with-one-bit-set;
293  /// %icmpResult = icmp %andResult, 0
294  /// br i1 %icmpResult, label %dest1, label %dest2
295  /// \endcode
296  /// into a single machine instruction of a form like:
297  /// \code
298  /// brOnBitSet %register, #bitNumber, dest
299  /// \endcode
302  }
303 
304  /// \brief Return true if the target wants to use the optimization that
305  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
306  /// promotedInst1(...(promotedInstN(ext(load)))).
308 
309  /// Return true if the target can combine store(extractelement VectorTy,
310  /// Idx).
311  /// \p Cost[out] gives the cost of that transformation when this is true.
312  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
313  unsigned &Cost) const {
314  return false;
315  }
316 
317  /// Return true if target supports floating point exceptions.
319  return HasFloatingPointExceptions;
320  }
321 
322  /// Return true if target always beneficiates from combining into FMA for a
323  /// given value type. This must typically return false on targets where FMA
324  /// takes more cycles to execute than FADD.
325  virtual bool enableAggressiveFMAFusion(EVT VT) const {
326  return false;
327  }
328 
329  /// Return the ValueType of the result of SETCC operations.
330  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
331  EVT VT) const;
332 
333  /// Return the ValueType for comparison libcalls. Comparions libcalls include
334  /// floating point comparion calls, and Ordered/Unordered check calls on
335  /// floating point numbers.
336  virtual
338 
339  /// For targets without i1 registers, this gives the nature of the high-bits
340  /// of boolean values held in types wider than i1.
341  ///
342  /// "Boolean values" are special true/false values produced by nodes like
343  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
344  /// Not to be confused with general values promoted from i1. Some cpus
345  /// distinguish between vectors of boolean and scalars; the isVec parameter
346  /// selects between the two kinds. For example on X86 a scalar boolean should
347  /// be zero extended from i1, while the elements of a vector of booleans
348  /// should be sign extended from i1.
349  ///
350  /// Some cpus also treat floating point types the same way as they treat
351  /// vectors instead of the way they treat scalars.
352  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
353  if (isVec)
354  return BooleanVectorContents;
355  return isFloat ? BooleanFloatContents : BooleanContents;
356  }
357 
359  return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
360  }
361 
362  /// Return target scheduling preference.
364  return SchedPreferenceInfo;
365  }
366 
367  /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
368  /// for different nodes. This function returns the preference (or none) for
369  /// the given node.
371  return Sched::None;
372  }
373 
374  /// Return the register class that should be used for the specified value
375  /// type.
376  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
377  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
378  assert(RC && "This value type is not natively supported!");
379  return RC;
380  }
381 
382  /// Return the 'representative' register class for the specified value
383  /// type.
384  ///
385  /// The 'representative' register class is the largest legal super-reg
386  /// register class for the register class of the value type. For example, on
387  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
388  /// register class is GR64 on x86_64.
389  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
390  const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
391  return RC;
392  }
393 
394  /// Return the cost of the 'representative' register class for the specified
395  /// value type.
396  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
397  return RepRegClassCostForVT[VT.SimpleTy];
398  }
399 
400  /// Return true if the target has native support for the specified value type.
401  /// This means that it has a register that directly holds it without
402  /// promotions or expansions.
403  bool isTypeLegal(EVT VT) const {
404  assert(!VT.isSimple() ||
405  (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
406  return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
407  }
408 
410  /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
411  /// that indicates how instruction selection should deal with the type.
412  uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
413 
414  public:
416  std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
417  }
418 
420  return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
421  }
422 
424  unsigned I = VT.SimpleTy;
425  ValueTypeActions[I] = Action;
426  }
427  };
428 
430  return ValueTypeActions;
431  }
432 
433  /// Return how we should legalize values of this type, either it is already
434  /// legal (return 'Legal') or we need to promote it to a larger type (return
435  /// 'Promote'), or we need to expand it into multiple registers of smaller
436  /// integer type (return 'Expand'). 'Custom' is not an option.
438  return getTypeConversion(Context, VT).first;
439  }
441  return ValueTypeActions.getTypeAction(VT);
442  }
443 
444  /// For types supported by the target, this is an identity function. For
445  /// types that must be promoted to larger types, this returns the larger type
446  /// to promote to. For integer types that are larger than the largest integer
447  /// register, this contains one step in the expansion to get to the smaller
448  /// register. For illegal floating point types, this returns the integer type
449  /// to transform to.
450  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
451  return getTypeConversion(Context, VT).second;
452  }
453 
454  /// For types supported by the target, this is an identity function. For
455  /// types that must be expanded (i.e. integer types that are larger than the
456  /// largest integer register or illegal floating point types), this returns
457  /// the largest legal type it will be expanded to.
458  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
459  assert(!VT.isVector());
460  while (true) {
461  switch (getTypeAction(Context, VT)) {
462  case TypeLegal:
463  return VT;
464  case TypeExpandInteger:
465  VT = getTypeToTransformTo(Context, VT);
466  break;
467  default:
468  llvm_unreachable("Type is not legal nor is it to be expanded!");
469  }
470  }
471  }
472 
473  /// Vector types are broken down into some number of legal first class types.
474  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
475  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
476  /// turns into 4 EVT::i32 values with both PPC and X86.
477  ///
478  /// This method returns the number of registers needed, and the VT for each
479  /// register. It also returns the VT and quantity of the intermediate values
480  /// before they are promoted/expanded.
481  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
482  EVT &IntermediateVT,
483  unsigned &NumIntermediates,
484  MVT &RegisterVT) const;
485 
486  struct IntrinsicInfo {
487  unsigned opc; // target opcode
488  EVT memVT; // memory VT
489  const Value* ptrVal; // value representing memory location
490  int offset; // offset off of ptrVal
491  unsigned size; // the size of the memory location
492  // (taken from memVT if zero)
493  unsigned align; // alignment
494  bool vol; // is volatile?
495  bool readMem; // reads memory?
496  bool writeMem; // writes memory?
497 
498  IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
500  };
501 
502  /// Given an intrinsic, checks if on the target the intrinsic will need to map
503  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
504  /// true and store the intrinsic information into the IntrinsicInfo that was
505  /// passed to the function.
506  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
507  unsigned /*Intrinsic*/) const {
508  return false;
509  }
510 
511  /// Returns true if the target can instruction select the specified FP
512  /// immediate natively. If false, the legalizer will materialize the FP
513  /// immediate as a load from a constant pool.
514  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
515  return false;
516  }
517 
518  /// Targets can use this to indicate that they only support *some*
519  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
520  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
521  /// legal.
522  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
523  EVT /*VT*/) const {
524  return true;
525  }
526 
527  /// Returns true if the operation can trap for the value type.
528  ///
529  /// VT must be a legal type. By default, we optimistically assume most
530  /// operations don't trap except for divide and remainder.
531  virtual bool canOpTrap(unsigned Op, EVT VT) const;
532 
533  /// Similar to isShuffleMaskLegal. This is used by Targets can use this to
534  /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to replace
535  /// a VAND with a constant pool entry.
536  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
537  EVT /*VT*/) const {
538  return false;
539  }
540 
541  /// Return how this operation should be treated: either it is legal, needs to
542  /// be promoted to a larger size, needs to be expanded to some other code
543  /// sequence, or the target has a custom expander for it.
544  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
545  if (VT.isExtended()) return Expand;
546  // If a target-specific SDNode requires legalization, require the target
547  // to provide custom legalization for it.
548  if (Op > array_lengthof(OpActions[0])) return Custom;
549  unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
550  return (LegalizeAction)OpActions[I][Op];
551  }
552 
553  /// Return true if the specified operation is legal on this target or can be
554  /// made legal with custom lowering. This is used to help guide high-level
555  /// lowering decisions.
556  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
557  return (VT == MVT::Other || isTypeLegal(VT)) &&
558  (getOperationAction(Op, VT) == Legal ||
559  getOperationAction(Op, VT) == Custom);
560  }
561 
562  /// Return true if the specified operation is legal on this target or can be
563  /// made legal using promotion. This is used to help guide high-level lowering
564  /// decisions.
565  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
566  return (VT == MVT::Other || isTypeLegal(VT)) &&
567  (getOperationAction(Op, VT) == Legal ||
568  getOperationAction(Op, VT) == Promote);
569  }
570 
571  /// Return true if the specified operation is illegal on this target or
572  /// unlikely to be made legal with custom lowering. This is used to help guide
573  /// high-level lowering decisions.
574  bool isOperationExpand(unsigned Op, EVT VT) const {
575  return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
576  }
577 
578  /// Return true if the specified operation is legal on this target.
579  bool isOperationLegal(unsigned Op, EVT VT) const {
580  return (VT == MVT::Other || isTypeLegal(VT)) &&
581  getOperationAction(Op, VT) == Legal;
582  }
583 
584  /// Return how this load with extension should be treated: either it is legal,
585  /// needs to be promoted to a larger size, needs to be expanded to some other
586  /// code sequence, or the target has a custom expander for it.
587  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
588  EVT MemVT) const {
589  if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
590  unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
591  unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
592  assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
593  MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
594  return (LegalizeAction)LoadExtActions[ValI][MemI][ExtType];
595  }
596 
597  /// Return true if the specified load with extension is legal on this target.
598  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
599  return ValVT.isSimple() && MemVT.isSimple() &&
600  getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
601  }
602 
603  /// Return true if the specified load with extension is legal or custom
604  /// on this target.
605  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
606  return ValVT.isSimple() && MemVT.isSimple() &&
607  (getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
608  getLoadExtAction(ExtType, ValVT, MemVT) == Custom);
609  }
610 
611  /// Return how this store with truncation should be treated: either it is
612  /// legal, needs to be promoted to a larger size, needs to be expanded to some
613  /// other code sequence, or the target has a custom expander for it.
615  if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
616  unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
617  unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
618  assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
619  "Table isn't big enough!");
620  return (LegalizeAction)TruncStoreActions[ValI][MemI];
621  }
622 
623  /// Return true if the specified store with truncation is legal on this
624  /// target.
625  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
626  return isTypeLegal(ValVT) && MemVT.isSimple() &&
627  getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
628  }
629 
630  /// Return how the indexed load should be treated: either it is legal, needs
631  /// to be promoted to a larger size, needs to be expanded to some other code
632  /// sequence, or the target has a custom expander for it.
634  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
635  assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
636  "Table isn't big enough!");
637  unsigned Ty = (unsigned)VT.SimpleTy;
638  return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
639  }
640 
641  /// Return true if the specified indexed load is legal on this target.
642  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
643  return VT.isSimple() &&
644  (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
645  getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
646  }
647 
648  /// Return how the indexed store should be treated: either it is legal, needs
649  /// to be promoted to a larger size, needs to be expanded to some other code
650  /// sequence, or the target has a custom expander for it.
652  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
653  assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
654  "Table isn't big enough!");
655  unsigned Ty = (unsigned)VT.SimpleTy;
656  return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
657  }
658 
659  /// Return true if the specified indexed load is legal on this target.
660  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
661  return VT.isSimple() &&
662  (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
663  getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
664  }
665 
666  /// Return how the condition code should be treated: either it is legal, needs
667  /// to be expanded to some other code sequence, or the target has a custom
668  /// expander for it.
671  assert((unsigned)CC < array_lengthof(CondCodeActions) &&
672  ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
673  "Table isn't big enough!");
674  // See setCondCodeAction for how this is encoded.
675  uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
676  uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
677  LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
678  assert(Action != Promote && "Can't promote condition code!");
679  return Action;
680  }
681 
682  /// Return true if the specified condition code is legal on this target.
683  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
684  return
685  getCondCodeAction(CC, VT) == Legal ||
686  getCondCodeAction(CC, VT) == Custom;
687  }
688 
689 
690  /// If the action for this operation is to promote, this method returns the
691  /// ValueType to promote to.
692  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
693  assert(getOperationAction(Op, VT) == Promote &&
694  "This operation isn't promoted!");
695 
696  // See if this has an explicit type specified.
697  std::map<std::pair<unsigned, MVT::SimpleValueType>,
698  MVT::SimpleValueType>::const_iterator PTTI =
699  PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
700  if (PTTI != PromoteToType.end()) return PTTI->second;
701 
702  assert((VT.isInteger() || VT.isFloatingPoint()) &&
703  "Cannot autopromote this type, add it with AddPromotedToType.");
704 
705  MVT NVT = VT;
706  do {
707  NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
708  assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
709  "Didn't find type to promote to!");
710  } while (!isTypeLegal(NVT) ||
711  getOperationAction(Op, NVT) == Promote);
712  return NVT;
713  }
714 
715  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
716  /// operations except for the pointer size. If AllowUnknown is true, this
717  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
718  /// otherwise it will assert.
720  bool AllowUnknown = false) const {
721  // Lower scalar pointers to native pointer types.
722  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
723  return getPointerTy(DL, PTy->getAddressSpace());
724 
725  if (Ty->isVectorTy()) {
726  VectorType *VTy = cast<VectorType>(Ty);
727  Type *Elm = VTy->getElementType();
728  // Lower vectors of pointers to native pointer types.
729  if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
730  EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
731  Elm = PointerTy.getTypeForEVT(Ty->getContext());
732  }
733 
734  return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
735  VTy->getNumElements());
736  }
737  return EVT::getEVT(Ty, AllowUnknown);
738  }
739 
740  /// Return the MVT corresponding to this LLVM type. See getValueType.
742  bool AllowUnknown = false) const {
743  return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
744  }
745 
746  /// Return the desired alignment for ByVal or InAlloca aggregate function
747  /// arguments in the caller parameter area. This is the actual alignment, not
748  /// its logarithm.
749  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
750 
751  /// Return the type of registers that this ValueType will eventually require.
752  MVT getRegisterType(MVT VT) const {
753  assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
754  return RegisterTypeForVT[VT.SimpleTy];
755  }
756 
757  /// Return the type of registers that this ValueType will eventually require.
758  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
759  if (VT.isSimple()) {
760  assert((unsigned)VT.getSimpleVT().SimpleTy <
761  array_lengthof(RegisterTypeForVT));
762  return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
763  }
764  if (VT.isVector()) {
765  EVT VT1;
766  MVT RegisterVT;
767  unsigned NumIntermediates;
768  (void)getVectorTypeBreakdown(Context, VT, VT1,
769  NumIntermediates, RegisterVT);
770  return RegisterVT;
771  }
772  if (VT.isInteger()) {
773  return getRegisterType(Context, getTypeToTransformTo(Context, VT));
774  }
775  llvm_unreachable("Unsupported extended type!");
776  }
777 
778  /// Return the number of registers that this ValueType will eventually
779  /// require.
780  ///
781  /// This is one for any types promoted to live in larger registers, but may be
782  /// more than one for types (like i64) that are split into pieces. For types
783  /// like i140, which are first promoted then expanded, it is the number of
784  /// registers needed to hold all the bits of the original type. For an i140
785  /// on a 32 bit machine this means 5 registers.
786  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
787  if (VT.isSimple()) {
788  assert((unsigned)VT.getSimpleVT().SimpleTy <
789  array_lengthof(NumRegistersForVT));
790  return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
791  }
792  if (VT.isVector()) {
793  EVT VT1;
794  MVT VT2;
795  unsigned NumIntermediates;
796  return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
797  }
798  if (VT.isInteger()) {
799  unsigned BitWidth = VT.getSizeInBits();
800  unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
801  return (BitWidth + RegWidth - 1) / RegWidth;
802  }
803  llvm_unreachable("Unsupported extended type!");
804  }
805 
806  /// If true, then instruction selection should seek to shrink the FP constant
807  /// of the specified type to a smaller type in order to save space and / or
808  /// reduce runtime.
809  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
810 
811  // Return true if it is profitable to reduce the given load node to a smaller
812  // type.
813  //
814  // e.g. (i16 (trunc (i32 (load x))) -> i16 load x should be performed
816  ISD::LoadExtType ExtTy,
817  EVT NewVT) const {
818  return true;
819  }
820 
821  /// When splitting a value of the specified type into parts, does the Lo
822  /// or Hi part come first? This usually follows the endianness, except
823  /// for ppcf128, where the Hi part always comes first.
824  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
825  return DL.isBigEndian() || VT == MVT::ppcf128;
826  }
827 
828  /// If true, the target has custom DAG combine transformations that it can
829  /// perform for the specified node.
831  assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
832  return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
833  }
834 
835  /// \brief Get maximum # of store operations permitted for llvm.memset
836  ///
837  /// This function returns the maximum number of store operations permitted
838  /// to replace a call to llvm.memset. The value is set by the target at the
839  /// performance threshold for such a replacement. If OptSize is true,
840  /// return the limit for functions that have OptSize attribute.
841  unsigned getMaxStoresPerMemset(bool OptSize) const {
843  }
844 
845  /// \brief Get maximum # of store operations permitted for llvm.memcpy
846  ///
847  /// This function returns the maximum number of store operations permitted
848  /// to replace a call to llvm.memcpy. The value is set by the target at the
849  /// performance threshold for such a replacement. If OptSize is true,
850  /// return the limit for functions that have OptSize attribute.
851  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
853  }
854 
855  /// \brief Get maximum # of store operations permitted for llvm.memmove
856  ///
857  /// This function returns the maximum number of store operations permitted
858  /// to replace a call to llvm.memmove. The value is set by the target at the
859  /// performance threshold for such a replacement. If OptSize is true,
860  /// return the limit for functions that have OptSize attribute.
861  unsigned getMaxStoresPerMemmove(bool OptSize) const {
863  }
864 
865  /// \brief Determine if the target supports unaligned memory accesses.
866  ///
867  /// This function returns true if the target allows unaligned memory accesses
868  /// of the specified type in the given address space. If true, it also returns
869  /// whether the unaligned memory access is "fast" in the last argument by
870  /// reference. This is used, for example, in situations where an array
871  /// copy/move/set is converted to a sequence of store operations. Its use
872  /// helps to ensure that such replacements don't generate code that causes an
873  /// alignment error (trap) on the target machine.
875  unsigned AddrSpace = 0,
876  unsigned Align = 1,
877  bool * /*Fast*/ = nullptr) const {
878  return false;
879  }
880 
881  /// Returns the target specific optimal type for load and store operations as
882  /// a result of memset, memcpy, and memmove lowering.
883  ///
884  /// If DstAlign is zero that means it's safe to destination alignment can
885  /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
886  /// a need to check it against alignment requirement, probably because the
887  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
888  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
889  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
890  /// does not need to be loaded. It returns EVT::Other if the type should be
891  /// determined using generic target-independent logic.
892  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
893  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
894  bool /*IsMemset*/,
895  bool /*ZeroMemset*/,
896  bool /*MemcpyStrSrc*/,
897  MachineFunction &/*MF*/) const {
898  return MVT::Other;
899  }
900 
901  /// Returns true if it's safe to use load / store of the specified type to
902  /// expand memcpy / memset inline.
903  ///
904  /// This is mostly true for all types except for some special cases. For
905  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
906  /// fstpl which also does type conversion. Note the specified type doesn't
907  /// have to be legal as the hook is used before type legalization.
908  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
909 
910  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
911  bool usesUnderscoreSetJmp() const {
912  return UseUnderscoreSetJmp;
913  }
914 
915  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
916  bool usesUnderscoreLongJmp() const {
917  return UseUnderscoreLongJmp;
918  }
919 
920  /// Return integer threshold on number of blocks to use jump tables rather
921  /// than if sequence.
923  return MinimumJumpTableEntries;
924  }
925 
926  /// If a physical register, this specifies the register that
927  /// llvm.savestack/llvm.restorestack should save and restore.
929  return StackPointerRegisterToSaveRestore;
930  }
931 
932  /// If a physical register, this returns the register that receives the
933  /// exception address on entry to a landing pad.
934  unsigned getExceptionPointerRegister() const {
935  return ExceptionPointerRegister;
936  }
937 
938  /// If a physical register, this returns the register that receives the
939  /// exception typeid on entry to a landing pad.
940  unsigned getExceptionSelectorRegister() const {
941  return ExceptionSelectorRegister;
942  }
943 
944  /// Returns the target's jmp_buf size in bytes (if never set, the default is
945  /// 200)
946  unsigned getJumpBufSize() const {
947  return JumpBufSize;
948  }
949 
950  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
951  /// is 0)
952  unsigned getJumpBufAlignment() const {
953  return JumpBufAlignment;
954  }
955 
956  /// Return the minimum stack alignment of an argument.
957  unsigned getMinStackArgumentAlignment() const {
958  return MinStackArgumentAlignment;
959  }
960 
961  /// Return the minimum function alignment.
962  unsigned getMinFunctionAlignment() const {
963  return MinFunctionAlignment;
964  }
965 
966  /// Return the preferred function alignment.
967  unsigned getPrefFunctionAlignment() const {
968  return PrefFunctionAlignment;
969  }
970 
971  /// Return the preferred loop alignment.
972  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
973  return PrefLoopAlignment;
974  }
975 
976  /// Return whether the DAG builder should automatically insert fences and
977  /// reduce ordering for atomics.
979  return InsertFencesForAtomic;
980  }
981 
982  /// Return true if the target stores stack protector cookies at a fixed offset
983  /// in some non-standard address space, and populates the address space and
984  /// offset as appropriate.
985  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
986  unsigned &/*Offset*/) const {
987  return false;
988  }
989 
990  /// Returns true if a cast between SrcAS and DestAS is a noop.
991  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
992  return false;
993  }
994 
995  /// Return true if the pointer arguments to CI should be aligned by aligning
996  /// the object whose address is being passed. If so then MinSize is set to the
997  /// minimum size the object must be to be aligned and PrefAlign is set to the
998  /// preferred alignment.
999  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1000  unsigned & /*PrefAlign*/) const {
1001  return false;
1002  }
1003 
1004  //===--------------------------------------------------------------------===//
1005  /// \name Helpers for TargetTransformInfo implementations
1006  /// @{
1007 
  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  /// Returns the (cost, legal MVT) pair for \p Ty under \p DL.
  std::pair<unsigned, MVT> getTypeLegalizationCost(const DataLayout &DL,
                                                   Type *Ty) const;
1014 
1015  /// @}
1016 
1017  //===--------------------------------------------------------------------===//
1018  /// \name Helpers for atomic expansion.
1019  /// @{
1020 
1021  /// True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional
1022  /// and expand AtomicCmpXchgInst.
1023  virtual bool hasLoadLinkedStoreConditional() const { return false; }
1024 
1025  /// Perform a load-linked operation on Addr, returning a "Value *" with the
1026  /// corresponding pointee type. This may entail some non-trivial operations to
1027  /// truncate or reconstruct types that will be illegal in the backend. See
1028  /// ARMISelLowering for an example implementation.
1029  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
1030  AtomicOrdering Ord) const {
1031  llvm_unreachable("Load linked unimplemented on this target");
1032  }
1033 
1034  /// Perform a store-conditional operation to Addr. Return the status of the
1035  /// store. This should be 0 if the store succeeded, non-zero otherwise.
1036  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
1037  Value *Addr, AtomicOrdering Ord) const {
1038  llvm_unreachable("Store conditional unimplemented on this target");
1039  }
1040 
1041  /// Inserts in the IR a target-specific intrinsic specifying a fence.
1042  /// It is called by AtomicExpandPass before expanding an
1043  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
1044  /// RMW and CmpXchg set both IsStore and IsLoad to true.
1045  /// This function should either return a nullptr, or a pointer to an IR-level
1046  /// Instruction*. Even complex fence sequences can be represented by a
1047  /// single Instruction* through an intrinsic to be lowered later.
1048  /// Backends with !getInsertFencesForAtomic() should keep a no-op here.
1049  /// Backends should override this method to produce target-specific intrinsic
1050  /// for their fences.
1051  /// FIXME: Please note that the default implementation here in terms of
1052  /// IR-level fences exists for historical/compatibility reasons and is
1053  /// *unsound* ! Fences cannot, in general, be used to restore sequential
1054  /// consistency. For example, consider the following example:
1055  /// atomic<int> x = y = 0;
1056  /// int r1, r2, r3, r4;
1057  /// Thread 0:
1058  /// x.store(1);
1059  /// Thread 1:
1060  /// y.store(1);
1061  /// Thread 2:
1062  /// r1 = x.load();
1063  /// r2 = y.load();
1064  /// Thread 3:
1065  /// r3 = y.load();
1066  /// r4 = x.load();
1067  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1068  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1069  /// IR-level fences can prevent it.
1070  /// @{
1072  AtomicOrdering Ord, bool IsStore,
1073  bool IsLoad) const {
1074  if (!getInsertFencesForAtomic())
1075  return nullptr;
1076 
1077  if (isAtLeastRelease(Ord) && IsStore)
1078  return Builder.CreateFence(Ord);
1079  else
1080  return nullptr;
1081  }
1082 
1084  AtomicOrdering Ord, bool IsStore,
1085  bool IsLoad) const {
1086  if (!getInsertFencesForAtomic())
1087  return nullptr;
1088 
1089  if (isAtLeastAcquire(Ord))
1090  return Builder.CreateFence(Ord);
1091  else
1092  return nullptr;
1093  }
1094  /// @}
1095 
1096  /// Returns true if the given (atomic) store should be expanded by the
1097  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1099  return false;
1100  }
1101 
1102  /// Returns true if arguments should be sign-extended in lib calls.
1103  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1104  return IsSigned;
1105  }
1106 
1107  /// Returns true if the given (atomic) load should be expanded by the
1108  /// IR-level AtomicExpand pass into a load-linked instruction
1109  /// (through emitLoadLinked()).
1110  virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }
1111 
1112  /// Returns how the IR-level AtomicExpand pass should expand the given
1113  /// AtomicRMW, if at all. Default is to never expand.
1114  virtual AtomicRMWExpansionKind
1117  }
1118 
1119  /// On some platforms, an AtomicRMW that never actually modifies the value
1120  /// (such as fetch_add of 0) can be turned into a fence followed by an
1121  /// atomic load. This may sound useless, but it makes it possible for the
1122  /// processor to keep the cacheline shared, dramatically improving
1123  /// performance. And such idempotent RMWs are useful for implementing some
1124  /// kinds of locks, see for example (justification + benchmarks):
1125  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1126  /// This method tries doing that transformation, returning the atomic load if
1127  /// it succeeds, and nullptr otherwise.
1128  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1129  /// another round of expansion.
1130  virtual LoadInst *
1132  return nullptr;
1133  }
1134 
1135  /// Returns true if we should normalize
1136  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1137  /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
1138  /// that it saves us from materializing N0 and N1 in an integer register.
1139  /// Targets that are able to perform and/or on flags should return false here.
1141  EVT VT) const {
1142  // If a target has multiple condition registers, then it likely has logical
1143  // operations on those registers.
1145  return false;
1146  // Only do the transform if the value won't be split into multiple
1147  // registers.
1148  LegalizeTypeAction Action = getTypeAction(Context, VT);
1149  return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1150  Action != TypeSplitVector;
1151  }
1152 
1153  //===--------------------------------------------------------------------===//
1154  // TargetLowering Configuration Methods - These methods should be invoked by
1155  // the derived class constructor to configure this object for the target.
1156  //
1157 protected:
1158  /// Specify how the target extends the result of integer and floating point
1159  /// boolean values from i1 to a wider type. See getBooleanContents.
1161  BooleanContents = Ty;
1162  BooleanFloatContents = Ty;
1163  }
1164 
1165  /// Specify how the target extends the result of integer and floating point
1166  /// boolean values from i1 to a wider type. See getBooleanContents.
1168  BooleanContents = IntTy;
1169  BooleanFloatContents = FloatTy;
1170  }
1171 
1172  /// Specify how the target extends the result of a vector boolean value from a
1173  /// vector of i1 to a wider type. See getBooleanContents.
1175  BooleanVectorContents = Ty;
1176  }
1177 
1178  /// Specify the target scheduling preference.
1180  SchedPreferenceInfo = Pref;
1181  }
1182 
1183  /// Indicate whether this target prefers to use _setjmp to implement
1184  /// llvm.setjmp or the version without _. Defaults to false.
1185  void setUseUnderscoreSetJmp(bool Val) {
1186  UseUnderscoreSetJmp = Val;
1187  }
1188 
1189  /// Indicate whether this target prefers to use _longjmp to implement
1190  /// llvm.longjmp or the version without _. Defaults to false.
1191  void setUseUnderscoreLongJmp(bool Val) {
1192  UseUnderscoreLongJmp = Val;
1193  }
1194 
1195  /// Indicate the number of blocks to generate jump tables rather than if
1196  /// sequence.
1198  MinimumJumpTableEntries = Val;
1199  }
1200 
1201  /// If set to a physical register, this specifies the register that
1202  /// llvm.savestack/llvm.restorestack should save and restore.
1204  StackPointerRegisterToSaveRestore = R;
1205  }
1206 
1207  /// If set to a physical register, this sets the register that receives the
1208  /// exception address on entry to a landing pad.
1209  void setExceptionPointerRegister(unsigned R) {
1210  ExceptionPointerRegister = R;
1211  }
1212 
1213  /// If set to a physical register, this sets the register that receives the
1214  /// exception typeid on entry to a landing pad.
1215  void setExceptionSelectorRegister(unsigned R) {
1216  ExceptionSelectorRegister = R;
1217  }
1218 
1219  /// Tells the code generator not to expand operations into sequences that use
1220  /// the select operations if possible.
1221  void setSelectIsExpensive(bool isExpensive = true) {
1222  SelectIsExpensive = isExpensive;
1223  }
1224 
1225  /// Tells the code generator that the target has multiple (allocatable)
1226  /// condition registers that can be used to store the results of comparisons
1227  /// for use by selects and conditional branches. With multiple condition
1228  /// registers, the code generator will not aggressively sink comparisons into
1229  /// the blocks of their users.
1230  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1231  HasMultipleConditionRegisters = hasManyRegs;
1232  }
1233 
1234  /// Tells the code generator that the target has BitExtract instructions.
1235  /// The code generator will aggressively sink "shift"s into the blocks of
1236  /// their users if the users will generate "and" instructions which can be
1237  /// combined with "shift" to BitExtract instructions.
1238  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1239  HasExtractBitsInsn = hasExtractInsn;
1240  }
1241 
  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);
1246 
1247  /// Tells the code generator that integer divide is expensive, and if
1248  /// possible, should be replaced by an alternate sequence of instructions not
1249  /// containing an integer divide.
1250  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }
1251 
1252  /// Tells the code generator that fsqrt is cheap, and should not be replaced
1253  /// with an alternative sequence of instructions.
1254  void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }
1255 
1256  /// Tells the code generator that this target supports floating point
1257  /// exceptions and cares about preserving floating point exception behavior.
1258  void setHasFloatingPointExceptions(bool FPExceptions = true) {
1259  HasFloatingPointExceptions = FPExceptions;
1260  }
1261 
1262  /// Tells the code generator which bitwidths to bypass.
1263  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1264  BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1265  }
1266 
1267  /// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
1268  /// signed divide by power of two; let the target handle it.
1269  void setPow2SDivIsCheap(bool isCheap = true) { Pow2SDivIsCheap = isCheap; }
1270 
1271  /// Add the specified register class as an available regclass for the
1272  /// specified value type. This indicates the selector can handle values of
1273  /// that class natively.
1275  assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1276  AvailableRegClasses.push_back(std::make_pair(VT, RC));
1277  RegClassForVT[VT.SimpleTy] = RC;
1278  }
1279 
1280  /// Remove all register classes.
1282  memset(RegClassForVT, 0,MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));
1283 
1284  AvailableRegClasses.clear();
1285  }
1286 
1287  /// \brief Remove all operation actions.
1289  }
1290 
  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost" (the second member of
  /// the returned pair).
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
1295 
1296  /// Once all of the register classes are added, this allows us to compute
1297  /// derived properties we expose.
1299 
1300  /// Indicate that the specified operation does not work with the specified
1301  /// type and indicate what to do about it.
1302  void setOperationAction(unsigned Op, MVT VT,
1303  LegalizeAction Action) {
1304  assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1305  OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
1306  }
1307 
1308  /// Indicate that the specified load with extension does not work with the
1309  /// specified type and indicate what to do about it.
1310  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
1311  LegalizeAction Action) {
1312  assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
1313  MemVT.isValid() && "Table isn't big enough!");
1314  LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy][ExtType] = (uint8_t)Action;
1315  }
1316 
1317  /// Indicate that the specified truncating store does not work with the
1318  /// specified type and indicate what to do about it.
1319  void setTruncStoreAction(MVT ValVT, MVT MemVT,
1320  LegalizeAction Action) {
1321  assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
1322  TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
1323  }
1324 
1325  /// Indicate that the specified indexed load does or does not work with the
1326  /// specified type and indicate what to do abort it.
1327  ///
1328  /// NOTE: All indexed mode loads are initialized to Expand in
1329  /// TargetLowering.cpp
1330  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1331  LegalizeAction Action) {
1332  assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1333  (unsigned)Action < 0xf && "Table isn't big enough!");
1334  // Load action are kept in the upper half.
1335  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1336  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1337  }
1338 
1339  /// Indicate that the specified indexed store does or does not work with the
1340  /// specified type and indicate what to do about it.
1341  ///
1342  /// NOTE: All indexed mode stores are initialized to Expand in
1343  /// TargetLowering.cpp
1344  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1345  LegalizeAction Action) {
1346  assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1347  (unsigned)Action < 0xf && "Table isn't big enough!");
1348  // Store action are kept in the lower half.
1349  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1350  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1351  }
1352 
1353  /// Indicate that the specified condition code is or isn't supported on the
1354  /// target and indicate what to do about it.
1356  LegalizeAction Action) {
1357  assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
1358  "Table isn't big enough!");
1359  /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 32-bit
1360  /// value and the upper 27 bits index into the second dimension of the array
1361  /// to select what 32-bit value to use.
1362  uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
1363  CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
1364  CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
1365  }
1366 
1367  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1368  /// to trying a larger integer/fp until it can find one that works. If that
1369  /// default is insufficient, this method can be used by the target to override
1370  /// the default.
1371  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1372  PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1373  }
1374 
1375  /// Targets should invoke this method for each target independent node that
1376  /// they want to provide a custom DAG combiner for by implementing the
1377  /// PerformDAGCombine virtual method.
1379  assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
1380  TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
1381  }
1382 
1383  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1384  void setJumpBufSize(unsigned Size) {
1385  JumpBufSize = Size;
1386  }
1387 
1388  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1389  /// 0
1390  void setJumpBufAlignment(unsigned Align) {
1391  JumpBufAlignment = Align;
1392  }
1393 
1394  /// Set the target's minimum function alignment (in log2(bytes))
1396  MinFunctionAlignment = Align;
1397  }
1398 
1399  /// Set the target's preferred function alignment. This should be set if
1400  /// there is a performance benefit to higher-than-minimum alignment (in
1401  /// log2(bytes))
1403  PrefFunctionAlignment = Align;
1404  }
1405 
1406  /// Set the target's preferred loop alignment. Default alignment is zero, it
1407  /// means the target does not care about loop alignment. The alignment is
1408  /// specified in log2(bytes). The target may also override
1409  /// getPrefLoopAlignment to provide per-loop values.
1410  void setPrefLoopAlignment(unsigned Align) {
1411  PrefLoopAlignment = Align;
1412  }
1413 
1414  /// Set the minimum stack alignment of an argument (in log2(bytes)).
1416  MinStackArgumentAlignment = Align;
1417  }
1418 
1419  /// Set if the DAG builder should automatically insert fences and reduce the
1420  /// order of atomic memory operations to Monotonic.
1421  void setInsertFencesForAtomic(bool fence) {
1422  InsertFencesForAtomic = fence;
1423  }
1424 
1425 public:
1426  //===--------------------------------------------------------------------===//
1427  // Addressing mode description hooks (used by LSR etc).
1428  //
1429 
1430  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1431  /// instructions reading the address. This allows as much computation as
1432  /// possible to be done in the address mode for that operand. This hook lets
1433  /// targets also pass back when this should be done on intrinsics which
1434  /// load/store.
1435  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1436  SmallVectorImpl<Value*> &/*Ops*/,
1437  Type *&/*AccessTy*/,
1438  unsigned AddrSpace = 0) const {
1439  return false;
1440  }
1441 
1442  /// This represents an addressing mode of:
1443  /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1444  /// If BaseGV is null, there is no BaseGV.
1445  /// If BaseOffs is zero, there is no base offset.
1446  /// If HasBaseReg is false, there is no base register.
1447  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
1448  /// no scale.
1449  struct AddrMode {
1451  int64_t BaseOffs;
1453  int64_t Scale;
1454  AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1455  };
1456 
  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type. TODO: Handle
  /// pre/postinc as well.
  ///
  /// If the address space cannot be determined, it will be -1.
  ///
  /// TODO: Remove default argument
  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                     Type *Ty, unsigned AddrSpace) const;
1469 
1470  /// \brief Return the cost of the scaling factor used in the addressing mode
1471  /// represented by AM for this target, for a load/store of the specified type.
1472  ///
1473  /// If the AM is supported, the return value must be >= 0.
1474  /// If the AM is not supported, it returns a negative value.
1475  /// TODO: Handle pre/postinc as well.
1476  /// TODO: Remove default argument
1477  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
1478  Type *Ty, unsigned AS = 0) const {
1479  // Default: assume that any scaling factor used in a legal AM is free.
1480  if (isLegalAddressingMode(DL, AM, Ty, AS))
1481  return 0;
1482  return -1;
1483  }
1484 
1485  /// Return true if the specified immediate is legal icmp immediate, that is
1486  /// the target has icmp instructions which can compare a register against the
1487  /// immediate without having to materialize the immediate into a register.
1488  virtual bool isLegalICmpImmediate(int64_t) const {
1489  return true;
1490  }
1491 
1492  /// Return true if the specified immediate is legal add immediate, that is the
1493  /// target has add instructions which can add a register with the immediate
1494  /// without having to materialize the immediate into a register.
1495  virtual bool isLegalAddImmediate(int64_t) const {
1496  return true;
1497  }
1498 
1499  /// Return true if it's significantly cheaper to shift a vector by a uniform
1500  /// scalar than by an amount which will vary across each lane. On x86, for
1501  /// example, there is a "psllw" instruction for the former case, but no simple
1502  /// instruction for a general "a << b" operation on vectors.
1503  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1504  return false;
1505  }
1506 
1507  /// Return true if it's free to truncate a value of type Ty1 to type
1508  /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
1509  /// by referencing its sub-register AX.
1510  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1511  return false;
1512  }
1513 
1514  /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
1515  /// whether a call is in tail position. Typically this means that both results
1516  /// would be assigned to the same register or stack slot, but it could mean
1517  /// the target performs adequate checks of its own before proceeding with the
1518  /// tail call.
1519  virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
1520  return false;
1521  }
1522 
1523  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1524  return false;
1525  }
1526 
1527  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
1528 
1529  /// Return true if the extension represented by \p I is free.
1530  /// Unlikely the is[Z|FP]ExtFree family which is based on types,
1531  /// this method can use the context provided by \p I to decide
1532  /// whether or not \p I is free.
1533  /// This method extends the behavior of the is[Z|FP]ExtFree family.
1534  /// In other words, if is[Z|FP]Free returns true, then this method
1535  /// returns true as well. The converse is not true.
1536  /// The target can perform the adequate checks by overriding isExtFreeImpl.
1537  /// \pre \p I must be a sign, zero, or fp extension.
1538  bool isExtFree(const Instruction *I) const {
1539  switch (I->getOpcode()) {
1540  case Instruction::FPExt:
1541  if (isFPExtFree(EVT::getEVT(I->getType())))
1542  return true;
1543  break;
1544  case Instruction::ZExt:
1545  if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
1546  return true;
1547  break;
1548  case Instruction::SExt:
1549  break;
1550  default:
1551  llvm_unreachable("Instruction is not an extension");
1552  }
1553  return isExtFreeImpl(I);
1554  }
1555 
1556  /// Return true if any actual instruction that defines a value of type Ty1
1557  /// implicitly zero-extends the value to Ty2 in the result register.
1558  ///
1559  /// This does not necessarily include registers defined in unknown ways, such
1560  /// as incoming arguments, or copies from unknown virtual registers. Also, if
1561  /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
1562  /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
1563  /// values implicit zero-extend the result out to 64 bits.
1564  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1565  return false;
1566  }
1567 
1568  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1569  return false;
1570  }
1571 
1572  /// Return true if the target supplies and combines to a paired load
1573  /// two loaded values of type LoadedType next to each other in memory.
1574  /// RequiredAlignment gives the minimal alignment constraints that must be met
1575  /// to be able to select this paired load.
1576  ///
1577  /// This information is *not* used to generate actual paired loads, but it is
1578  /// used to generate a sequence of loads that is easier to combine into a
1579  /// paired load.
1580  /// For instance, something like this:
1581  /// a = load i64* addr
1582  /// b = trunc i64 a to i32
1583  /// c = lshr i64 a, 32
1584  /// d = trunc i64 c to i32
1585  /// will be optimized into:
1586  /// b = load i32* addr1
1587  /// d = load i32* addr2
1588  /// Where addr1 = addr2 +/- sizeof(i32).
1589  ///
1590  /// In other words, unless the target performs a post-isel load combining,
1591  /// this information should not be provided because it will generate more
1592  /// loads.
1593  virtual bool hasPairedLoad(Type * /*LoadedType*/,
1594  unsigned & /*RequiredAligment*/) const {
1595  return false;
1596  }
1597 
1598  virtual bool hasPairedLoad(EVT /*LoadedType*/,
1599  unsigned & /*RequiredAligment*/) const {
1600  return false;
1601  }
1602 
  /// \brief Get the maximum supported factor for interleaved memory accesses.
  /// The default is the minimum possible interleave factor, 2; targets with
  /// wider interleaved access support override this.
  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
1606 
1607  /// \brief Lower an interleaved load to target specific intrinsics. Return
1608  /// true on success.
1609  ///
1610  /// \p LI is the vector load instruction.
1611  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
1612  /// \p Indices is the corresponding indices for each shufflevector.
1613  /// \p Factor is the interleave factor.
1614  virtual bool lowerInterleavedLoad(LoadInst *LI,
1616  ArrayRef<unsigned> Indices,
1617  unsigned Factor) const {
1618  return false;
1619  }
1620 
1621  /// \brief Lower an interleaved store to target specific intrinsics. Return
1622  /// true on success.
1623  ///
1624  /// \p SI is the vector store instruction.
1625  /// \p SVI is the shufflevector to RE-interleave the stored vector.
1626  /// \p Factor is the interleave factor.
1628  unsigned Factor) const {
1629  return false;
1630  }
1631 
1632  /// Return true if zero-extending the specific node Val to type VT2 is free
1633  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1634  /// because it's folded such as X86 zero-extending loads).
1635  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1636  return isZExtFree(Val.getValueType(), VT2);
1637  }
1638 
  /// Return true if an fpext operation is free (for instance, because
  /// single-precision floating-point numbers are implicitly extended to
  /// double-precision).
  virtual bool isFPExtFree(EVT VT) const {
    // fpext is only meaningful on floating-point types.
    assert(VT.isFloatingPoint());
    return false;
  }
1646 
  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
  /// extend node) is profitable.
  /// The conservative default answers false.
  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
1650 
  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    // fneg is only meaningful on floating-point types.
    assert(VT.isFloatingPoint());
    return false;
  }
1657 
  /// Return true if an fabs operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT VT) const {
    // fabs is only meaningful on floating-point types.
    assert(VT.isFloatingPoint());
    return false;
  }
1664 
  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions)
  ///
  /// The conservative default answers false (expand fmuladd to fmul + fadd).
  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
    return false;
  }
1676 
  /// Return true if it's profitable to narrow operations of type VT1 to
  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  /// The conservative default answers false.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }
1683 
  /// \brief Return true if it is beneficial to convert a load of a constant to
  /// just the constant itself.
  /// On some targets it might be more efficient to use a combination of
  /// arithmetic instructions to materialize the constant instead of loading it
  /// from a constant pool.
  /// The conservative default answers false (keep the constant-pool load).
  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                 Type *Ty) const {
    return false;
  }
1693 
  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index. This is needed because EXTRACT_SUBVECTOR usually
  /// has custom lowering that depends on the index of the first element,
  /// and only the target knows which lowering is cheap.
  /// The conservative default answers false.
  virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
    return false;
  }
1701 
1702  //===--------------------------------------------------------------------===//
1703  // Runtime Library hooks
1704  //
1705 
1706  /// Rename the default libcall routine name for the specified libcall.
1708  LibcallRoutineNames[Call] = Name;
1709  }
1710 
  /// Get the libcall routine name for the specified libcall.
  /// \p Call indexes the name table directly, so it must be a valid
  /// RTLIB::Libcall value.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }
1715 
1716  /// Override the default CondCode to be used to test the result of the
1717  /// comparison libcall against zero.
1719  CmpLibcallCCs[Call] = CC;
1720  }
1721 
1722  /// Get the CondCode that's to be used to test the result of the comparison
1723  /// libcall against zero.
1725  return CmpLibcallCCs[Call];
1726  }
1727 
1728  /// Set the CallingConv that should be used for the specified libcall.
1730  LibcallCallingConvs[Call] = CC;
1731  }
1732 
1733  /// Get the CallingConv that should be used for the specified libcall.
1735  return LibcallCallingConvs[Call];
1736  }
1737 
1738 private:
1739  const TargetMachine &TM;
1740 
1741  /// Tells the code generator not to expand operations into sequences that use
1742  /// the select operations if possible.
1743  bool SelectIsExpensive;
1744 
1745  /// Tells the code generator that the target has multiple (allocatable)
1746  /// condition registers that can be used to store the results of comparisons
1747  /// for use by selects and conditional branches. With multiple condition
1748  /// registers, the code generator will not aggressively sink comparisons into
1749  /// the blocks of their users.
1750  bool HasMultipleConditionRegisters;
1751 
1752  /// Tells the code generator that the target has BitExtract instructions.
1753  /// The code generator will aggressively sink "shift"s into the blocks of
1754  /// their users if the users will generate "and" instructions which can be
1755  /// combined with "shift" to BitExtract instructions.
1756  bool HasExtractBitsInsn;
1757 
1758  /// Tells the code generator not to expand integer divides by constants into a
1759  /// sequence of muls, adds, and shifts. This is a hack until a real cost
1760  /// model is in place. If we ever optimize for size, this will be set to true
1761  /// unconditionally.
1762  bool IntDivIsCheap;
1763 
1764  // Don't expand fsqrt with an approximation based on the inverse sqrt.
1765  bool FsqrtIsCheap;
1766 
1767  /// Tells the code generator to bypass slow divide or remainder
1768  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
1769  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1770  /// div/rem when the operands are positive and less than 256.
1771  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1772 
1773  /// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
1774  /// signed divide by power of two; let the target handle it.
1775  bool Pow2SDivIsCheap;
1776 
1777  /// Tells the code generator that it shouldn't generate extra flow control
1778  /// instructions and should attempt to combine flow control instructions via
1779  /// predication.
1780  bool JumpIsExpensive;
1781 
1782  /// Whether the target supports or cares about preserving floating point
1783  /// exception behavior.
1784  bool HasFloatingPointExceptions;
1785 
1786  /// This target prefers to use _setjmp to implement llvm.setjmp.
1787  ///
1788  /// Defaults to false.
1789  bool UseUnderscoreSetJmp;
1790 
1791  /// This target prefers to use _longjmp to implement llvm.longjmp.
1792  ///
1793  /// Defaults to false.
1794  bool UseUnderscoreLongJmp;
1795 
1796  /// Number of blocks threshold to use jump tables.
1797  int MinimumJumpTableEntries;
1798 
1799  /// Information about the contents of the high-bits in boolean values held in
1800  /// a type wider than i1. See getBooleanContents.
1801  BooleanContent BooleanContents;
1802 
1803  /// Information about the contents of the high-bits in boolean values held in
1804  /// a type wider than i1. See getBooleanContents.
1805  BooleanContent BooleanFloatContents;
1806 
1807  /// Information about the contents of the high-bits in boolean vector values
1808  /// when the element type is wider than i1. See getBooleanContents.
1809  BooleanContent BooleanVectorContents;
1810 
1811  /// The target scheduling preference: shortest possible total cycles or lowest
1812  /// register usage.
1813  Sched::Preference SchedPreferenceInfo;
1814 
1815  /// The size, in bytes, of the target's jmp_buf buffers
1816  unsigned JumpBufSize;
1817 
1818  /// The alignment, in bytes, of the target's jmp_buf buffers
1819  unsigned JumpBufAlignment;
1820 
1821  /// The minimum alignment that any argument on the stack needs to have.
1822  unsigned MinStackArgumentAlignment;
1823 
1824  /// The minimum function alignment (used when optimizing for size, and to
1825  /// prevent explicitly provided alignment from leading to incorrect code).
1826  unsigned MinFunctionAlignment;
1827 
1828  /// The preferred function alignment (used when alignment unspecified and
1829  /// optimizing for speed).
1830  unsigned PrefFunctionAlignment;
1831 
1832  /// The preferred loop alignment.
1833  unsigned PrefLoopAlignment;
1834 
  /// Whether the DAG builder should automatically insert fences and reduce
  /// ordering for atomics. (This will be set for most architectures with
  /// weak memory ordering.)
1838  bool InsertFencesForAtomic;
1839 
1840  /// If set to a physical register, this specifies the register that
1841  /// llvm.savestack/llvm.restorestack should save and restore.
1842  unsigned StackPointerRegisterToSaveRestore;
1843 
1844  /// If set to a physical register, this specifies the register that receives
1845  /// the exception address on entry to a landing pad.
1846  unsigned ExceptionPointerRegister;
1847 
1848  /// If set to a physical register, this specifies the register that receives
1849  /// the exception typeid on entry to a landing pad.
1850  unsigned ExceptionSelectorRegister;
1851 
1852  /// This indicates the default register class to use for each ValueType the
1853  /// target supports natively.
1854  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1855  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1856  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1857 
1858  /// This indicates the "representative" register class to use for each
1859  /// ValueType the target supports natively. This information is used by the
1860  /// scheduler to track register pressure. By default, the representative
1861  /// register class is the largest legal super-reg register class of the
1862  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
1863  /// representative class would be GR32.
1864  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1865 
1866  /// This indicates the "cost" of the "representative" register class for each
1867  /// ValueType. The cost is used by the scheduler to approximate register
1868  /// pressure.
1869  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1870 
1871  /// For any value types we are promoting or expanding, this contains the value
1872  /// type that we are changing to. For Expanded types, this contains one step
1873  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
1874  /// (e.g. i64 -> i16). For types natively supported by the system, this holds
1875  /// the same type (e.g. i32 -> i32).
1876  MVT TransformToType[MVT::LAST_VALUETYPE];
1877 
1878  /// For each operation and each value type, keep a LegalizeAction that
1879  /// indicates how instruction selection should deal with the operation. Most
1880  /// operations are Legal (aka, supported natively by the target), but
1881  /// operations that are not should be described. Note that operations on
1882  /// non-legal value types are not described here.
1883  uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1884 
1885  /// For each load extension type and each value type, keep a LegalizeAction
1886  /// that indicates how instruction selection should deal with a load of a
1887  /// specific value type and extension type.
1888  uint8_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]
1890 
1891  /// For each value type pair keep a LegalizeAction that indicates whether a
1892  /// truncating store of a specific value type and truncating type is legal.
1893  uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1894 
1895  /// For each indexed mode and each value type, keep a pair of LegalizeAction
1896  /// that indicates how instruction selection should deal with the load /
1897  /// store.
1898  ///
1899  /// The first dimension is the value_type for the reference. The second
1900  /// dimension represents the various modes for load store.
1901  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1902 
1903  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
1904  /// indicates how instruction selection should deal with the condition code.
1905  ///
1906  /// Because each CC action takes up 2 bits, we need to have the array size be
1907  /// large enough to fit all of the value types. This can be done by rounding
1908  /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
1909  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
1910 
1911  ValueTypeActionImpl ValueTypeActions;
1912 
1913 private:
1914  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
1915 
1916 private:
1917  std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
1918 
1919  /// Targets can specify ISD nodes that they would like PerformDAGCombine
1920  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
1921  /// array.
1922  unsigned char
1923  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
1924 
1925  /// For operations that must be promoted to a specific type, this holds the
1926  /// destination type. This map should be sparse, so don't hold it as an
1927  /// array.
1928  ///
1929  /// Targets add entries to this map with AddPromotedToType(..), clients access
1930  /// this with getTypeToPromoteTo(..).
1931  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
1932  PromoteToType;
1933 
  /// Stores the name of each libcall.
1935  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1936 
1937  /// The ISD::CondCode that should be used to test the result of each of the
1938  /// comparison libcall against zero.
1939  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1940 
1941  /// Stores the CallingConv that should be used for each libcall.
1942  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
1943 
1944 protected:
  /// Return true if the extension represented by \p I is free.
  /// \pre \p I is a sign, zero, or fp extension and
  ///      is[Z|FP]ExtFree of the related types is not true.
  /// The default reports no target-specific free extensions.
  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
1949 
1950  /// \brief Specify maximum number of store instructions per memset call.
1951  ///
1952  /// When lowering \@llvm.memset this field specifies the maximum number of
1953  /// store operations that may be substituted for the call to memset. Targets
1954  /// must set this value based on the cost threshold for that target. Targets
1955  /// should assume that the memset will be done using as many of the largest
1956  /// store operations first, followed by smaller ones, if necessary, per
1957  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1958  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1959  /// store. This only applies to setting a constant array of a constant size.
1961 
1962  /// Maximum number of stores operations that may be substituted for the call
1963  /// to memset, used for functions with OptSize attribute.
1965 
1966  /// \brief Specify maximum bytes of store instructions per memcpy call.
1967  ///
1968  /// When lowering \@llvm.memcpy this field specifies the maximum number of
1969  /// store operations that may be substituted for a call to memcpy. Targets
1970  /// must set this value based on the cost threshold for that target. Targets
1971  /// should assume that the memcpy will be done using as many of the largest
1972  /// store operations first, followed by smaller ones, if necessary, per
1973  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1974  /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store
1975  /// and one 1-byte store. This only applies to copying a constant array of
1976  /// constant size.
1978 
1979  /// Maximum number of store operations that may be substituted for a call to
1980  /// memcpy, used for functions with OptSize attribute.
1982 
1983  /// \brief Specify maximum bytes of store instructions per memmove call.
1984  ///
1985  /// When lowering \@llvm.memmove this field specifies the maximum number of
1986  /// store instructions that may be substituted for a call to memmove. Targets
1987  /// must set this value based on the cost threshold for that target. Targets
1988  /// should assume that the memmove will be done using as many of the largest
1989  /// store operations first, followed by smaller ones, if necessary, per
1990  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1991  /// with 8-bit alignment would result in nine 1-byte stores. This only
1992  /// applies to copying a constant array of constant size.
1994 
  /// Maximum number of store instructions that may be substituted for a call to
  /// memmove, used for functions with OptSize attribute.
1998 
1999  /// Tells the code generator that select is more expensive than a branch if
2000  /// the branch is usually predicted right.
2002 
2003  /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
2004  /// a mask of a single bit, a compare, and a branch into a single instruction.
2006 
2007  /// \see enableExtLdPromotion.
2009 
2010 protected:
2011  /// Return true if the value types that can be represented by the specified
2012  /// register class are all legal.
2013  bool isLegalRC(const TargetRegisterClass *RC) const;
2014 
  /// Replace/modify any TargetFrameIndex operands with a target-dependent
  /// sequence of memory operands that is recognized by PrologEpilogInserter.
2018  MachineBasicBlock *MBB) const;
2019 };
2020 
2021 /// This class defines information used to lower LLVM code to legal SelectionDAG
2022 /// operators that the target instruction selector can accept natively.
2023 ///
2024 /// This class also defines callbacks that targets must implement to lower
2025 /// target-specific constructs to SelectionDAG operators.
2027  TargetLowering(const TargetLowering&) = delete;
2028  void operator=(const TargetLowering&) = delete;
2029 
2030 public:
2031  /// NOTE: The TargetMachine owns TLOF.
2032  explicit TargetLowering(const TargetMachine &TM);
2033 
  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if the node's address can be legally represented as
  /// pre-indexed load / store address.
  /// The default implementation supports no pre-indexed addressing modes.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }
2043 
  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if this node can be combined with a load / store to form a
  /// post-indexed load / store.
  /// The default implementation supports no post-indexed addressing modes.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }
2054 
2055  /// Return the entry encoding for a jump table in the current function. The
2056  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2057  virtual unsigned getJumpTableEncoding() const;
2058 
2059  virtual const MCExpr *
2061  const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2062  MCContext &/*Ctx*/) const {
2063  llvm_unreachable("Need to implement this hook if target has custom JTIs");
2064  }
2065 
2066  /// Returns relocation base for the given PIC jumptable.
2068  SelectionDAG &DAG) const;
2069 
2070  /// This returns the relocation base for the given PIC jumptable, the same as
2071  /// getPICJumpTableRelocBase, but as an MCExpr.
2072  virtual const MCExpr *
2074  unsigned JTI, MCContext &Ctx) const;
2075 
2076  /// Return true if folding a constant offset with the given GlobalAddress is
2077  /// legal. It is frequently not legal in PIC relocation models.
2078  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2079 
2080  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2081  SDValue &Chain) const;
2082 
2083  void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
2084  SDValue &NewLHS, SDValue &NewRHS,
2085  ISD::CondCode &CCCode, SDLoc DL) const;
2086 
2087  /// Returns a pair of (return value, chain).
2088  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2089  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2090  EVT RetVT, const SDValue *Ops,
2091  unsigned NumOps, bool isSigned,
2092  SDLoc dl, bool doesNotReturn = false,
2093  bool isReturnValueUsed = true) const;
2094 
2095  //===--------------------------------------------------------------------===//
2096  // TargetLowering Optimization Methods
2097  //
2098 
2099  /// A convenience struct that encapsulates a DAG, and two SDValues for
2100  /// returning information from TargetLowering to its clients that want to
2101  /// combine.
2104  bool LegalTys;
2105  bool LegalOps;
2108 
2110  bool LT, bool LO) :
2111  DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2112 
2113  bool LegalTypes() const { return LegalTys; }
2114  bool LegalOperations() const { return LegalOps; }
2115 
2117  Old = O;
2118  New = N;
2119  return true;
2120  }
2121 
2122  /// Check to see if the specified operand of the specified instruction is a
2123  /// constant integer. If so, check to see if there are any bits set in the
2124  /// constant that are not demanded. If so, shrink the constant and return
2125  /// true.
2126  bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
2127 
2128  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
2129  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2130  /// generalized for targets with other types of implicit widening casts.
2131  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2132  SDLoc dl);
2133  };
2134 
2135  /// Look at Op. At this point, we know that only the DemandedMask bits of the
2136  /// result of Op are ever used downstream. If we can use this information to
2137  /// simplify Op, create a new simplified DAG node and return true, returning
2138  /// the original and new nodes in Old and New. Otherwise, analyze the
2139  /// expression and return a mask of KnownOne and KnownZero bits for the
2140  /// expression (used to simplify the caller). The KnownZero/One bits may only
2141  /// be accurate for those bits in the DemandedMask.
2142  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2143  APInt &KnownZero, APInt &KnownOne,
2144  TargetLoweringOpt &TLO, unsigned Depth = 0) const;
2145 
2146  /// Determine which of the bits specified in Mask are known to be either zero
2147  /// or one and return them in the KnownZero/KnownOne bitsets.
2148  virtual void computeKnownBitsForTargetNode(const SDValue Op,
2149  APInt &KnownZero,
2150  APInt &KnownOne,
2151  const SelectionDAG &DAG,
2152  unsigned Depth = 0) const;
2153 
2154  /// This method can be implemented by targets that want to expose additional
2155  /// information about sign bits to the DAG Combiner.
2156  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2157  const SelectionDAG &DAG,
2158  unsigned Depth = 0) const;
2159 
2161  void *DC; // The DAG Combiner object.
2164  public:
2166 
2167  DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2168  : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2169 
2170  bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2173  return Level == AfterLegalizeDAG;
2174  }
2176  bool isCalledByLegalizer() const { return CalledByLegalizer; }
2177 
2178  void AddToWorklist(SDNode *N);
2179  void RemoveFromWorklist(SDNode *N);
2180  SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
2181  SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2182  SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2183 
2184  void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2185  };
2186 
2187  /// Return if the N is a constant or constant vector equal to the true value
2188  /// from getBooleanContents().
2189  bool isConstTrueVal(const SDNode *N) const;
2190 
2191  /// Return if the N is a constant or constant vector equal to the false value
2192  /// from getBooleanContents().
2193  bool isConstFalseVal(const SDNode *N) const;
2194 
2195  /// Try to simplify a setcc built with the specified operands and cc. If it is
2196  /// unable to simplify it, return a null SDValue.
2198  ISD::CondCode Cond, bool foldBooleans,
2199  DAGCombinerInfo &DCI, SDLoc dl) const;
2200 
2201  /// Returns true (and the GlobalValue and the offset) if the node is a
2202  /// GlobalAddress + offset.
2203  virtual bool
2204  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2205 
2206  /// This method will be invoked for all target nodes and for any
2207  /// target-independent nodes that the target has registered with invoke it
2208  /// for.
2209  ///
2210  /// The semantics are as follows:
2211  /// Return Value:
2212  /// SDValue.Val == 0 - No change was made
2213  /// SDValue.Val == N - N was replaced, is dead, and is already handled.
2214  /// otherwise - N should be replaced by the returned Operand.
2215  ///
2216  /// In addition, methods provided by DAGCombinerInfo may be used to perform
2217  /// more complex transformations.
2218  ///
2219  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
2220 
  /// Return true if it is profitable to move a following shift through this
  /// node, adjusting any immediate operands as necessary to preserve semantics.
  /// This transformation may not be desirable if it disrupts a particularly
  /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
  /// By default, it returns true.
  virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
    return true;
  }
2229 
  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume every legal type is also desirable.
    return isTypeLegal(VT);
  }
2238 
  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  /// The conservative default answers false.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }
2246 
  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference in \p PVT.
  /// The conservative default declines all promotions.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }
2253 
2254  //===--------------------------------------------------------------------===//
2255  // Lowering methods - These methods must be implemented by targets so that
2256  // the SelectionDAGBuilder code knows how to lower these.
2257  //
2258 
2259  /// This hook must be implemented to lower the incoming (formal) arguments,
2260  /// described by the Ins array, into the specified DAG. The implementation
2261  /// should fill in the InVals array with legal-type argument values, and
2262  /// return the resulting token chain value.
2263  ///
2264  virtual SDValue
2266  bool /*isVarArg*/,
2267  const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
2268  SDLoc /*dl*/, SelectionDAG &/*DAG*/,
2269  SmallVectorImpl<SDValue> &/*InVals*/) const {
2270  llvm_unreachable("Not Implemented");
2271  }
2272 
2273  struct ArgListEntry {
2276  bool isSExt : 1;
2277  bool isZExt : 1;
2278  bool isInReg : 1;
2279  bool isSRet : 1;
2280  bool isNest : 1;
2281  bool isByVal : 1;
2282  bool isInAlloca : 1;
2283  bool isReturned : 1;
2284  uint16_t Alignment;
2285 
2288  isReturned(false), Alignment(0) { }
2289 
2290  void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2291  };
2292  typedef std::vector<ArgListEntry> ArgListTy;
2293 
2294  /// This structure contains all information that is necessary for lowering
2295  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2296  /// needs to lower a call, and targets will see this struct in their LowerCall
2297  /// implementation.
2301  bool RetSExt : 1;
2302  bool RetZExt : 1;
2303  bool IsVarArg : 1;
2304  bool IsInReg : 1;
2305  bool DoesNotReturn : 1;
2307 
2308  // IsTailCall should be modified by implementations of
2309  // TargetLowering::LowerCall that perform tail call conversions.
2311 
2312  unsigned NumFixedArgs;
2323 
2325  : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2327  IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
2328  DAG(DAG), CS(nullptr), IsPatchPoint(false) {}
2329 
2331  DL = dl;
2332  return *this;
2333  }
2334 
2336  Chain = InChain;
2337  return *this;
2338  }
2339 
2341  SDValue Target, ArgListTy &&ArgsList,
2342  unsigned FixedArgs = -1) {
2343  RetTy = ResultType;
2344  Callee = Target;
2345  CallConv = CC;
2346  NumFixedArgs =
2347  (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
2348  Args = std::move(ArgsList);
2349  return *this;
2350  }
2351 
2353  SDValue Target, ArgListTy &&ArgsList,
2355  RetTy = ResultType;
2356 
2358  DoesNotReturn = Call.doesNotReturn();
2359  IsVarArg = FTy->isVarArg();
2363 
2364  Callee = Target;
2365 
2366  CallConv = Call.getCallingConv();
2367  NumFixedArgs = FTy->getNumParams();
2368  Args = std::move(ArgsList);
2369 
2370  CS = &Call;
2371 
2372  return *this;
2373  }
2374 
2376  IsInReg = Value;
2377  return *this;
2378  }
2379 
2381  DoesNotReturn = Value;
2382  return *this;
2383  }
2384 
2386  IsVarArg = Value;
2387  return *this;
2388  }
2389 
2391  IsTailCall = Value;
2392  return *this;
2393  }
2394 
2397  return *this;
2398  }
2399 
2401  RetSExt = Value;
2402  return *this;
2403  }
2404 
2406  RetZExt = Value;
2407  return *this;
2408  }
2409 
2411  IsPatchPoint = Value;
2412  return *this;
2413  }
2414 
2416  return Args;
2417  }
2418 
2419  };
2420 
2421  /// This function lowers an abstract call to a function into an actual call.
2422  /// This returns a pair of operands. The first element is the return value
2423  /// for the function (if RetTy is not VoidTy). The second element is the
2424  /// outgoing token chain. It calls LowerCall to do the actual lowering.
2425  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
2426 
2427  /// This hook must be implemented to lower calls into the specified
2428  /// DAG. The outgoing arguments to the call are described by the Outs array,
2429  /// and the values to be returned by the call are described by the Ins
2430  /// array. The implementation should fill in the InVals array with legal-type
2431  /// return values from the call, and return the resulting token chain value.
2432  virtual SDValue
2434  SmallVectorImpl<SDValue> &/*InVals*/) const {
2435  llvm_unreachable("Not Implemented");
2436  }
2437 
2438  /// Target-specific cleanup for formal ByVal parameters.
2439  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2440 
2441  /// This hook should be implemented to check whether the return values
2442  /// described by the Outs array can fit into the return registers. If false
2443  /// is returned, an sret-demotion is performed.
2444  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2445  MachineFunction &/*MF*/, bool /*isVarArg*/,
2446  const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2447  LLVMContext &/*Context*/) const
2448  {
2449  // Return true by default to get preexisting behavior.
2450  return true;
2451  }
2452 
2453  /// This hook must be implemented to lower outgoing return values, described
2454  /// by the Outs array, into the specified DAG. The implementation should
2455  /// return the resulting token chain value.
2456  virtual SDValue
2457  LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2458  bool /*isVarArg*/,
2459  const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2460  const SmallVectorImpl<SDValue> &/*OutVals*/,
2461  SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2462  llvm_unreachable("Not Implemented");
2463  }
2464 
2465  /// Return true if result of the specified node is used by a return node
2466  /// only. It also compute and return the input chain for the tail call.
2467  ///
2468  /// This is used to determine whether it is possible to codegen a libcall as
2469  /// tail call at legalization time.
2470  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2471  return false;
2472  }
2473 
2474  /// Return true if the target may be able emit the call instruction as a tail
2475  /// call. This is used by optimization passes to determine if it's profitable
2476  /// to duplicate return instructions to enable tailcall optimization.
2477  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2478  return false;
2479  }
2480 
2481  /// Return the builtin name for the __builtin___clear_cache intrinsic
2482  /// Default is to invoke the clear cache library call
2483  virtual const char * getClearCacheBuiltinName() const {
2484  return "__clear_cache";
2485  }
2486 
2487  /// Return the register ID of the name passed in. Used by named register
2488  /// global variables extension. There is no target-independent behaviour
2489  /// so the default action is to bail.
2490  virtual unsigned getRegisterByName(const char* RegName, EVT VT,
2491  SelectionDAG &DAG) const {
2492  report_fatal_error("Named registers not implemented for this target");
2493  }
2494 
2495  /// Return the type that should be used to zero or sign extend a
2496  /// zeroext/signext integer argument or return value. FIXME: Most C calling
2497  /// convention requires the return type to be promoted, but this is not true
2498  /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
2499  /// calling conventions. The frontend should handle this and include all of
2500  /// the necessary information.
2502  ISD::NodeType /*ExtendKind*/) const {
2503  EVT MinVT = getRegisterType(Context, MVT::i32);
2504  return VT.bitsLT(MinVT) ? MinVT : VT;
2505  }
2506 
2507  /// For some targets, an LLVM struct type must be broken down into multiple
2508  /// simple types, but the calling convention specifies that the entire struct
2509  /// must be passed in a block of consecutive registers.
2510  virtual bool
2512  bool isVarArg) const {
2513  return false;
2514  }
2515 
2516  /// Returns a 0 terminated array of registers that can be safely used as
2517  /// scratch registers.
2518  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
2519  return nullptr;
2520  }
2521 
2522  /// This callback is used to prepare for a volatile or atomic load.
2523  /// It takes a chain node as input and returns the chain for the load itself.
2524  ///
2525  /// Having a callback like this is necessary for targets like SystemZ,
2526  /// which allows a CPU to reuse the result of a previous load indefinitely,
2527  /// even if a cache-coherent store is performed by another CPU. The default
2528  /// implementation does nothing.
2530  SelectionDAG &DAG) const {
2531  return Chain;
2532  }
2533 
2534  /// This callback is invoked by the type legalizer to legalize nodes with an
2535  /// illegal operand type but legal result types. It replaces the
2536  /// LowerOperation callback in the type Legalizer. The reason we can not do
2537  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2538  /// use this callback.
2539  ///
2540  /// TODO: Consider merging with ReplaceNodeResults.
2541  ///
2542  /// The target places new result values for the node in Results (their number
2543  /// and types must exactly match those of the original return values of
2544  /// the node), or leaves Results empty, which indicates that the node is not
2545  /// to be custom lowered after all.
2546  /// The default implementation calls LowerOperation.
2547  virtual void LowerOperationWrapper(SDNode *N,
2548  SmallVectorImpl<SDValue> &Results,
2549  SelectionDAG &DAG) const;
2550 
2551  /// This callback is invoked for operations that are unsupported by the
2552  /// target, which are registered to use 'custom' lowering, and whose defined
2553  /// values are all legal. If the target has no operations that require custom
2554  /// lowering, it need not implement this. The default implementation of this
2555  /// aborts.
2556  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2557 
2558  /// This callback is invoked when a node result type is illegal for the
2559  /// target, and the operation was registered to use 'custom' lowering for that
2560  /// result type. The target places new result values for the node in Results
2561  /// (their number and types must exactly match those of the original return
2562  /// values of the node), or leaves Results empty, which indicates that the
2563  /// node is not to be custom lowered after all.
2564  ///
2565  /// If the target has no operations that require custom lowering, it need not
2566  /// implement this. The default implementation aborts.
2567  virtual void ReplaceNodeResults(SDNode * /*N*/,
2568  SmallVectorImpl<SDValue> &/*Results*/,
2569  SelectionDAG &/*DAG*/) const {
2570  llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2571  }
2572 
2573  /// This method returns the name of a target specific DAG node.
2574  virtual const char *getTargetNodeName(unsigned Opcode) const;
2575 
2576  /// This method returns a target specific FastISel object, or null if the
2577  /// target does not support "fast" ISel.
2579  const TargetLibraryInfo *) const {
2580  return nullptr;
2581  }
2582 
2583 
2585  SelectionDAG &DAG) const;
2586 
2587  //===--------------------------------------------------------------------===//
2588  // Inline Asm Support hooks
2589  //
2590 
2591  /// This hook allows the target to expand an inline asm call to be explicit
2592  /// llvm code if it wants to. This is useful for turning simple inline asms
2593  /// into LLVM intrinsics, which gives the compiler more information about the
2594  /// behavior of the code.
2595  virtual bool ExpandInlineAsm(CallInst *) const {
2596  return false;
2597  }
2598 
2600  C_Register, // Constraint represents specific register(s).
2601  C_RegisterClass, // Constraint represents any of register(s) in class.
2602  C_Memory, // Memory constraint.
2603  C_Other, // Something else.
2604  C_Unknown // Unsupported constraint.
2605  };
2606 
2608  // Generic weights.
2609  CW_Invalid = -1, // No match.
2610  CW_Okay = 0, // Acceptable.
2611  CW_Good = 1, // Good weight.
2612  CW_Better = 2, // Better weight.
2613  CW_Best = 3, // Best weight.
2614 
2615  // Well-known weights.
2616  CW_SpecificReg = CW_Okay, // Specific register operands.
2617  CW_Register = CW_Good, // Register operands.
2618  CW_Memory = CW_Better, // Memory operands.
2619  CW_Constant = CW_Best, // Constant operand.
2620  CW_Default = CW_Okay // Default or don't know type.
2621  };
2622 
2623  /// This contains information for each constraint that we are lowering.
2625  /// This contains the actual string for the code, like "m". TargetLowering
2626  /// picks the 'best' code from ConstraintInfo::Codes that most closely
2627  /// matches the operand.
2628  std::string ConstraintCode;
2629 
2630  /// Information about the constraint code, e.g. Register, RegisterClass,
2631  /// Memory, Other, Unknown.
2633 
2634  /// If this is the result output operand or a clobber, this is null,
2635  /// otherwise it is the incoming operand to the CallInst. This gets
2636  /// modified as the asm is processed.
2638 
2639  /// The ValueType for the operand value.
2641 
2642  /// Return true of this is an input operand that is a matching constraint
2643  /// like "4".
2644  bool isMatchingInputConstraint() const;
2645 
2646  /// If this is an input matching constraint, this method returns the output
2647  /// operand it matches.
2648  unsigned getMatchedOperand() const;
2649 
2650  /// Copy constructor for copying from a ConstraintInfo.
2652  : InlineAsm::ConstraintInfo(std::move(Info)),
2654  ConstraintVT(MVT::Other) {}
2655  };
2656 
2657  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2658 
2659  /// Split up the constraint string from the inline assembly value into the
2660  /// specific constraints and their prefixes, and also tie in the associated
2661  /// operand values. If this returns an empty vector, and if the constraint
2662  /// string itself isn't empty, there was an error parsing.
2664  const TargetRegisterInfo *TRI,
2665  ImmutableCallSite CS) const;
2666 
2667  /// Examine constraint type and operand type and determine a weight value.
2668  /// The operand object must already have been set up with the operand type.
2670  AsmOperandInfo &info, int maIndex) const;
2671 
2672  /// Examine constraint string and operand type and determine a weight value.
2673  /// The operand object must already have been set up with the operand type.
2675  AsmOperandInfo &info, const char *constraint) const;
2676 
2677  /// Determines the constraint code and constraint type to use for the specific
2678  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2679  /// If the actual operand being passed in is available, it can be passed in as
2680  /// Op, otherwise an empty SDValue can be passed.
2681  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2682  SDValue Op,
2683  SelectionDAG *DAG = nullptr) const;
2684 
2685  /// Given a constraint, return the type of constraint it is for this target.
2686  virtual ConstraintType getConstraintType(StringRef Constraint) const;
2687 
2688  /// Given a physical register constraint (e.g. {edx}), return the register
2689  /// number and the register class for the register.
2690  ///
2691  /// Given a register class constraint, like 'r', if this corresponds directly
2692  /// to an LLVM register class, return a register of 0 and the register class
2693  /// pointer.
2694  ///
2695  /// This should only be used for C_Register constraints. On error, this
2696  /// returns a register number of 0 and a null register class pointer.
2697  virtual std::pair<unsigned, const TargetRegisterClass *>
2699  StringRef Constraint, MVT VT) const;
2700 
2701  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2702  if (ConstraintCode == "i")
2703  return InlineAsm::Constraint_i;
2704  else if (ConstraintCode == "m")
2705  return InlineAsm::Constraint_m;
2707  }
2708 
2709  /// Try to replace an X constraint, which matches anything, with another that
2710  /// has more specific requirements based on the type of the corresponding
2711  /// operand. This returns null if there is no replacement to make.
2712  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
2713 
2714  /// Lower the specified operand into the Ops vector. If it is invalid, don't
2715  /// add anything to Ops.
2716  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
2717  std::vector<SDValue> &Ops,
2718  SelectionDAG &DAG) const;
2719 
2720  //===--------------------------------------------------------------------===//
2721  // Div utility functions
2722  //
2723  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2724  bool IsAfterLegalization,
2725  std::vector<SDNode *> *Created) const;
2726  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2727  bool IsAfterLegalization,
2728  std::vector<SDNode *> *Created) const;
2729  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
2730  SelectionDAG &DAG,
2731  std::vector<SDNode *> *Created) const {
2732  return SDValue();
2733  }
2734 
2735  /// Indicate whether this target prefers to combine the given number of FDIVs
2736  /// with the same divisor.
2737  virtual bool combineRepeatedFPDivisors(unsigned NumUsers) const {
2738  return false;
2739  }
2740 
2741  /// Hooks for building estimates in place of slower divisions and square
2742  /// roots.
2743 
2744  /// Return a reciprocal square root estimate value for the input operand.
2745  /// The RefinementSteps output is the number of Newton-Raphson refinement
2746  /// iterations required to generate a sufficient (though not necessarily
2747  /// IEEE-754 compliant) estimate for the value type.
2748  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
2749  /// algorithm implementation that uses one constant or two constants.
2750  /// A target may choose to implement its own refinement within this function.
2751  /// If that's true, then return '0' as the number of RefinementSteps to avoid
2752  /// any further refinement of the estimate.
2753  /// An empty SDValue return means no estimate sequence can be created.
2755  unsigned &RefinementSteps,
2756  bool &UseOneConstNR) const {
2757  return SDValue();
2758  }
2759 
2760  /// Return a reciprocal estimate value for the input operand.
2761  /// The RefinementSteps output is the number of Newton-Raphson refinement
2762  /// iterations required to generate a sufficient (though not necessarily
2763  /// IEEE-754 compliant) estimate for the value type.
2764  /// A target may choose to implement its own refinement within this function.
2765  /// If that's true, then return '0' as the number of RefinementSteps to avoid
2766  /// any further refinement of the estimate.
2767  /// An empty SDValue return means no estimate sequence can be created.
2769  unsigned &RefinementSteps) const {
2770  return SDValue();
2771  }
2772 
2773  //===--------------------------------------------------------------------===//
2774  // Legalization utility functions
2775  //
2776 
2777  /// Expand a MUL into two nodes. One that computes the high bits of
2778  /// the result and one that computes the low bits.
2779  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
2780  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
2781  /// if you want to control how low bits are extracted from the LHS.
2782  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
2783  /// \param RL Low bits of the RHS of the MUL. See LL for meaning
2784  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
2785  /// \returns true if the node has been expanded. false if it has not
2786  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
2787  SelectionDAG &DAG, SDValue LL = SDValue(),
2788  SDValue LH = SDValue(), SDValue RL = SDValue(),
2789  SDValue RH = SDValue()) const;
2790 
2791  /// Expand float(f32) to SINT(i64) conversion
2792  /// \param N Node to expand
2793  /// \param Result output after conversion
2794  /// \returns True, if the expansion was successful, false otherwise
2795  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
2796 
2797  //===--------------------------------------------------------------------===//
2798  // Instruction Emitting Hooks
2799  //
2800 
2801  /// This method should be implemented by targets that mark instructions with
2802  /// the 'usesCustomInserter' flag. These instructions are special in various
2803  /// ways, which require special support to insert. The specified MachineInstr
2804  /// is created but not inserted into any basic blocks, and this method is
2805  /// called to expand it into a sequence of instructions, potentially also
2806  /// creating new basic blocks and control flow.
2807  /// As long as the returned basic block is different (i.e., we created a new
2808  /// one), the custom inserter is free to modify the rest of \p MBB.
2809  virtual MachineBasicBlock *
2811 
2812  /// This method should be implemented by targets that mark instructions with
2813  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
2814  /// instruction selection by target hooks. e.g. To fill in optional defs for
2815  /// ARM 's' setting instructions.
2816  virtual void
2818 
2819  /// If this function returns true, SelectionDAGBuilder emits a
2820  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
2821  virtual bool useLoadStackGuardNode() const {
2822  return false;
2823  }
2824 };
2825 
2826 /// Given an LLVM IR type and return type attributes, compute the return value
2827 /// EVTs and flags, and optionally also the offsets, if the return value is
2828 /// being lowered to memory.
2829 void GetReturnInfo(Type *ReturnType, AttributeSet attr,
2830  SmallVectorImpl<ISD::OutputArg> &Outs,
2831  const TargetLowering &TLI, const DataLayout &DL);
2832 
2833 } // end llvm namespace
2834 
2835 #endif
virtual bool enableAggressiveFMAFusion(EVT VT) const
Return true if the target always benefits from combining into FMA for a given value type...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, SDLoc, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array...
virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const
Lower an interleaved store to target specific intrinsics.
static MVT getIntegerVT(unsigned BitWidth)
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:724
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:104
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:240
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, unsigned FixedArgs=-1)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
unsigned getPrefFunctionAlignment() const
Return the preferred function alignment.
Sign extended before/after call.
Definition: Attributes.h:105
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
virtual bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of a non-zero vector constant with the give...
Force argument to be passed in register.
Definition: Attributes.h:78
unsigned getNumParams() const
getNumParams - Return the number of fixed parameters this function type requires. ...
Definition: DerivedTypes.h:136
bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
const TargetMachine & getTargetMachine() const
InstrTy * getInstruction() const
Definition: CallSite.h:82
bool isAtLeastAcquire(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as acquire (i.e.
Definition: Instructions.h:56
virtual bool isCheapToSpeculateCttz() const
Return true if it is cheap to speculate a call to intrinsic cttz.
void setJumpBufAlignment(unsigned Align)
Set the target's required jmp_buf buffer alignment (in bytes); default is 0.
CallLoweringInfo & setDebugLoc(SDLoc dl)
virtual bool hasLoadLinkedStoreConditional() const
True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional and expand AtomicCmpXchgInst...
bool isIntDivCheap() const
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target.
bool isConstTrueVal(const SDNode *N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual bool allowTruncateForTailCall(Type *, Type *) const
Return true if a truncation from Ty1 to Ty2 is permitted when deciding whether a call is in tail posi...
bool isExtended() const
isExtended - Test if the given EVT is extended (as opposed to being simple).
Definition: ValueTypes.h:100
virtual bool isLoadBitCastBeneficial(EVT, EVT) const
isLoadBitCastBeneficial() - Return true if the following transform is beneficial. ...
CallInst - This class represents a function call, abstracting a target machine's calling convention...
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
bool usesUnderscoreSetJmp() const
Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
void setHasFloatingPointExceptions(bool FPExceptions=true)
Tells the code generator that this target supports floating point exceptions and cares about preservi...
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
virtual bool getPreIndexedAddressParts(SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
static ISD::NodeType getExtendForContent(BooleanContent Content)
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:181
unsigned getSizeInBits() const
ShuffleVectorInst - This instruction constructs a fixed permutation of two input vectors.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, SDLoc dl)
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
CallLoweringInfo & setNoReturn(bool Value=true)
const_iterator begin(StringRef path)
Get begin iterator over path.
Definition: Path.cpp:232
void clearOperationActions()
Remove all operation actions.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
virtual bool isZExtFree(Type *, Type *) const
Return true if any actual instruction that defines a value of type Ty1 implicitly zero-extends the va...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, unsigned Align=1, bool *=nullptr) const
Determine if the target supports unaligned memory accesses.
LoadInst - an instruction for reading from memory.
Definition: Instructions.h:177
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
AtomicRMWInst - an instruction that atomically reads a memory location, combines it with another valu...
Definition: Instructions.h:674
virtual bool GetAddrModeArguments(IntrinsicInst *, SmallVectorImpl< Value * > &, Type *&, unsigned AddrSpace=0) const
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the add...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it...
virtual bool isFPExtFree(EVT VT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual void HandleByVal(CCState *, unsigned &, unsigned) const
Target-specific cleanup for formal ByVal parameters.
CallLoweringInfo & setDiscardResult(bool Value=true)
SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, bool IsAfterLegalization, std::vector< SDNode * > *Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual bool isFPImmLegal(const APFloat &, EVT) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual void AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
LegalizeTypeAction getTypeAction(MVT VT) const
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
virtual Sched::Preference getSchedulingPreference(SDNode *) const
Some scheduler, e.g.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:39
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
Definition: ValueTypes.h:189
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, APInt &KnownZero, APInt &KnownOne, TargetLoweringOpt &TLO, unsigned Depth=0) const
Look at Op.
virtual LoadInst * lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can b...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason, bool gen_crash_diag=true)
Reports a serious error, calling any installed error handler.
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool enableExtLdPromotion() const
Return true if the target wants to use the optimization that turns ext(promotableInst1(...(promotableInstN(load)))) into promotedInst1(...(promotedInstN(ext(load)))).
AtomicRMWExpansionKind
Enum that specifies what a AtomicRMWInst is expanded to, if at all.
bool isVector() const
isVector - Return true if this is a vector value type.
Definition: ValueTypes.h:115
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
lazy value info
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely that it saves us from materializing N0 and N1 in an integer register.
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool hasMultipleConditionRegisters() const
Return true if multiple condition registers are available.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual bool isZExtFree(EVT, EVT) const
CallLoweringInfo & setVarArg(bool Value=true)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops, unsigned NumOps, bool isSigned, SDLoc dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
virtual void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
CallLoweringInfo & setChain(SDValue InChain)
virtual AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:33
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:517
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
bool doesNotReturn() const
Determine if the call cannot return.
Definition: CallSite.h:303
This file contains the simple types necessary to represent the attributes associated with functions a...
SimpleValueType SimpleTy
bool getInsertFencesForAtomic() const
Return whether the DAG builder should automatically insert fences and reduce ordering for atomics...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
unsigned getJumpBufSize() const
Returns the target's jmp_buf size in bytes (if never set, the default is 200)
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
#define false
Definition: ConvertUTF.c:65
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, SDLoc dl) const
Try to simplify a setcc built with the specified operands and cc.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
ELFYAML::ELF_STO Other
Definition: ELFYAML.cpp:591
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
Definition: ValueTypes.h:110
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isExtFree(const Instruction *I) const
Return true if the extension represented by I is free.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Definition: Instructions.h:38
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool getPostIndexedAddressParts(SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first...
Context object for machine code objects.
Definition: MCContext.h:48
const DenseMap< unsigned int, unsigned int > & getBypassSlowDivWidths() const
Returns map of slow types for division or remainder with corresponding fast types.
virtual bool ShouldShrinkFPConstant(EVT) const
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
virtual bool mayBeEmittedAsTailCall(CallInst *) const
Return true if the target may be able to emit the call instruction as a tail call.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:30
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
FunctionType - Class to represent function types.
Definition: DerivedTypes.h:96
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
void setSelectIsExpensive(bool isExpensive=true)
Tells the code generator not to expand operations into sequences that use the select operations if po...
bool usesUnderscoreLongJmp() const
Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
LLVMContext & getContext() const
getContext - Return the LLVMContext in which this type was uniqued.
Definition: Type.h:125
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
void GetReturnInfo(Type *ReturnType, AttributeSet attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer argument or retu...
const ValueTypeActionImpl & getValueTypeActions() const
This contains information for each constraint that we are lowering.
SmallVector< ISD::OutputArg, 32 > Outs
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
void setFsqrtIsCheap(bool isCheap=true)
Tells the code generator that fsqrt is cheap, and should not be replaced with an alternative sequence...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: ArrayRef.h:31
CallLoweringInfo & setZExtResult(bool Value=true)
MachineBasicBlock * emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
StoreInst - an instruction for storing to memory.
Definition: Instructions.h:316
int getMinimumJumpTableEntries() const
Return integer threshold on number of blocks to use jump tables rather than if sequence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:804
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool isDesirableToTransformToIntegerOp(unsigned, EVT) const
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
virtual Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isOperationLegalOrPromote(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal using promotion...
unsigned getNumElements() const
Return the number of elements in the Vector type.
Definition: DerivedTypes.h:432
virtual bool isSelectSupported(SelectSupportKind) const
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself...
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal or custom on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Type * getElementType() const
Definition: DerivedTypes.h:323
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool isLegalRC(const TargetRegisterClass *RC) const
Return true if the value types that can be represented by the specified register class are all legal...
PointerType - Class to represent pointers.
Definition: DerivedTypes.h:449
virtual bool getStackCookieLocation(unsigned &, unsigned &) const
Return true if the target stores stack protector cookies at a fixed offset in some non-standard addre...
LLVM_CONSTEXPR size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:247
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition: APFloat.h:122
virtual bool isTruncateFree(EVT, EVT) const
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isMatchingInputConstraint() const
Return true of this is an input operand that is a matching constraint like "4".
#define true
Definition: ConvertUTF.c:66
virtual FastISel * createFastISel(FunctionLoweringInfo &, const TargetLibraryInfo *) const
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
virtual Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
virtual bool isShuffleMaskLegal(const SmallVectorImpl< int > &, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations, those with specific masks.
BooleanContent getBooleanContents(EVT Type) const
bool hasFloatingPointExceptions() const
Return true if target supports floating point exceptions.
MVT - Machine Value Type.
DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:41
void setJumpBufSize(unsigned Size)
Set the target's required jmp_buf buffer size (in bytes); default is 200.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
bool isFsqrtCheap() const
Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x)
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool isVectorTy() const
isVectorTy - True if this is an instance of VectorType.
Definition: Type.h:226
virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual EVT getOptimalMemOpType(uint64_t, unsigned, unsigned, bool, bool, bool, MachineFunction &) const
Returns the target specific optimal type for load and store operations as a result of memset...
virtual unsigned getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:780
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isMaskAndBranchFoldingLegal() const
Return if the target supports combining a chain like:
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isDesirableToCommuteWithShift(const SDNode *N) const
Return true if it is profitable to move a following shift through this.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
ConstraintInfo()
Default constructor.
Definition: InlineAsm.cpp:60
CombineLevel
Definition: DAGCombine.h:16
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual bool isTruncateFree(Type *, Type *) const
Return true if it's free to truncate a value of type Ty1 to type Ty2.
virtual bool isFMAFasterThanFMulAndFAdd(EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const
Return the register ID of the name passed in.
bool CombineTo(SDValue O, SDValue N)
virtual bool useSoftFloat() const
Value * getOperand(unsigned i) const
Definition: User.h:118
Zero extended before/after call.
Definition: Attributes.h:119
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool isNarrowingProfitable(EVT, EVT) const
Return true if it's profitable to narrow operations of type VT1 to VT2.
bool isPredictableSelectExpensive() const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI, unsigned &RefinementSteps) const
Return a reciprocal estimate value for the input operand.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
bool isConstFalseVal(const SDNode *N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, bool IsAfterLegalization, std::vector< SDNode * > *Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target...
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save...
void setPow2SDivIsCheap(bool isCheap=true)
Tells the code generator that it shouldn't generate sra/srl/add/sra for a signed divide by power of t...
EVT - Extended Value Type.
Definition: ValueTypes.h:31
bool isSlowDivBypassed() const
Returns true if target has indicated at least one type should be bypassed.
LegalizeTypeAction getTypeAction(MVT VT) const
std::vector< ArgListEntry > ArgListTy
unsigned getExceptionPointerRegister() const
If a physical register, this returns the register that receives the exception address on entry to a l...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
This structure contains all information that is necessary for lowering calls.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual bool isExtFreeImpl(const Instruction *I) const
Return true if the extension represented by I is free.
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, SDLoc, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
Definition: ValueTypes.h:70
void setUseUnderscoreLongJmp(bool Val)
Indicate whether this target prefers to use _longjmp to implement llvm.longjmp or the version without...
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
AsmOperandInfo(InlineAsm::ConstraintInfo Info)
Copy constructor for copying from a ConstraintInfo.
std::string ConstraintCode
This contains the actual string for the code, like "m".
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero...
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero...
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
virtual void ReplaceNodeResults(SDNode *, SmallVectorImpl< SDValue > &, SelectionDAG &) const
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void initActions()
Initialize all of the actions to default values.
std::vector< AsmOperandInfo > AsmOperandInfoVector
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
CCState - This class holds information needed while lowering arguments and return values...
virtual bool hasPairedLoad(Type *, unsigned &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
bool MaskAndBranchFoldingIsLegal
MaskAndBranchFoldingIsLegal - Indicates if the target supports folding a mask of a single bit...
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector< SDNode * > *Created) const
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual bool combineRepeatedFPDivisors(unsigned NumUsers) const
Indicate whether this target prefers to combine the given number of FDIVs with the same divisor...
LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
bool paramHasAttr(unsigned i, Attribute::AttrKind A) const
Return true if the call or the callee has the given attribute.
Definition: CallSite.h:242
void setExceptionPointerRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception address on entry to...
virtual bool isVectorClearMaskLegal(const SmallVectorImpl< int > &, EVT) const
Similar to isShuffleMaskLegal.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:179
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:222
Provides information about what library functions are available for the current target.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
CallLoweringInfo & setSExtResult(bool Value=true)
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
Represents one node in the SelectionDAG.
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
void clearRegisterClasses()
Remove all register classes.
bool hasExtractBitsInsn() const
Return true if the target has BitExtract instructions.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual bool isProfitableToHoist(Instruction *I) const
VectorType - Class to represent vector types.
Definition: DerivedTypes.h:362
void setMinimumJumpTableEntries(int Val)
Indicate the number of blocks to generate jump tables rather than if sequence.
virtual const char * getClearCacheBuiltinName() const
Return the builtin name for the __builtin___clear_cache intrinsic Default is to invoke the clear cach...
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
Definition: APInt.h:73
virtual bool IsDesirableToPromoteOp(SDValue, EVT &) const
This method query the target whether it is beneficial for dag combiner to promote the specified node...
void setExceptionSelectorRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception typeid on entry to ...
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:383
AddrMode
ARM Addressing Modes.
Definition: ARMBaseInfo.h:235
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:386
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const
Return the preferred vector type legalization action.
virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const
Returns true if the given (atomic) load should be expanded by the IR-level AtomicExpand pass into a l...
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
CallLoweringInfo & setTailCall(bool Value=true)
MVT getRegisterType(LLVMContext &Context, EVT VT) const
Return the type of registers that this ValueType will eventually require.
Representation of each machine instruction.
Definition: MachineInstr.h:51
SmallVector< SDValue, 32 > OutVals
bool isValid() const
isValid - Return true if this is a valid simple valuetype.
virtual bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const
Lower an interleaved load to target specific intrinsics.
LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
virtual bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
SelectSupportKind
Enum that describes what type of support for selects the target has.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
unsigned getJumpBufAlignment() const
Returns the target's jmp_buf alignment in bytes (if never set, the default is 0)
void setTypeAction(MVT VT, LegalizeTypeAction Action)
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
void * PointerTy
Definition: GenericValue.h:23
bool isPow2SDivCheap() const
Return true if pow2 sdiv is cheaper than a chain of sra/srl/add/sra.
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const
EVT is not used in-tree, but is used by out-of-tree target.
unsigned getMinFunctionAlignment() const
Return the minimum function alignment.
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:418
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
Definition: ValueTypes.h:233
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - get or set the calling convention of the call.
Definition: CallSite.h:212
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:329
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OpSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
FenceInst * CreateFence(AtomicOrdering Ordering, SynchronizationScope SynchScope=CrossThread, const Twine &Name="")
Definition: IRBuilder.h:1014
virtual const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *, const MachineBasicBlock *, unsigned, MCContext &) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI, unsigned &RefinementSteps, bool &UseOneConstNR) const
Hooks for building estimates in place of slower divisions and square roots.
virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:277
bool isAtLeastRelease(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as release (i.e.
Definition: Instructions.h:64
EVT getValueType() const
Return the ValueType of the referenced return value.
CallLoweringInfo & setCallee(Type *ResultType, FunctionType *FTy, SDValue Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
bool isVarArg() const
Definition: DerivedTypes.h:120
bool isSelectExpensive() const
Return true if the select operation is expensive for this target.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
Definition: DenseMap.h:79
virtual bool ExpandInlineAsm(CallInst *) const
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
Definition: ValueTypes.h:105
bool use_empty() const
Definition: Value.h:275
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (touches memory).
bool isFloat(MCInstrInfo const &MCII, MCInst const &MCI)
Return whether it is a floating-point insn.
void setIntDivIsCheap(bool isCheap=true)
Tells the code generator that integer divide is expensive, and if possible, should be replaced by an alternate sequence of instructions not containing an integer divide.
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:94
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
CallLoweringInfo & setInRegister(bool Value=true)
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
LLVM Value Representation.
Definition: Value.h:69
void setUseUnderscoreSetJmp(bool Val)
Indicate whether this target prefers to use _setjmp to implement llvm.setjmp or the version without _.
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:112
void setInsertFencesForAtomic(bool fence)
Set if the DAG builder should automatically insert fences and reduce the order of atomic memory operations to Monotonic.
virtual bool isZExtFree(SDValue Val, EVT VT2) const
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicitly zero-extended such as ARM ldrb / ldrh or because it's folded such as X86 zero-extending loads).
bool hasTargetDAGCombine(ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified nodes.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with custom lowering.
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
virtual uint8_t getRepRegClassCostFor(MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy, Idx).
Primary interface to the complete machine description for the target machine.
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:40
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
unsigned getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
unsigned getExceptionSelectorRegister() const
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
MVT ConstraintVT
The ValueType for the operand value.
Conversion operators.
Definition: ISDOpcodes.h:380
BooleanContent
Enum that describes how the target represents true/false values.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool isBigEndian() const
Definition: DataLayout.h:218
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
virtual bool isFNegFree(EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with a bitwise operation.
std::pair< unsigned, MVT > getTypeLegalizationCost(const DataLayout &DL, Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
virtual bool hasPairedLoad(EVT, unsigned &) const
virtual bool shouldAlignPointerArgs(CallInst *, unsigned &, unsigned &) const
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, ImmutableCallSite CS) const
Split up the constraint string from the inline assembly value into the specific constraints and their prefixes, and also tie in the associated operand values.
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some other code sequence, or the target has a custom expander for it.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predicted right.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:203
IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:37
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
virtual bool isCheapToSpeculateCtlz() const
Return true if it is cheap to speculate a call to intrinsic ctlz.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:761
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:225
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, SDLoc DL) const
SoftenSetCCOperands - Soften the operands of a comparison.