// Doxygen listing header (LLVM 4.0.0, TargetLowering.h) — navigation chrome
// from the scraped documentation page, not part of the original source file.
1 //===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 ///
10 /// \file
11 /// This file describes how to lower LLVM code to machine code. This has three
12 /// main components:
13 ///
14 /// 1. Which ValueTypes are natively supported by the target.
15 /// 2. Which operations are supported for supported ValueTypes.
16 /// 3. Cost thresholds for alternative implementations of certain operations.
17 ///
18 /// In addition it has a few other components, like information about FP
19 /// immediates.
20 ///
21 //===----------------------------------------------------------------------===//
22 
23 #ifndef LLVM_TARGET_TARGETLOWERING_H
24 #define LLVM_TARGET_TARGETLOWERING_H
25 
26 #include "llvm/ADT/ArrayRef.h"
27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/ADT/STLExtras.h"
30 #include "llvm/ADT/StringRef.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/CallSite.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/IR/InlineAsm.h"
44 #include "llvm/IR/Instruction.h"
45 #include "llvm/IR/Instructions.h"
46 #include "llvm/IR/Type.h"
47 #include "llvm/MC/MCRegisterInfo.h"
49 #include "llvm/Support/Casting.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <climits>
56 #include <cstdint>
57 #include <iterator>
58 #include <map>
59 #include <string>
60 #include <utility>
61 #include <vector>
62 
63 namespace llvm {
64 
65 class BranchProbability;
66 class CCState;
67 class CCValAssign;
68 class FastISel;
69 class FunctionLoweringInfo;
70 class IntrinsicInst;
71 class MachineBasicBlock;
72 class MachineFunction;
73 class MachineInstr;
74 class MachineJumpTableInfo;
75 class MachineLoop;
76 class MachineRegisterInfo;
77 class MCContext;
78 class MCExpr;
79 class TargetRegisterClass;
80 class TargetLibraryInfo;
81 class TargetRegisterInfo;
82 class Value;
83 
84 namespace Sched {
85 
86  enum Preference {
87  None, // No preference
88  Source, // Follow source order.
89  RegPressure, // Scheduling for lowest register pressure.
90  Hybrid, // Scheduling for both latency and register pressure.
91  ILP, // Scheduling for ILP in low register pressure mode.
92  VLIW // Scheduling for VLIW targets.
93  };
94 
95 } // end namespace Sched
96 
97 /// This base class for TargetLowering contains the SelectionDAG-independent
98 /// parts that can be used from the rest of CodeGen.
100 public:
101  /// This enum indicates whether operations are valid for a target, and if not,
102  /// what action should be used to make them valid.
enum LegalizeAction : uint8_t {
  Legal,   // The target natively supports this operation.
  Promote, // This operation should be executed in a larger type.
  Expand,  // Try to expand this to other ops, otherwise use a libcall.
  LibCall, // Don't try to expand this to other ops, always use a libcall.
  Custom   // Use the LowerOperation hook to implement custom lowering.
};
110 
111  /// This enum indicates whether a types are legal for a target, and if not,
112  /// what action should be used to make them valid.
enum LegalizeTypeAction : uint8_t {
  TypeLegal,           // The target natively supports this type.
  TypePromoteInteger,  // Replace this integer with a larger one.
  TypeExpandInteger,   // Split this integer into two of half the size.
  TypeSoftenFloat,     // Convert this float to a same size integer type,
                       // if an operation is not supported in target HW.
  TypeExpandFloat,     // Split this float into two of half the size.
  TypeScalarizeVector, // Replace this one-element vector with its element.
  TypeSplitVector,     // Split this vector into two of half the size.
  TypeWidenVector,     // This vector should be widened into a larger vector.
  TypePromoteFloat     // Replace this float with a larger one.
};
125 
126  /// LegalizeKind holds the legalization kind that needs to happen to EVT
127  /// in order to type-legalize it.
128  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
129 
130  /// Enum that describes how the target represents true/false values.
132  UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
133  ZeroOrOneBooleanContent, // All bits zero except for bit 0.
134  ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
135  };
136 
137  /// Enum that describes what type of support for selects the target has.
139  ScalarValSelect, // The target supports scalar selects (ex: cmov).
140  ScalarCondVectorVal, // The target supports selects with a scalar condition
141  // and vector values (ex: cmov).
142  VectorMaskSelect // The target supports vector selects with a vector
143  // mask (ex: x86 blends).
144  };
145 
146  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
147  /// to, if at all. Exists because different targets have different levels of
148  /// support for these atomic instructions, and also have different options
149  /// w.r.t. what they should expand to.
enum class AtomicExpansionKind {
  None,    // Don't expand the instruction.
  LLSC,    // Expand the instruction into loadlinked/storeconditional; used
           // by ARM/AArch64.
  LLOnly,  // Expand the (load) instruction into just a load-linked, which has
           // greater atomic guarantees than a normal load.
  CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
};
158 
159  /// Enum that specifies when a multiplication should be expanded.
enum class MulExpansionKind {
  Always,            // Always expand the instruction.
  OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                     // or custom.
};
165 
167  switch (Content) {
169  // Extend by adding rubbish bits.
170  return ISD::ANY_EXTEND;
172  // Extend by adding zero bits.
173  return ISD::ZERO_EXTEND;
175  // Extend by copying the sign bit.
176  return ISD::SIGN_EXTEND;
177  }
178  llvm_unreachable("Invalid content kind");
179  }
180 
181  /// NOTE: The TargetMachine owns TLOF.
182  explicit TargetLoweringBase(const TargetMachine &TM);
183  TargetLoweringBase(const TargetLoweringBase&) = delete;
184  void operator=(const TargetLoweringBase&) = delete;
185  virtual ~TargetLoweringBase() = default;
186 
187 protected:
188  /// \brief Initialize all of the actions to default values.
189  void initActions();
190 
191 public:
192  const TargetMachine &getTargetMachine() const { return TM; }
193 
194  virtual bool useSoftFloat() const { return false; }
195 
196  /// Return the pointer type for the given address space, defaults to
197  /// the pointer type from the data layout.
198  /// FIXME: The default needs to be removed once all the code is updated.
199  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
201  }
202 
203  /// EVT is not used in-tree, but is used by out-of-tree target.
204  /// A documentation for this function would be nice...
205  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
206 
207  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
208 
209  /// Returns the type to be used for the index operand of:
210  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
211  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
212  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
213  return getPointerTy(DL);
214  }
215 
216  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
217  return true;
218  }
219 
220  /// Return true if multiple condition registers are available.
222  return HasMultipleConditionRegisters;
223  }
224 
225  /// Return true if the target has BitExtract instructions.
226  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
227 
228  /// Return the preferred vector type legalization action.
231  // The default action for one element vectors is to scalarize
232  if (VT.getVectorNumElements() == 1)
233  return TypeScalarizeVector;
234  // The default action for other vectors is to promote
235  return TypePromoteInteger;
236  }
237 
238  // There are two general methods for expanding a BUILD_VECTOR node:
239  // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
240  // them together.
241  // 2. Build the vector on the stack and then load it.
242  // If this function returns true, then method (1) will be used, subject to
243  // the constraint that all of the necessary shuffles are legal (as determined
244  // by isShuffleMaskLegal). If this function returns false, then method (2) is
245  // always used. The vector type, and the number of defined values, are
246  // provided.
247  virtual bool
249  unsigned DefinedValues) const {
250  return DefinedValues < 3;
251  }
252 
253  /// Return true if integer divide is usually cheaper than a sequence of
254  /// several shifts, adds, and multiplies for this target.
255  /// The definition of "cheaper" may depend on whether we're optimizing
256  /// for speed or for size.
257  virtual bool isIntDivCheap(EVT VT, AttributeSet Attr) const {
258  return false;
259  }
260 
261  /// Return true if the target can handle a standalone remainder operation.
262  virtual bool hasStandaloneRem(EVT VT) const {
263  return true;
264  }
265 
266  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
267  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
268  // Default behavior is to replace SQRT(X) with X*RSQRT(X).
269  return false;
270  }
271 
272  /// Reciprocal estimate status values used by the functions below.
273  enum ReciprocalEstimate : int {
275  Disabled = 0,
277  };
278 
279  /// Return a ReciprocalEstimate enum value for a square root of the given type
280  /// based on the function's attributes. If the operation is not overridden by
281  /// the function's attributes, "Unspecified" is returned and target defaults
282  /// are expected to be used for instruction selection.
284 
285  /// Return a ReciprocalEstimate enum value for a division of the given type
286  /// based on the function's attributes. If the operation is not overridden by
287  /// the function's attributes, "Unspecified" is returned and target defaults
288  /// are expected to be used for instruction selection.
290 
291  /// Return the refinement step count for a square root of the given type based
292  /// on the function's attributes. If the operation is not overridden by
293  /// the function's attributes, "Unspecified" is returned and target defaults
294  /// are expected to be used for instruction selection.
295  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
296 
297  /// Return the refinement step count for a division of the given type based
298  /// on the function's attributes. If the operation is not overridden by
299  /// the function's attributes, "Unspecified" is returned and target defaults
300  /// are expected to be used for instruction selection.
301  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
302 
303  /// Returns true if target has indicated at least one type should be bypassed.
304  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
305 
306  /// Returns map of slow types for division or remainder with corresponding
307  /// fast types
309  return BypassSlowDivWidths;
310  }
311 
312  /// Return true if Flow Control is an expensive operation that should be
313  /// avoided.
314  bool isJumpExpensive() const { return JumpIsExpensive; }
315 
316  /// Return true if selects are only cheaper than branches if the branch is
317  /// unlikely to be predicted right.
320  }
321 
322  /// If a branch or a select condition is skewed in one direction by more than
323  /// this factor, it is very likely to be predicted correctly.
325 
326  /// Return true if the following transform is beneficial:
327  /// fold (conv (load x)) -> (load (conv*)x)
328  /// On architectures that don't natively support some vector loads
329  /// efficiently, casting the load to a smaller vector of larger types and
330  /// loading is more efficient, however, this can be undone by optimizations in
331  /// dag combiner.
332  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
333  EVT BitcastVT) const {
334  // Don't do if we could do an indexed load on the original type, but not on
335  // the new one.
336  if (!LoadVT.isSimple() || !BitcastVT.isSimple())
337  return true;
338 
339  MVT LoadMVT = LoadVT.getSimpleVT();
340 
341  // Don't bother doing this if it's just going to be promoted again later, as
342  // doing so might interfere with other combines.
343  if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
344  getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
345  return false;
346 
347  return true;
348  }
349 
350  /// Return true if the following transform is beneficial:
351  /// (store (y (conv x)), y*)) -> (store x, (x*))
352  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const {
353  // Default to the same logic as loads.
354  return isLoadBitCastBeneficial(StoreVT, BitcastVT);
355  }
356 
357  /// Return true if it is expected to be cheaper to do a store of a non-zero
358  /// vector constant with the given size and type for the address space than to
359  /// store the individual scalar element constants.
360  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
361  unsigned NumElem,
362  unsigned AddrSpace) const {
363  return false;
364  }
365 
366  /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
367  virtual bool isCheapToSpeculateCttz() const {
368  return false;
369  }
370 
371  /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
372  virtual bool isCheapToSpeculateCtlz() const {
373  return false;
374  }
375 
376  /// \brief Return true if ctlz instruction is fast.
377  virtual bool isCtlzFast() const {
378  return false;
379  }
380 
381  /// Return true if it is safe to transform an integer-domain bitwise operation
382  /// into the equivalent floating-point operation. This should be set to true
383  /// if the target has IEEE-754-compliant fabs/fneg operations for the input
384  /// type.
385  virtual bool hasBitPreservingFPLogic(EVT VT) const {
386  return false;
387  }
388 
389  /// \brief Return true if it is cheaper to split the store of a merged int val
390  /// from a pair of smaller values into multiple stores.
391  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
392  return false;
393  }
394 
395  /// \brief Return if the target supports combining a
396  /// chain like:
397  /// \code
398  /// %andResult = and %val1, #imm-with-one-bit-set;
399  /// %icmpResult = icmp %andResult, 0
400  /// br i1 %icmpResult, label %dest1, label %dest2
401  /// \endcode
402  /// into a single machine instruction of a form like:
403  /// \code
404  /// brOnBitSet %register, #bitNumber, dest
405  /// \endcode
408  }
409 
410  /// Return true if the target should transform:
411  /// (X & Y) == Y ---> (~X & Y) == 0
412  /// (X & Y) != Y ---> (~X & Y) != 0
413  ///
414  /// This may be profitable if the target has a bitwise and-not operation that
415  /// sets comparison flags. A target may want to limit the transformation based
416  /// on the type of Y or if Y is a constant.
417  ///
418  /// Note that the transform will not occur if Y is known to be a power-of-2
419  /// because a mask and compare of a single bit can be handled by inverting the
420  /// predicate, for example:
421  /// (X & 8) == 8 ---> (X & 8) != 0
422  virtual bool hasAndNotCompare(SDValue Y) const {
423  return false;
424  }
425 
426  /// Return true if the target has a bitwise and-not operation:
427  /// X = ~A & B
428  /// This can be used to simplify select or other instructions.
virtual bool hasAndNot(SDValue X) const {
  // If the target has the more complex version of this operation (an and-not
  // that also sets comparison flags, see hasAndNotCompare), assume that it
  // has this plain and-not operation too.
  return hasAndNotCompare(X);
}
434 
435  /// \brief Return true if the target wants to use the optimization that
436  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
437  /// promotedInst1(...(promotedInstN(ext(load)))).
439 
440  /// Return true if the target can combine store(extractelement VectorTy,
441  /// Idx).
442  /// \p Cost[out] gives the cost of that transformation when this is true.
443  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
444  unsigned &Cost) const {
445  return false;
446  }
447 
448  /// Return true if target supports floating point exceptions.
450  return HasFloatingPointExceptions;
451  }
452 
453  /// Return true if target always beneficiates from combining into FMA for a
454  /// given value type. This must typically return false on targets where FMA
455  /// takes more cycles to execute than FADD.
456  virtual bool enableAggressiveFMAFusion(EVT VT) const {
457  return false;
458  }
459 
460  /// Return the ValueType of the result of SETCC operations.
462  EVT VT) const;
463 
464  /// Return the ValueType for comparison libcalls. Comparions libcalls include
465  /// floating point comparion calls, and Ordered/Unordered check calls on
466  /// floating point numbers.
467  virtual
469 
470  /// For targets without i1 registers, this gives the nature of the high-bits
471  /// of boolean values held in types wider than i1.
472  ///
473  /// "Boolean values" are special true/false values produced by nodes like
474  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
475  /// Not to be confused with general values promoted from i1. Some cpus
476  /// distinguish between vectors of boolean and scalars; the isVec parameter
477  /// selects between the two kinds. For example on X86 a scalar boolean should
478  /// be zero extended from i1, while the elements of a vector of booleans
479  /// should be sign extended from i1.
480  ///
481  /// Some cpus also treat floating point types the same way as they treat
482  /// vectors instead of the way they treat scalars.
483  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
484  if (isVec)
485  return BooleanVectorContents;
486  return isFloat ? BooleanFloatContents : BooleanContents;
487  }
488 
490  return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
491  }
492 
493  /// Return target scheduling preference.
495  return SchedPreferenceInfo;
496  }
497 
498  /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
499  /// for different nodes. This function returns the preference (or none) for
500  /// the given node.
502  return Sched::None;
503  }
504 
505  /// Return the register class that should be used for the specified value
506  /// type.
507  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
508  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
509  assert(RC && "This value type is not natively supported!");
510  return RC;
511  }
512 
513  /// Return the 'representative' register class for the specified value
514  /// type.
515  ///
516  /// The 'representative' register class is the largest legal super-reg
517  /// register class for the register class of the value type. For example, on
518  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
519  /// register class is GR64 on x86_64.
520  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
521  const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
522  return RC;
523  }
524 
525  /// Return the cost of the 'representative' register class for the specified
526  /// value type.
527  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
528  return RepRegClassCostForVT[VT.SimpleTy];
529  }
530 
531  /// Return true if the target has native support for the specified value type.
532  /// This means that it has a register that directly holds it without
533  /// promotions or expansions.
534  bool isTypeLegal(EVT VT) const {
535  assert(!VT.isSimple() ||
536  (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
537  return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
538  }
539 
541  /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
542  /// that indicates how instruction selection should deal with the type.
543  LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
544 
545  public:
547  std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
548  TypeLegal);
549  }
550 
552  return ValueTypeActions[VT.SimpleTy];
553  }
554 
556  ValueTypeActions[VT.SimpleTy] = Action;
557  }
558  };
559 
561  return ValueTypeActions;
562  }
563 
564  /// Return how we should legalize values of this type, either it is already
565  /// legal (return 'Legal') or we need to promote it to a larger type (return
566  /// 'Promote'), or we need to expand it into multiple registers of smaller
567  /// integer type (return 'Expand'). 'Custom' is not an option.
569  return getTypeConversion(Context, VT).first;
570  }
572  return ValueTypeActions.getTypeAction(VT);
573  }
574 
575  /// For types supported by the target, this is an identity function. For
576  /// types that must be promoted to larger types, this returns the larger type
577  /// to promote to. For integer types that are larger than the largest integer
578  /// register, this contains one step in the expansion to get to the smaller
579  /// register. For illegal floating point types, this returns the integer type
580  /// to transform to.
582  return getTypeConversion(Context, VT).second;
583  }
584 
585  /// For types supported by the target, this is an identity function. For
586  /// types that must be expanded (i.e. integer types that are larger than the
587  /// largest integer register or illegal floating point types), this returns
588  /// the largest legal type it will be expanded to.
590  assert(!VT.isVector());
591  while (true) {
592  switch (getTypeAction(Context, VT)) {
593  case TypeLegal:
594  return VT;
595  case TypeExpandInteger:
596  VT = getTypeToTransformTo(Context, VT);
597  break;
598  default:
599  llvm_unreachable("Type is not legal nor is it to be expanded!");
600  }
601  }
602  }
603 
604  /// Vector types are broken down into some number of legal first class types.
605  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
606  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
607  /// turns into 4 EVT::i32 values with both PPC and X86.
608  ///
609  /// This method returns the number of registers needed, and the VT for each
610  /// register. It also returns the VT and quantity of the intermediate values
611  /// before they are promoted/expanded.
613  EVT &IntermediateVT,
614  unsigned &NumIntermediates,
615  MVT &RegisterVT) const;
616 
/// Describes the memory access performed by a memory-touching intrinsic;
/// filled in by getTgtMemIntrinsic.
struct IntrinsicInfo {
  unsigned opc = 0;              // target opcode
  EVT memVT;                     // memory VT
  const Value* ptrVal = nullptr; // value representing memory location
  int offset = 0;                // offset off of ptrVal
  unsigned size = 0;             // the size of the memory location
                                 // (taken from memVT if zero)
  unsigned align = 1;            // alignment
  bool vol = false;              // is volatile?
  bool readMem = false;          // reads memory?
  bool writeMem = false;         // writes memory?

  IntrinsicInfo() = default;
};
631 
632  /// Given an intrinsic, checks if on the target the intrinsic will need to map
633  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
634  /// true and store the intrinsic information into the IntrinsicInfo that was
635  /// passed to the function.
636  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
637  unsigned /*Intrinsic*/) const {
638  return false;
639  }
640 
641  /// Returns true if the target can instruction select the specified FP
642  /// immediate natively. If false, the legalizer will materialize the FP
643  /// immediate as a load from a constant pool.
644  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
645  return false;
646  }
647 
648  /// Targets can use this to indicate that they only support *some*
649  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
650  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
651  /// legal.
652  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
653  EVT /*VT*/) const {
654  return true;
655  }
656 
657  /// Returns true if the operation can trap for the value type.
658  ///
659  /// VT must be a legal type. By default, we optimistically assume most
660  /// operations don't trap except for integer divide and remainder.
661  virtual bool canOpTrap(unsigned Op, EVT VT) const;
662 
663  /// Similar to isShuffleMaskLegal. This is used by Targets can use this to
664  /// indicate if there is a suitable VECTOR_SHUFFLE that can be used to replace
665  /// a VAND with a constant pool entry.
666  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
667  EVT /*VT*/) const {
668  return false;
669  }
670 
671  /// Return how this operation should be treated: either it is legal, needs to
672  /// be promoted to a larger size, needs to be expanded to some other code
673  /// sequence, or the target has a custom expander for it.
674  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
675  if (VT.isExtended()) return Expand;
676  // If a target-specific SDNode requires legalization, require the target
677  // to provide custom legalization for it.
678  if (Op > array_lengthof(OpActions[0])) return Custom;
679  return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
680  }
681 
682  /// Return true if the specified operation is legal on this target or can be
683  /// made legal with custom lowering. This is used to help guide high-level
684  /// lowering decisions.
685  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
686  return (VT == MVT::Other || isTypeLegal(VT)) &&
687  (getOperationAction(Op, VT) == Legal ||
688  getOperationAction(Op, VT) == Custom);
689  }
690 
691  /// Return true if the specified operation is legal on this target or can be
692  /// made legal using promotion. This is used to help guide high-level lowering
693  /// decisions.
694  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
695  return (VT == MVT::Other || isTypeLegal(VT)) &&
696  (getOperationAction(Op, VT) == Legal ||
697  getOperationAction(Op, VT) == Promote);
698  }
699 
700  /// Return true if the specified operation is legal on this target or can be
701  /// made legal with custom lowering or using promotion. This is used to help
702  /// guide high-level lowering decisions.
703  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const {
704  return (VT == MVT::Other || isTypeLegal(VT)) &&
705  (getOperationAction(Op, VT) == Legal ||
706  getOperationAction(Op, VT) == Custom ||
707  getOperationAction(Op, VT) == Promote);
708  }
709 
710  /// Return true if the specified operation is illegal but has a custom lowering
711  /// on that type. This is used to help guide high-level lowering
712  /// decisions.
713  bool isOperationCustom(unsigned Op, EVT VT) const {
714  return (!isTypeLegal(VT) && getOperationAction(Op, VT) == Custom);
715  }
716 
717  /// Return true if the specified operation is illegal on this target or
718  /// unlikely to be made legal with custom lowering. This is used to help guide
719  /// high-level lowering decisions.
720  bool isOperationExpand(unsigned Op, EVT VT) const {
721  return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
722  }
723 
724  /// Return true if the specified operation is legal on this target.
725  bool isOperationLegal(unsigned Op, EVT VT) const {
726  return (VT == MVT::Other || isTypeLegal(VT)) &&
727  getOperationAction(Op, VT) == Legal;
728  }
729 
730  /// Return how this load with extension should be treated: either it is legal,
731  /// needs to be promoted to a larger size, needs to be expanded to some other
732  /// code sequence, or the target has a custom expander for it.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                EVT MemVT) const {
  // Extended (non-simple) types have no table entry; they must be expanded.
  if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
  unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
  unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
  assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
         MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
  // Each table word packs one 4-bit LegalizeAction per extension type;
  // shift the requested extension's nibble down and mask it out.
  unsigned Shift = 4 * ExtType;
  return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
}
743 
744  /// Return true if the specified load with extension is legal on this target.
745  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
746  return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
747  }
748 
749  /// Return true if the specified load with extension is legal or custom
750  /// on this target.
751  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
752  return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
753  getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
754  }
755 
756  /// Return how this store with truncation should be treated: either it is
757  /// legal, needs to be promoted to a larger size, needs to be expanded to some
758  /// other code sequence, or the target has a custom expander for it.
760  if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
761  unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
762  unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
764  "Table isn't big enough!");
765  return TruncStoreActions[ValI][MemI];
766  }
767 
768  /// Return true if the specified store with truncation is legal on this
769  /// target.
770  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
771  return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
772  }
773 
774  /// Return true if the specified store with truncation has solution on this
775  /// target.
776  bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
777  return isTypeLegal(ValVT) &&
778  (getTruncStoreAction(ValVT, MemVT) == Legal ||
779  getTruncStoreAction(ValVT, MemVT) == Custom);
780  }
781 
782  /// Return how the indexed load should be treated: either it is legal, needs
783  /// to be promoted to a larger size, needs to be expanded to some other code
784  /// sequence, or the target has a custom expander for it.
786  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
787  assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
788  "Table isn't big enough!");
789  unsigned Ty = (unsigned)VT.SimpleTy;
790  return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
791  }
792 
793  /// Return true if the specified indexed load is legal on this target.
794  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
795  return VT.isSimple() &&
796  (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
797  getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
798  }
799 
800  /// Return how the indexed store should be treated: either it is legal, needs
801  /// to be promoted to a larger size, needs to be expanded to some other code
802  /// sequence, or the target has a custom expander for it.
804  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
805  assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
806  "Table isn't big enough!");
807  unsigned Ty = (unsigned)VT.SimpleTy;
808  return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
809  }
810 
/// Return true if the specified indexed store is legal or custom on this
/// target. (The method queries stores, not loads.)
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
  return VT.isSimple() &&
         (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
          getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
}
817 
818  /// Return how the condition code should be treated: either it is legal, needs
819  /// to be expanded to some other code sequence, or the target has a custom
820  /// expander for it.
823  assert((unsigned)CC < array_lengthof(CondCodeActions) &&
824  ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
825  "Table isn't big enough!");
826  // See setCondCodeAction for how this is encoded.
827  uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
828  uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
829  LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
830  assert(Action != Promote && "Can't promote condition code!");
831  return Action;
832  }
833 
834  /// Return true if the specified condition code is legal on this target.
835  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
836  return
837  getCondCodeAction(CC, VT) == Legal ||
838  getCondCodeAction(CC, VT) == Custom;
839  }
840 
841  /// If the action for this operation is to promote, this method returns the
842  /// ValueType to promote to.
843  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
844  assert(getOperationAction(Op, VT) == Promote &&
845  "This operation isn't promoted!");
846 
847  // See if this has an explicit type specified.
848  std::map<std::pair<unsigned, MVT::SimpleValueType>,
849  MVT::SimpleValueType>::const_iterator PTTI =
850  PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
851  if (PTTI != PromoteToType.end()) return PTTI->second;
852 
853  assert((VT.isInteger() || VT.isFloatingPoint()) &&
854  "Cannot autopromote this type, add it with AddPromotedToType.");
855 
856  MVT NVT = VT;
857  do {
858  NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
859  assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
860  "Didn't find type to promote to!");
861  } while (!isTypeLegal(NVT) ||
862  getOperationAction(Op, NVT) == Promote);
863  return NVT;
864  }
865 
866  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
867  /// operations except for the pointer size. If AllowUnknown is true, this
868  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
869  /// otherwise it will assert.
870  EVT getValueType(const DataLayout &DL, Type *Ty,
871  bool AllowUnknown = false) const {
872  // Lower scalar pointers to native pointer types.
873  if (PointerType *PTy = dyn_cast<PointerType>(Ty))
874  return getPointerTy(DL, PTy->getAddressSpace());
875 
876  if (Ty->isVectorTy()) {
877  VectorType *VTy = cast<VectorType>(Ty);
878  Type *Elm = VTy->getElementType();
879  // Lower vectors of pointers to native pointer types.
880  if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
881  EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
882  Elm = PointerTy.getTypeForEVT(Ty->getContext());
883  }
884 
885  return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
886  VTy->getNumElements());
887  }
888  return EVT::getEVT(Ty, AllowUnknown);
889  }
890 
891  /// Return the MVT corresponding to this LLVM type. See getValueType.
893  bool AllowUnknown = false) const {
894  return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
895  }
896 
897  /// Return the desired alignment for ByVal or InAlloca aggregate function
898  /// arguments in the caller parameter area. This is the actual alignment, not
899  /// its logarithm.
900  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
901 
902  /// Return the type of registers that this ValueType will eventually require.
903  MVT getRegisterType(MVT VT) const {
904  assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
905  return RegisterTypeForVT[VT.SimpleTy];
906  }
907 
908  /// Return the type of registers that this ValueType will eventually require.
910  if (VT.isSimple()) {
911  assert((unsigned)VT.getSimpleVT().SimpleTy <
912  array_lengthof(RegisterTypeForVT));
913  return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
914  }
915  if (VT.isVector()) {
916  EVT VT1;
917  MVT RegisterVT;
918  unsigned NumIntermediates;
919  (void)getVectorTypeBreakdown(Context, VT, VT1,
920  NumIntermediates, RegisterVT);
921  return RegisterVT;
922  }
923  if (VT.isInteger()) {
924  return getRegisterType(Context, getTypeToTransformTo(Context, VT));
925  }
926  llvm_unreachable("Unsupported extended type!");
927  }
928 
929  /// Return the number of registers that this ValueType will eventually
930  /// require.
931  ///
932  /// This is one for any types promoted to live in larger registers, but may be
933  /// more than one for types (like i64) that are split into pieces. For types
934  /// like i140, which are first promoted then expanded, it is the number of
935  /// registers needed to hold all the bits of the original type. For an i140
936  /// on a 32 bit machine this means 5 registers.
937  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
938  if (VT.isSimple()) {
939  assert((unsigned)VT.getSimpleVT().SimpleTy <
940  array_lengthof(NumRegistersForVT));
941  return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
942  }
943  if (VT.isVector()) {
944  EVT VT1;
945  MVT VT2;
946  unsigned NumIntermediates;
947  return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
948  }
949  if (VT.isInteger()) {
950  unsigned BitWidth = VT.getSizeInBits();
951  unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
952  return (BitWidth + RegWidth - 1) / RegWidth;
953  }
954  llvm_unreachable("Unsupported extended type!");
955  }
956 
  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  /// Targets may override this hook to opt out for specific value types.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
961 
962  // Return true if it is profitable to reduce the given load node to a smaller
963  // type.
964  //
965  // e.g. (i16 (trunc (i32 (load x))) -> i16 load x should be performed
967  ISD::LoadExtType ExtTy,
968  EVT NewVT) const {
969  return true;
970  }
971 
972  /// When splitting a value of the specified type into parts, does the Lo
973  /// or Hi part come first? This usually follows the endianness, except
974  /// for ppcf128, where the Hi part always comes first.
975  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
976  return DL.isBigEndian() || VT == MVT::ppcf128;
977  }
978 
979  /// If true, the target has custom DAG combine transformations that it can
980  /// perform for the specified node.
982  assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
983  return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
984  }
985 
986  unsigned getGatherAllAliasesMaxDepth() const {
988  }
989 
990  /// \brief Get maximum # of store operations permitted for llvm.memset
991  ///
992  /// This function returns the maximum number of store operations permitted
993  /// to replace a call to llvm.memset. The value is set by the target at the
994  /// performance threshold for such a replacement. If OptSize is true,
995  /// return the limit for functions that have OptSize attribute.
996  unsigned getMaxStoresPerMemset(bool OptSize) const {
998  }
999 
1000  /// \brief Get maximum # of store operations permitted for llvm.memcpy
1001  ///
1002  /// This function returns the maximum number of store operations permitted
1003  /// to replace a call to llvm.memcpy. The value is set by the target at the
1004  /// performance threshold for such a replacement. If OptSize is true,
1005  /// return the limit for functions that have OptSize attribute.
1006  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1007  return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1008  }
1009 
1010  /// \brief Get maximum # of store operations permitted for llvm.memmove
1011  ///
1012  /// This function returns the maximum number of store operations permitted
1013  /// to replace a call to llvm.memmove. The value is set by the target at the
1014  /// performance threshold for such a replacement. If OptSize is true,
1015  /// return the limit for functions that have OptSize attribute.
1016  unsigned getMaxStoresPerMemmove(bool OptSize) const {
1018  }
1019 
1020  /// \brief Determine if the target supports unaligned memory accesses.
1021  ///
1022  /// This function returns true if the target allows unaligned memory accesses
1023  /// of the specified type in the given address space. If true, it also returns
1024  /// whether the unaligned memory access is "fast" in the last argument by
1025  /// reference. This is used, for example, in situations where an array
1026  /// copy/move/set is converted to a sequence of store operations. Its use
1027  /// helps to ensure that such replacements don't generate code that causes an
1028  /// alignment error (trap) on the target machine.
1030  unsigned AddrSpace = 0,
1031  unsigned Align = 1,
1032  bool * /*Fast*/ = nullptr) const {
1033  return false;
1034  }
1035 
1036  /// Return true if the target supports a memory access of this type for the
1037  /// given address space and alignment. If the access is allowed, the optional
1038  /// final parameter returns if the access is also fast (as defined by the
1039  /// target).
1040  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1041  unsigned AddrSpace = 0, unsigned Alignment = 1,
1042  bool *Fast = nullptr) const;
1043 
1044  /// Returns the target specific optimal type for load and store operations as
1045  /// a result of memset, memcpy, and memmove lowering.
1046  ///
1047  /// If DstAlign is zero that means it's safe to destination alignment can
1048  /// satisfy any constraint. Similarly if SrcAlign is zero it means there isn't
1049  /// a need to check it against alignment requirement, probably because the
1050  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
1051  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
1052  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
1053  /// does not need to be loaded. It returns EVT::Other if the type should be
1054  /// determined using generic target-independent logic.
1055  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
1056  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
1057  bool /*IsMemset*/,
1058  bool /*ZeroMemset*/,
1059  bool /*MemcpyStrSrc*/,
1060  MachineFunction &/*MF*/) const {
1061  return MVT::Other;
1062  }
1063 
  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  /// The default says every type is safe; targets override to exclude types.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1072 
1073  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
1074  bool usesUnderscoreSetJmp() const {
1075  return UseUnderscoreSetJmp;
1076  }
1077 
1078  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
1079  bool usesUnderscoreLongJmp() const {
1080  return UseUnderscoreLongJmp;
1081  }
1082 
1083  /// Return lower limit for number of blocks in a jump table.
1084  unsigned getMinimumJumpTableEntries() const;
1085 
1086  /// Return upper limit for number of entries in a jump table.
1087  /// Zero if no limit.
1088  unsigned getMaximumJumpTableSize() const;
1089 
1090  virtual bool isJumpTableRelative() const {
1091  return TM.isPositionIndependent();
1092  }
1093 
1094  /// If a physical register, this specifies the register that
1095  /// llvm.savestack/llvm.restorestack should save and restore.
1097  return StackPointerRegisterToSaveRestore;
1098  }
1099 
1100  /// If a physical register, this returns the register that receives the
1101  /// exception address on entry to an EH pad.
1102  virtual unsigned
1103  getExceptionPointerRegister(const Constant *PersonalityFn) const {
1104  // 0 is guaranteed to be the NoRegister value on all targets
1105  return 0;
1106  }
1107 
1108  /// If a physical register, this returns the register that receives the
1109  /// exception typeid on entry to a landing pad.
1110  virtual unsigned
1111  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
1112  // 0 is guaranteed to be the NoRegister value on all targets
1113  return 0;
1114  }
1115 
  /// Whether funclet-based EH requires catch objects to be allocated at fixed
  /// frame offsets. The default aborts: targets must override this before
  /// claiming funclet EH support.
  virtual bool needsFixedCatchObjects() const {
    report_fatal_error("Funclet EH is not implemented for this target");
  }
1119 
1120  /// Returns the target's jmp_buf size in bytes (if never set, the default is
1121  /// 200)
1122  unsigned getJumpBufSize() const {
1123  return JumpBufSize;
1124  }
1125 
1126  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
1127  /// is 0)
1128  unsigned getJumpBufAlignment() const {
1129  return JumpBufAlignment;
1130  }
1131 
1132  /// Return the minimum stack alignment of an argument.
1133  unsigned getMinStackArgumentAlignment() const {
1134  return MinStackArgumentAlignment;
1135  }
1136 
1137  /// Return the minimum function alignment.
1138  unsigned getMinFunctionAlignment() const {
1139  return MinFunctionAlignment;
1140  }
1141 
1142  /// Return the preferred function alignment.
1143  unsigned getPrefFunctionAlignment() const {
1144  return PrefFunctionAlignment;
1145  }
1146 
1147  /// Return the preferred loop alignment.
1148  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
1149  return PrefLoopAlignment;
1150  }
1151 
1152  /// If the target has a standard location for the stack protector guard,
1153  /// returns the address of that location. Otherwise, returns nullptr.
1154  /// DEPRECATED: please override useLoadStackGuardNode and customize
1155  /// LOAD_STACK_GUARD, or customize @llvm.stackguard().
1156  virtual Value *getIRStackGuard(IRBuilder<> &IRB) const;
1157 
1158  /// Inserts necessary declarations for SSP (stack protection) purpose.
1159  /// Should be used only when getIRStackGuard returns nullptr.
1160  virtual void insertSSPDeclarations(Module &M) const;
1161 
1162  /// Return the variable that's previously inserted by insertSSPDeclarations,
1163  /// if any, otherwise return nullptr. Should be used only when
1164  /// getIRStackGuard returns nullptr.
1165  virtual Value *getSDagStackGuard(const Module &M) const;
1166 
1167  /// If the target has a standard stack protection check function that
1168  /// performs validation and error handling, returns the function. Otherwise,
1169  /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
1170  /// Should be used only when getIRStackGuard returns nullptr.
1171  virtual Value *getSSPStackGuardCheck(const Module &M) const;
1172 
1173 protected:
1175  bool UseTLS) const;
1176 
1177 public:
1178  /// Returns the target-specific address of the unsafe stack pointer.
1179  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
1180 
1181  /// Returns true if a cast between SrcAS and DestAS is a noop.
1182  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1183  return false;
1184  }
1185 
1186  /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
1187  /// are happy to sink it into basic blocks.
1188  virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
1189  return isNoopAddrSpaceCast(SrcAS, DestAS);
1190  }
1191 
1192  /// Return true if the pointer arguments to CI should be aligned by aligning
1193  /// the object whose address is being passed. If so then MinSize is set to the
1194  /// minimum size the object must be to be aligned and PrefAlign is set to the
1195  /// preferred alignment.
1196  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
1197  unsigned & /*PrefAlign*/) const {
1198  return false;
1199  }
1200 
1201  //===--------------------------------------------------------------------===//
1202  /// \name Helpers for TargetTransformInfo implementations
1203  /// @{
1204 
1205  /// Get the ISD node that corresponds to the Instruction class opcode.
1206  int InstructionOpcodeToISD(unsigned Opcode) const;
1207 
1208  /// Estimate the cost of type-legalization and the legalized type.
1209  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
1210  Type *Ty) const;
1211 
1212  /// @}
1213 
1214  //===--------------------------------------------------------------------===//
1215  /// \name Helpers for atomic expansion.
1216  /// @{
1217 
1218  /// Returns the maximum atomic operation size (in bits) supported by
1219  /// the backend. Atomic operations greater than this size (as well
1220  /// as ones that are not naturally aligned), will be expanded by
1221  /// AtomicExpandPass into an __atomic_* library call.
1223  return MaxAtomicSizeInBitsSupported;
1224  }
1225 
1226  /// Returns the size of the smallest cmpxchg or ll/sc instruction
1227  /// the backend supports. Any smaller operations are widened in
1228  /// AtomicExpandPass.
1229  ///
1230  /// Note that *unlike* operations above the maximum size, atomic ops
1231  /// are still natively supported below the minimum; they just
1232  /// require a more complex expansion.
1233  unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
1234 
1235  /// Whether AtomicExpandPass should automatically insert fences and reduce
1236  /// ordering for this atomic. This should be true for most architectures with
1237  /// weak memory ordering. Defaults to false.
1238  virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
1239  return false;
1240  }
1241 
  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  /// The default aborts: only targets that opt into LL/SC expansion
  /// (via shouldExpandAtomic*InIR) need to implement this.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }
1250 
  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  /// The default aborts: only targets that opt into LL/SC expansion
  /// (via shouldExpandAtomic*InIR) need to implement this.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }
1257 
1258  /// Inserts in the IR a target-specific intrinsic specifying a fence.
1259  /// It is called by AtomicExpandPass before expanding an
1260  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
1261  /// if shouldInsertFencesForAtomic returns true.
1262  /// RMW and CmpXchg set both IsStore and IsLoad to true.
1263  /// This function should either return a nullptr, or a pointer to an IR-level
1264  /// Instruction*. Even complex fence sequences can be represented by a
1265  /// single Instruction* through an intrinsic to be lowered later.
1266  /// Backends should override this method to produce target-specific intrinsic
1267  /// for their fences.
1268  /// FIXME: Please note that the default implementation here in terms of
1269  /// IR-level fences exists for historical/compatibility reasons and is
1270  /// *unsound* ! Fences cannot, in general, be used to restore sequential
1271  /// consistency. For example, consider the following example:
1272  /// atomic<int> x = y = 0;
1273  /// int r1, r2, r3, r4;
1274  /// Thread 0:
1275  /// x.store(1);
1276  /// Thread 1:
1277  /// y.store(1);
1278  /// Thread 2:
1279  /// r1 = x.load();
1280  /// r2 = y.load();
1281  /// Thread 3:
1282  /// r3 = y.load();
1283  /// r4 = x.load();
1284  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
1285  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
1286  /// IR-level fences can prevent it.
1287  /// @{
1289  AtomicOrdering Ord, bool IsStore,
1290  bool IsLoad) const {
1291  if (isReleaseOrStronger(Ord) && IsStore)
1292  return Builder.CreateFence(Ord);
1293  else
1294  return nullptr;
1295  }
1296 
1298  AtomicOrdering Ord, bool IsStore,
1299  bool IsLoad) const {
1300  if (isAcquireOrStronger(Ord))
1301  return Builder.CreateFence(Ord);
1302  else
1303  return nullptr;
1304  }
1305  /// @}
1306 
  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute. This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
  // The default emits nothing; targets override if their LL needs balancing.
  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
1314 
1315  /// Returns true if the given (atomic) store should be expanded by the
1316  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
1317  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
1318  return false;
1319  }
1320 
1321  /// Returns true if arguments should be sign-extended in lib calls.
1322  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
1323  return IsSigned;
1324  }
1325 
1326  /// Returns how the given (atomic) load should be expanded by the
1327  /// IR-level AtomicExpand pass.
1330  }
1331 
1332  /// Returns true if the given atomic cmpxchg should be expanded by the
1333  /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
1334  /// (through emitLoadLinked() and emitStoreConditional()).
1336  return false;
1337  }
1338 
1339  /// Returns how the IR-level AtomicExpand pass should expand the given
1340  /// AtomicRMW, if at all. Default is to never expand.
1343  }
1344 
1345  /// On some platforms, an AtomicRMW that never actually modifies the value
1346  /// (such as fetch_add of 0) can be turned into a fence followed by an
1347  /// atomic load. This may sound useless, but it makes it possible for the
1348  /// processor to keep the cacheline shared, dramatically improving
1349  /// performance. And such idempotent RMWs are useful for implementing some
1350  /// kinds of locks, see for example (justification + benchmarks):
1351  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
1352  /// This method tries doing that transformation, returning the atomic load if
1353  /// it succeeds, and nullptr otherwise.
1354  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
1355  /// another round of expansion.
1356  virtual LoadInst *
1358  return nullptr;
1359  }
1360 
1361  /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
1362  /// SIGN_EXTEND, or ANY_EXTEND).
1364  return ISD::ZERO_EXTEND;
1365  }
1366 
1367  /// @}
1368 
1369  /// Returns true if we should normalize
1370  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
1371  /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
1372  /// that it saves us from materializing N0 and N1 in an integer register.
1373  /// Targets that are able to perform and/or on flags should return false here.
1375  EVT VT) const {
1376  // If a target has multiple condition registers, then it likely has logical
1377  // operations on those registers.
1379  return false;
1380  // Only do the transform if the value won't be split into multiple
1381  // registers.
1382  LegalizeTypeAction Action = getTypeAction(Context, VT);
1383  return Action != TypeExpandInteger && Action != TypeExpandFloat &&
1384  Action != TypeSplitVector;
1385  }
1386 
1387  //===--------------------------------------------------------------------===//
1388  // TargetLowering Configuration Methods - These methods should be invoked by
1389  // the derived class constructor to configure this object for the target.
1390  //
1391 protected:
1392  /// Specify how the target extends the result of integer and floating point
1393  /// boolean values from i1 to a wider type. See getBooleanContents.
1395  BooleanContents = Ty;
1396  BooleanFloatContents = Ty;
1397  }
1398 
1399  /// Specify how the target extends the result of integer and floating point
1400  /// boolean values from i1 to a wider type. See getBooleanContents.
1402  BooleanContents = IntTy;
1403  BooleanFloatContents = FloatTy;
1404  }
1405 
1406  /// Specify how the target extends the result of a vector boolean value from a
1407  /// vector of i1 to a wider type. See getBooleanContents.
1409  BooleanVectorContents = Ty;
1410  }
1411 
1412  /// Specify the target scheduling preference.
1414  SchedPreferenceInfo = Pref;
1415  }
1416 
1417  /// Indicate whether this target prefers to use _setjmp to implement
1418  /// llvm.setjmp or the version without _. Defaults to false.
1419  void setUseUnderscoreSetJmp(bool Val) {
1420  UseUnderscoreSetJmp = Val;
1421  }
1422 
1423  /// Indicate whether this target prefers to use _longjmp to implement
1424  /// llvm.longjmp or the version without _. Defaults to false.
1425  void setUseUnderscoreLongJmp(bool Val) {
1426  UseUnderscoreLongJmp = Val;
1427  }
1428 
1429  /// Indicate the minimum number of blocks to generate jump tables.
1430  void setMinimumJumpTableEntries(unsigned Val);
1431 
1432  /// Indicate the maximum number of entries in jump tables.
1433  /// Set to zero to generate unlimited jump tables.
1434  void setMaximumJumpTableSize(unsigned);
1435 
1436  /// If set to a physical register, this specifies the register that
1437  /// llvm.savestack/llvm.restorestack should save and restore.
1439  StackPointerRegisterToSaveRestore = R;
1440  }
1441 
1442  /// Tells the code generator that the target has multiple (allocatable)
1443  /// condition registers that can be used to store the results of comparisons
1444  /// for use by selects and conditional branches. With multiple condition
1445  /// registers, the code generator will not aggressively sink comparisons into
1446  /// the blocks of their users.
1447  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
1448  HasMultipleConditionRegisters = hasManyRegs;
1449  }
1450 
1451  /// Tells the code generator that the target has BitExtract instructions.
1452  /// The code generator will aggressively sink "shift"s into the blocks of
1453  /// their users if the users will generate "and" instructions which can be
1454  /// combined with "shift" to BitExtract instructions.
1455  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
1456  HasExtractBitsInsn = hasExtractInsn;
1457  }
1458 
1459  /// Tells the code generator not to expand logic operations on comparison
1460  /// predicates into separate sequences that increase the amount of flow
1461  /// control.
1462  void setJumpIsExpensive(bool isExpensive = true);
1463 
1464  /// Tells the code generator that this target supports floating point
1465  /// exceptions and cares about preserving floating point exception behavior.
1466  void setHasFloatingPointExceptions(bool FPExceptions = true) {
1467  HasFloatingPointExceptions = FPExceptions;
1468  }
1469 
1470  /// Tells the code generator which bitwidths to bypass.
1471  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
1472  BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
1473  }
1474 
1475  /// Add the specified register class as an available regclass for the
1476  /// specified value type. This indicates the selector can handle values of
1477  /// that class natively.
1479  assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
1480  RegClassForVT[VT.SimpleTy] = RC;
1481  }
1482 
1483  /// Return the largest legal super-reg register class of the register class
1484  /// for the specified type and its associated "cost".
1485  virtual std::pair<const TargetRegisterClass *, uint8_t>
1486  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
1487 
1488  /// Once all of the register classes are added, this allows us to compute
1489  /// derived properties we expose.
1491 
1492  /// Indicate that the specified operation does not work with the specified
1493  /// type and indicate what to do about it.
1494  void setOperationAction(unsigned Op, MVT VT,
1495  LegalizeAction Action) {
1496  assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
1497  OpActions[(unsigned)VT.SimpleTy][Op] = Action;
1498  }
1499 
1500  /// Indicate that the specified load with extension does not work with the
1501  /// specified type and indicate what to do about it.
1502  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
1503  LegalizeAction Action) {
1504  assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
1505  MemVT.isValid() && "Table isn't big enough!");
1506  assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1507  unsigned Shift = 4 * ExtType;
1508  LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
1509  LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
1510  }
1511 
1512  /// Indicate that the specified truncating store does not work with the
1513  /// specified type and indicate what to do about it.
1514  void setTruncStoreAction(MVT ValVT, MVT MemVT,
1515  LegalizeAction Action) {
1516  assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
1517  TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
1518  }
1519 
1520  /// Indicate that the specified indexed load does or does not work with the
1521  /// specified type and indicate what to do abort it.
1522  ///
1523  /// NOTE: All indexed mode loads are initialized to Expand in
1524  /// TargetLowering.cpp
1525  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
1526  LegalizeAction Action) {
1527  assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1528  (unsigned)Action < 0xf && "Table isn't big enough!");
1529  // Load action are kept in the upper half.
1530  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
1531  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
1532  }
1533 
1534  /// Indicate that the specified indexed store does or does not work with the
1535  /// specified type and indicate what to do about it.
1536  ///
1537  /// NOTE: All indexed mode stores are initialized to Expand in
1538  /// TargetLowering.cpp
1539  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
1540  LegalizeAction Action) {
1541  assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
1542  (unsigned)Action < 0xf && "Table isn't big enough!");
1543  // Store action are kept in the lower half.
1544  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
1545  IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
1546  }
1547 
1548  /// Indicate that the specified condition code is or isn't supported on the
1549  /// target and indicate what to do about it.
1551  LegalizeAction Action) {
1552  assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
1553  "Table isn't big enough!");
1554  assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
1555  /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the 32-bit
1556  /// value and the upper 29 bits index into the second dimension of the array
1557  /// to select what 32-bit value to use.
1558  uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1559  CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
1560  CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
1561  }
1562 
1563  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
1564  /// to trying a larger integer/fp until it can find one that works. If that
1565  /// default is insufficient, this method can be used by the target to override
1566  /// the default.
1567  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1568  PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
1569  }
1570 
1571  /// Convenience method to set an operation to Promote and specify the type
1572  /// in a single call.
1573  void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
1574  setOperationAction(Opc, OrigVT, Promote);
1575  AddPromotedToType(Opc, OrigVT, DestVT);
1576  }
1577 
  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  // NOTE(review): the signature line (taking the ISD::NodeType NT) was lost
  // in extraction.
    // Set flag NT in the packed bit-vector: 8 flags per byte, byte index
    // NT/8, bit index NT%8.
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
1585 
  /// Set the target's required jmp_buf buffer size (in bytes); default is 200
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
  /// 0
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// Set the target's minimum function alignment (in log2(bytes))
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// Set the target's preferred function alignment. This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes))
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// Set the target's preferred loop alignment. Default alignment is zero, it
  /// means the target does not care about loop alignment. The alignment is
  /// specified in log2(bytes). The target may also override
  /// getPrefLoopAlignment to provide per-loop values.
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// Set the minimum stack alignment of an argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// Set the maximum atomic operation size supported by the
  /// backend. Atomic operations greater than this size (as well as
  /// ones that are not naturally aligned), will be expanded by
  /// AtomicExpandPass into an __atomic_* library call.
  void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
    MaxAtomicSizeInBitsSupported = SizeInBits;
  }

  /// Set the minimum cmpxchg or ll/sc size supported by the backend.
  void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
    MinCmpXchgSizeInBits = SizeInBits;
  }
1634 
1635 public:
1636  //===--------------------------------------------------------------------===//
1637  // Addressing mode description hooks (used by LSR etc).
1638  //
1639 
1640  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1641  /// instructions reading the address. This allows as much computation as
1642  /// possible to be done in the address mode for that operand. This hook lets
1643  /// targets also pass back when this should be done on intrinsics which
1644  /// load/store.
1645  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1646  SmallVectorImpl<Value*> &/*Ops*/,
1647  Type *&/*AccessTy*/,
1648  unsigned AddrSpace = 0) const {
1649  return false;
1650  }
1651 
1652  /// This represents an addressing mode of:
1653  /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1654  /// If BaseGV is null, there is no BaseGV.
1655  /// If BaseOffs is zero, there is no base offset.
1656  /// If HasBaseReg is false, there is no base register.
1657  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
1658  /// no scale.
1659  struct AddrMode {
1660  GlobalValue *BaseGV = nullptr;
1661  int64_t BaseOffs = 0;
1662  bool HasBaseReg = false;
1663  int64_t Scale = 0;
1664  AddrMode() = default;
1665  };
1666 
  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type. TODO: Handle
  /// pre/postinc as well.
  ///
  /// \p AddrSpace is the address space of the access; if it cannot be
  /// determined by the caller, it will be -1.
  ///
  /// TODO: Remove default argument
  virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                                     Type *Ty, unsigned AddrSpace) const;
1679 
1680  /// \brief Return the cost of the scaling factor used in the addressing mode
1681  /// represented by AM for this target, for a load/store of the specified type.
1682  ///
1683  /// If the AM is supported, the return value must be >= 0.
1684  /// If the AM is not supported, it returns a negative value.
1685  /// TODO: Handle pre/postinc as well.
1686  /// TODO: Remove default argument
1687  virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
1688  Type *Ty, unsigned AS = 0) const {
1689  // Default: assume that any scaling factor used in a legal AM is free.
1690  if (isLegalAddressingMode(DL, AM, Ty, AS))
1691  return 0;
1692  return -1;
1693  }
1694 
1695  virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const {
1696  return true;
1697  }
1698 
1699  /// Return true if the specified immediate is legal icmp immediate, that is
1700  /// the target has icmp instructions which can compare a register against the
1701  /// immediate without having to materialize the immediate into a register.
1702  virtual bool isLegalICmpImmediate(int64_t) const {
1703  return true;
1704  }
1705 
1706  /// Return true if the specified immediate is legal add immediate, that is the
1707  /// target has add instructions which can add a register with the immediate
1708  /// without having to materialize the immediate into a register.
1709  virtual bool isLegalAddImmediate(int64_t) const {
1710  return true;
1711  }
1712 
1713  /// Return true if it's significantly cheaper to shift a vector by a uniform
1714  /// scalar than by an amount which will vary across each lane. On x86, for
1715  /// example, there is a "psllw" instruction for the former case, but no simple
1716  /// instruction for a general "a << b" operation on vectors.
1717  virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1718  return false;
1719  }
1720 
1721  /// Return true if it's free to truncate a value of type FromTy to type
1722  /// ToTy. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
1723  /// by referencing its sub-register AX.
1724  /// Targets must return false when FromTy <= ToTy.
1725  virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
1726  return false;
1727  }
1728 
1729  /// Return true if a truncation from FromTy to ToTy is permitted when deciding
1730  /// whether a call is in tail position. Typically this means that both results
1731  /// would be assigned to the same register or stack slot, but it could mean
1732  /// the target performs adequate checks of its own before proceeding with the
1733  /// tail call. Targets must return false when FromTy <= ToTy.
1734  virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
1735  return false;
1736  }
1737 
1738  virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
1739  return false;
1740  }
1741 
1742  virtual bool isProfitableToHoist(Instruction *I) const { return true; }
1743 
1744  /// Return true if the extension represented by \p I is free.
1745  /// Unlikely the is[Z|FP]ExtFree family which is based on types,
1746  /// this method can use the context provided by \p I to decide
1747  /// whether or not \p I is free.
1748  /// This method extends the behavior of the is[Z|FP]ExtFree family.
1749  /// In other words, if is[Z|FP]Free returns true, then this method
1750  /// returns true as well. The converse is not true.
1751  /// The target can perform the adequate checks by overriding isExtFreeImpl.
1752  /// \pre \p I must be a sign, zero, or fp extension.
1753  bool isExtFree(const Instruction *I) const {
1754  switch (I->getOpcode()) {
1755  case Instruction::FPExt:
1756  if (isFPExtFree(EVT::getEVT(I->getType())))
1757  return true;
1758  break;
1759  case Instruction::ZExt:
1760  if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
1761  return true;
1762  break;
1763  case Instruction::SExt:
1764  break;
1765  default:
1766  llvm_unreachable("Instruction is not an extension");
1767  }
1768  return isExtFreeImpl(I);
1769  }
1770 
1771  /// Return true if any actual instruction that defines a value of type FromTy
1772  /// implicitly zero-extends the value to ToTy in the result register.
1773  ///
1774  /// The function should return true when it is likely that the truncate can
1775  /// be freely folded with an instruction defining a value of FromTy. If
1776  /// the defining instruction is unknown (because you're looking at a
1777  /// function argument, PHI, etc.) then the target may require an
1778  /// explicit truncate, which is not necessarily free, but this function
1779  /// does not deal with those cases.
1780  /// Targets must return false when FromTy >= ToTy.
1781  virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
1782  return false;
1783  }
1784 
1785  virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
1786  return false;
1787  }
1788 
  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  /// a = load i64* addr
  /// b = trunc i64 a to i32
  /// c = lshr i64 a, 32
  /// d = trunc i64 c to i32
  /// will be optimized into:
  /// b = load i32* addr1
  /// d = load i32* addr2
  /// Where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }
1814 
1815  /// \brief Get the maximum supported factor for interleaved memory accesses.
1816  /// Default to be the minimum interleave factor: 2.
1817  virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
1818 
  /// \brief Lower an interleaved load to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p LI is the vector load instruction.
  /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
  /// \p Indices is the corresponding indices for each shufflevector.
  /// \p Factor is the interleave factor.
  // NOTE(review): the parameter line declaring the Shuffles list was lost in
  // extraction; confirm against upstream.
  virtual bool lowerInterleavedLoad(LoadInst *LI,
                                    ArrayRef<unsigned> Indices,
                                    unsigned Factor) const {
    return false;
  }
1832 
  /// \brief Lower an interleaved store to target specific intrinsics. Return
  /// true on success.
  ///
  /// \p SI is the vector store instruction.
  /// \p SVI is the shufflevector to RE-interleave the stored vector.
  /// \p Factor is the interleave factor.
  // NOTE(review): the signature line of lowerInterleavedStore (declaring the
  // store and shufflevector parameters) was lost in extraction.
                                     unsigned Factor) const {
    return false;
  }
1843 
1844  /// Return true if zero-extending the specific node Val to type VT2 is free
1845  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1846  /// because it's folded such as X86 zero-extending loads).
1847  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1848  return isZExtFree(Val.getValueType(), VT2);
1849  }
1850 
1851  /// Return true if an fpext operation is free (for instance, because
1852  /// single-precision floating-point numbers are implicitly extended to
1853  /// double-precision).
1854  virtual bool isFPExtFree(EVT VT) const {
1855  assert(VT.isFloatingPoint());
1856  return false;
1857  }
1858 
1859  /// Return true if folding a vector load into ExtVal (a sign, zero, or any
1860  /// extend node) is profitable.
1861  virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
1862 
1863  /// Return true if an fneg operation is free to the point where it is never
1864  /// worthwhile to replace it with a bitwise operation.
1865  virtual bool isFNegFree(EVT VT) const {
1866  assert(VT.isFloatingPoint());
1867  return false;
1868  }
1869 
1870  /// Return true if an fabs operation is free to the point where it is never
1871  /// worthwhile to replace it with a bitwise operation.
1872  virtual bool isFAbsFree(EVT VT) const {
1873  assert(VT.isFloatingPoint());
1874  return false;
1875  }
1876 
1877  /// Return true if an FMA operation is faster than a pair of fmul and fadd
1878  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1879  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1880  ///
1881  /// NOTE: This may be called before legalization on types for which FMAs are
1882  /// not legal, but should return true if those types will eventually legalize
1883  /// to types that support FMAs. After legalization, it will only be called on
1884  /// types that support FMAs (via Legal or Custom actions)
1885  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1886  return false;
1887  }
1888 
1889  /// Return true if it's profitable to narrow operations of type VT1 to
1890  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
1891  /// i32 to i16.
1892  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1893  return false;
1894  }
1895 
1896  /// \brief Return true if it is beneficial to convert a load of a constant to
1897  /// just the constant itself.
1898  /// On some targets it might be more efficient to use a combination of
1899  /// arithmetic instructions to materialize the constant instead of loading it
1900  /// from a constant pool.
1901  virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1902  Type *Ty) const {
1903  return false;
1904  }
1905 
1906  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
1907  /// with this index. This is needed because EXTRACT_SUBVECTOR usually
1908  /// has custom lowering that depends on the index of the first element,
1909  /// and only the target knows which lowering is cheap.
1910  virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
1911  return false;
1912  }
1913 
1914  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
1915  // even if the vector itself has multiple uses.
1916  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
1917  return false;
1918  }
1919 
1920  //===--------------------------------------------------------------------===//
1921  // Runtime Library hooks
1922  //
1923 
  /// Rename the default libcall routine name for the specified libcall.
  // NOTE(review): the signature line of setLibcallName (taking the
  // RTLIB::Libcall and the replacement name) was lost in extraction.
    LibcallRoutineNames[Call] = Name;
  }

  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }
1933 
  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  // NOTE(review): the four accessors below each lost the first line of their
  // signature in extraction; each is indexed by an RTLIB::Libcall (the setters
  // additionally take the new value). Confirm against upstream.
    CmpLibcallCCs[Call] = CC;
  }

  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
    return CmpLibcallCCs[Call];
  }

  /// Set the CallingConv that should be used for the specified libcall.
    LibcallCallingConvs[Call] = CC;
  }

  /// Get the CallingConv that should be used for the specified libcall.
    return LibcallCallingConvs[Call];
  }
1955 
1956 private:
1957  const TargetMachine &TM;
1958 
1959  /// Tells the code generator that the target has multiple (allocatable)
1960  /// condition registers that can be used to store the results of comparisons
1961  /// for use by selects and conditional branches. With multiple condition
1962  /// registers, the code generator will not aggressively sink comparisons into
1963  /// the blocks of their users.
1964  bool HasMultipleConditionRegisters;
1965 
1966  /// Tells the code generator that the target has BitExtract instructions.
1967  /// The code generator will aggressively sink "shift"s into the blocks of
1968  /// their users if the users will generate "and" instructions which can be
1969  /// combined with "shift" to BitExtract instructions.
1970  bool HasExtractBitsInsn;
1971 
1972  /// Tells the code generator to bypass slow divide or remainder
1973  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
1974  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1975  /// div/rem when the operands are positive and less than 256.
1976  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1977 
1978  /// Tells the code generator that it shouldn't generate extra flow control
1979  /// instructions and should attempt to combine flow control instructions via
1980  /// predication.
1981  bool JumpIsExpensive;
1982 
1983  /// Whether the target supports or cares about preserving floating point
1984  /// exception behavior.
1985  bool HasFloatingPointExceptions;
1986 
1987  /// This target prefers to use _setjmp to implement llvm.setjmp.
1988  ///
1989  /// Defaults to false.
1990  bool UseUnderscoreSetJmp;
1991 
1992  /// This target prefers to use _longjmp to implement llvm.longjmp.
1993  ///
1994  /// Defaults to false.
1995  bool UseUnderscoreLongJmp;
1996 
1997  /// Information about the contents of the high-bits in boolean values held in
1998  /// a type wider than i1. See getBooleanContents.
1999  BooleanContent BooleanContents;
2000 
2001  /// Information about the contents of the high-bits in boolean values held in
2002  /// a type wider than i1. See getBooleanContents.
2003  BooleanContent BooleanFloatContents;
2004 
2005  /// Information about the contents of the high-bits in boolean vector values
2006  /// when the element type is wider than i1. See getBooleanContents.
2007  BooleanContent BooleanVectorContents;
2008 
2009  /// The target scheduling preference: shortest possible total cycles or lowest
2010  /// register usage.
2011  Sched::Preference SchedPreferenceInfo;
2012 
2013  /// The size, in bytes, of the target's jmp_buf buffers
2014  unsigned JumpBufSize;
2015 
2016  /// The alignment, in bytes, of the target's jmp_buf buffers
2017  unsigned JumpBufAlignment;
2018 
2019  /// The minimum alignment that any argument on the stack needs to have.
2020  unsigned MinStackArgumentAlignment;
2021 
2022  /// The minimum function alignment (used when optimizing for size, and to
2023  /// prevent explicitly provided alignment from leading to incorrect code).
2024  unsigned MinFunctionAlignment;
2025 
2026  /// The preferred function alignment (used when alignment unspecified and
2027  /// optimizing for speed).
2028  unsigned PrefFunctionAlignment;
2029 
2030  /// The preferred loop alignment.
2031  unsigned PrefLoopAlignment;
2032 
2033  /// Size in bits of the maximum atomics size the backend supports.
2034  /// Accesses larger than this will be expanded by AtomicExpandPass.
2035  unsigned MaxAtomicSizeInBitsSupported;
2036 
2037  /// Size in bits of the minimum cmpxchg or ll/sc operation the
2038  /// backend supports.
2039  unsigned MinCmpXchgSizeInBits;
2040 
2041  /// If set to a physical register, this specifies the register that
2042  /// llvm.stacksave/llvm.stackrestore should save and restore.
2043  unsigned StackPointerRegisterToSaveRestore;
2044 
2045  /// This indicates the default register class to use for each ValueType the
2046  /// target supports natively.
2047  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
2048  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
2049  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
2050 
2051  /// This indicates the "representative" register class to use for each
2052  /// ValueType the target supports natively. This information is used by the
2053  /// scheduler to track register pressure. By default, the representative
2054  /// register class is the largest legal super-reg register class of the
2055  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
2056  /// representative class would be GR32.
2057  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
2058 
2059  /// This indicates the "cost" of the "representative" register class for each
2060  /// ValueType. The cost is used by the scheduler to approximate register
2061  /// pressure.
2062  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
2063 
2064  /// For any value types we are promoting or expanding, this contains the value
2065  /// type that we are changing to. For Expanded types, this contains one step
2066  /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
2067  /// (e.g. i64 -> i16). For types natively supported by the system, this holds
2068  /// the same type (e.g. i32 -> i32).
2069  MVT TransformToType[MVT::LAST_VALUETYPE];
2070 
2071  /// For each operation and each value type, keep a LegalizeAction that
2072  /// indicates how instruction selection should deal with the operation. Most
2073  /// operations are Legal (aka, supported natively by the target), but
2074  /// operations that are not should be described. Note that operations on
2075  /// non-legal value types are not described here.
2077 
2078  /// For each load extension type and each value type, keep a LegalizeAction
2079  /// that indicates how instruction selection should deal with a load of a
2080  /// specific value type and extension type. Uses 4-bits to store the action
2081  /// for each of the 4 load ext types.
2082  uint16_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
2083 
2084  /// For each value type pair keep a LegalizeAction that indicates whether a
2085  /// truncating store of a specific value type and truncating type is legal.
2087 
2088  /// For each indexed mode and each value type, keep a pair of LegalizeAction
2089  /// that indicates how instruction selection should deal with the load /
2090  /// store.
2091  ///
2092  /// The first dimension is the value_type for the reference. The second
2093  /// dimension represents the various modes for load store.
2094  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
2095 
2096  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
2097  /// indicates how instruction selection should deal with the condition code.
2098  ///
2099  /// Because each CC action takes up 4 bits, we need to have the array size be
2100  /// large enough to fit all of the value types. This can be done by rounding
2101  /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
2102  uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
2103 
2104 protected:
2106 
2107 private:
2108  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
2109 
2110  /// Targets can specify ISD nodes that they would like PerformDAGCombine
2111  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
2112  /// array.
2113  unsigned char
2114  TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
2115 
2116  /// For operations that must be promoted to a specific type, this holds the
2117  /// destination type. This map should be sparse, so don't hold it as an
2118  /// array.
2119  ///
2120  /// Targets add entries to this map with AddPromotedToType(..), clients access
2121  /// this with getTypeToPromoteTo(..).
2122  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
2123  PromoteToType;
2124 
2125  /// Stores the name of each libcall.
2126  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
2127 
2128  /// The ISD::CondCode that should be used to test the result of each of the
2129  /// comparison libcall against zero.
2130  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
2131 
2132  /// Stores the CallingConv that should be used for each libcall.
2133  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
2134 
2135 protected:
2136  /// Return true if the extension represented by \p I is free.
2137  /// \pre \p I is a sign, zero, or fp extension and
2138  /// is[Z|FP]ExtFree of the related types is not true.
2139  virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
2140 
2141  /// Depth that GatherAllAliases should continue looking for chain
2142  /// dependencies when trying to find a more preferable chain. As an
2143  /// approximation, this should be more than the number of consecutive stores
2144  /// expected to be merged.
2146 
2147  /// \brief Specify maximum number of store instructions per memset call.
2148  ///
2149  /// When lowering \@llvm.memset this field specifies the maximum number of
2150  /// store operations that may be substituted for the call to memset. Targets
2151  /// must set this value based on the cost threshold for that target. Targets
2152  /// should assume that the memset will be done using as many of the largest
2153  /// store operations first, followed by smaller ones, if necessary, per
2154  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
2155  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
2156  /// store. This only applies to setting a constant array of a constant size.
2158 
2159  /// Maximum number of stores operations that may be substituted for the call
2160  /// to memset, used for functions with OptSize attribute.
2162 
2163  /// \brief Specify maximum bytes of store instructions per memcpy call.
2164  ///
2165  /// When lowering \@llvm.memcpy this field specifies the maximum number of
2166  /// store operations that may be substituted for a call to memcpy. Targets
2167  /// must set this value based on the cost threshold for that target. Targets
2168  /// should assume that the memcpy will be done using as many of the largest
2169  /// store operations first, followed by smaller ones, if necessary, per
2170  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
2171  /// with 32-bit alignment would result in one 4-byte store, a one 2-byte store
2172  /// and one 1-byte store. This only applies to copying a constant array of
2173  /// constant size.
2175 
2176  /// Maximum number of store operations that may be substituted for a call to
2177  /// memcpy, used for functions with OptSize attribute.
2179 
2180  /// \brief Specify maximum bytes of store instructions per memmove call.
2181  ///
2182  /// When lowering \@llvm.memmove this field specifies the maximum number of
2183  /// store instructions that may be substituted for a call to memmove. Targets
2184  /// must set this value based on the cost threshold for that target. Targets
2185  /// should assume that the memmove will be done using as many of the largest
2186  /// store operations first, followed by smaller ones, if necessary, per
2187  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2188  /// with 8-bit alignment would result in nine 1-byte stores. This only
2189  /// applies to copying a constant array of constant size.
2191 
2192  /// Maximum number of store instructions that may be substituted for a call to
2193  /// memmove, used for functions with OptSize attribute.
2195 
2196  /// Tells the code generator that select is more expensive than a branch if
2197  /// the branch is usually predicted right.
2199 
2200  /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
2201  /// a mask of a single bit, a compare, and a branch into a single instruction.
2203 
2204  /// \see enableExtLdPromotion.
2206 
2207  /// Return true if the value types that can be represented by the specified
2208  /// register class are all legal.
2209  bool isLegalRC(const TargetRegisterClass *RC) const;
2210 
2211  /// Replace/modify any TargetFrameIndex operands with a target-dependent
2212  /// sequence of memory operands that is recognized by PrologEpilogInserter.
2214  MachineBasicBlock *MBB) const;
2215 };
2216 
2217 /// This class defines information used to lower LLVM code to legal SelectionDAG
2218 /// operators that the target instruction selector can accept natively.
2219 ///
2220 /// This class also defines callbacks that targets must implement to lower
2221 /// target-specific constructs to SelectionDAG operators.
2223 public:
2224  struct DAGCombinerInfo;
2225 
2226  TargetLowering(const TargetLowering&) = delete;
2227  void operator=(const TargetLowering&) = delete;
2228 
2229  /// NOTE: The TargetMachine owns TLOF.
2230  explicit TargetLowering(const TargetMachine &TM);
2231 
2232  bool isPositionIndependent() const;
2233 
2234  /// Returns true by value, base pointer and offset pointer and addressing mode
2235  /// by reference if the node's address can be legally represented as
2236  /// pre-indexed load / store address.
2237  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2238  SDValue &/*Offset*/,
2239  ISD::MemIndexedMode &/*AM*/,
2240  SelectionDAG &/*DAG*/) const {
2241  return false;
2242  }
2243 
2244  /// Returns true by value, base pointer and offset pointer and addressing mode
2245  /// by reference if this node can be combined with a load / store to form a
2246  /// post-indexed load / store.
2247  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2248  SDValue &/*Base*/,
2249  SDValue &/*Offset*/,
2250  ISD::MemIndexedMode &/*AM*/,
2251  SelectionDAG &/*DAG*/) const {
2252  return false;
2253  }
2254 
2255  /// Return the entry encoding for a jump table in the current function. The
2256  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2257  virtual unsigned getJumpTableEncoding() const;
2258 
  /// Hook for producing the MCExpr of a custom jump-table entry; targets that
  /// report a custom JTEntryKind from getJumpTableEncoding must override it
  /// (the default implementation aborts).
  // NOTE(review): the line declaring this hook's name and first parameter was
  // lost in extraction; confirm against upstream.
  virtual const MCExpr *
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }
2265 
2266  /// Returns relocation base for the given PIC jumptable.
2268  SelectionDAG &DAG) const;
2269 
2270  /// This returns the relocation base for the given PIC jumptable, the same as
2271  /// getPICJumpTableRelocBase, but as an MCExpr.
2272  virtual const MCExpr *
2274  unsigned JTI, MCContext &Ctx) const;
2275 
2276  /// Return true if folding a constant offset with the given GlobalAddress is
2277  /// legal. It is frequently not legal in PIC relocation models.
2278  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2279 
2280  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2281  SDValue &Chain) const;
2282 
2283  void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
2284  SDValue &NewRHS, ISD::CondCode &CCCode,
2285  const SDLoc &DL) const;
2286 
2287  /// Returns a pair of (return value, chain).
2288  /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2289  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2290  EVT RetVT, ArrayRef<SDValue> Ops,
2291  bool isSigned, const SDLoc &dl,
2292  bool doesNotReturn = false,
2293  bool isReturnValueUsed = true) const;
2294 
2295  /// Check whether parameters to a call that are passed in callee saved
2296  /// registers are the same as from the calling function. This needs to be
2297  /// checked for tail call eligibility.
2299  const uint32_t *CallerPreservedMask,
2300  const SmallVectorImpl<CCValAssign> &ArgLocs,
2301  const SmallVectorImpl<SDValue> &OutVals) const;
2302 
2303  //===--------------------------------------------------------------------===//
2304  // TargetLowering Optimization Methods
2305  //
2306 
2307  /// A convenience struct that encapsulates a DAG, and two SDValues for
2308  /// returning information from TargetLowering to its clients that want to
2309  /// combine.
2312  bool LegalTys;
2313  bool LegalOps;
2316 
2318  bool LT, bool LO) :
2319  DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2320 
2321  bool LegalTypes() const { return LegalTys; }
2322  bool LegalOperations() const { return LegalOps; }
2323 
2325  Old = O;
2326  New = N;
2327  return true;
2328  }
2329 
2330  /// Check to see if the specified operand of the specified instruction is a
2331  /// constant integer. If so, check to see if there are any bits set in the
2332  /// constant that are not demanded. If so, shrink the constant and return
2333  /// true.
2334  bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
2335 
2336  /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
2337  /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2338  /// generalized for targets with other types of implicit widening casts.
2339  bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2340  const SDLoc &dl);
2341 
2342  /// Helper for SimplifyDemandedBits that can simplify an operation with
2343  /// multiple uses. This function uses TLI.SimplifyDemandedBits to
2344  /// simplify Operand \p OpIdx of \p User and then updated \p User with
2345  /// the simplified version. No other uses of \p OpIdx are updated.
2346  /// If \p User is the only user of \p OpIdx, this function behaves exactly
2347  /// like TLI.SimplifyDemandedBits except that it also updates the DAG by
2348  /// calling DCI.CommitTargetLoweringOpt.
2349  bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx,
2350  const APInt &Demanded, DAGCombinerInfo &DCI);
2351  };
2352 
2353  /// Look at Op. At this point, we know that only the DemandedMask bits of the
2354  /// result of Op are ever used downstream. If we can use this information to
2355  /// simplify Op, create a new simplified DAG node and return true, returning
2356  /// the original and new nodes in Old and New. Otherwise, analyze the
2357  /// expression and return a mask of KnownOne and KnownZero bits for the
2358  /// expression (used to simplify the caller). The KnownZero/One bits may only
2359  /// be accurate for those bits in the DemandedMask.
 2360  /// \p AssumeSingleUse When this parameter is true, this function will
 2361  /// attempt to simplify \p Op even if there are multiple uses.
 2362  /// Callers are responsible for correctly updating the DAG based on the
 2363  /// results of this function, because simply replacing TLO.Old
 2364  /// with TLO.New will be incorrect when this parameter is true and TLO.Old
 2365  /// has multiple uses.
2366  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2367  APInt &KnownZero, APInt &KnownOne,
2368  TargetLoweringOpt &TLO,
2369  unsigned Depth = 0,
2370  bool AssumeSingleUse = false) const;
2371 
2372  /// Determine which of the bits specified in Mask are known to be either zero
2373  /// or one and return them in the KnownZero/KnownOne bitsets.
2374  virtual void computeKnownBitsForTargetNode(const SDValue Op,
2375  APInt &KnownZero,
2376  APInt &KnownOne,
2377  const SelectionDAG &DAG,
2378  unsigned Depth = 0) const;
2379 
2380  /// This method can be implemented by targets that want to expose additional
2381  /// information about sign bits to the DAG Combiner.
2382  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2383  const SelectionDAG &DAG,
2384  unsigned Depth = 0) const;
2385 
2387  void *DC; // The DAG Combiner object.
2390 
2391  public:
2393 
2394  DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2395  : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2396 
2397  bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2400  return Level == AfterLegalizeDAG;
2401  }
2403  bool isCalledByLegalizer() const { return CalledByLegalizer; }
2404 
2405  void AddToWorklist(SDNode *N);
2406  SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
2407  SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2408  SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2409 
2410  void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2411  };
2412 
 2413  /// Return true if N is a constant or constant vector equal to the true value
 2414  /// from getBooleanContents().
2415  bool isConstTrueVal(const SDNode *N) const;
2416 
 2418  /// Return true if N is a constant or constant vector equal to the false value
 2419  /// from getBooleanContents().
2419  bool isConstFalseVal(const SDNode *N) const;
2420 
2421  /// Return a constant of type VT that contains a true value that respects
2422  /// getBooleanContents()
2423  SDValue getConstTrueVal(SelectionDAG &DAG, EVT VT, const SDLoc &DL) const;
2424 
2425  /// Return if \p N is a True value when extended to \p VT.
2426  bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const;
2427 
2428  /// Try to simplify a setcc built with the specified operands and cc. If it is
2429  /// unable to simplify it, return a null SDValue.
2431  bool foldBooleans, DAGCombinerInfo &DCI,
2432  const SDLoc &dl) const;
2433 
2434  /// Returns true (and the GlobalValue and the offset) if the node is a
2435  /// GlobalAddress + offset.
2436  virtual bool
2437  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2438 
2439  /// This method will be invoked for all target nodes and for any
2440  /// target-independent nodes that the target has registered with invoke it
2441  /// for.
2442  ///
2443  /// The semantics are as follows:
2444  /// Return Value:
2445  /// SDValue.Val == 0 - No change was made
2446  /// SDValue.Val == N - N was replaced, is dead, and is already handled.
2447  /// otherwise - N should be replaced by the returned Operand.
2448  ///
2449  /// In addition, methods provided by DAGCombinerInfo may be used to perform
2450  /// more complex transformations.
2451  ///
2452  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
2453 
 2454  /// Return true if it is profitable to move a following shift through this
 2455  /// node, adjusting any immediate operands as necessary to preserve semantics.
 2456  /// This transformation may not be desirable if it disrupts a particularly
 2457  /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
 2458  /// By default, it returns true.
 2459  virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
 2460  return true;
 2461  }
2462 
2463  /// Return true if the target has native support for the specified value type
2464  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2465  /// i16 is legal, but undesirable since i16 instruction encodings are longer
2466  /// and some i16 instructions are slow.
2467  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2468  // By default, assume all legal types are desirable.
2469  return isTypeLegal(VT);
2470  }
2471 
 2472  /// Return true if it is profitable for dag combiner to transform a floating
 2473  /// point op of specified opcode to an equivalent op of an integer
 2474  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
 2475  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
 2476  EVT /*VT*/) const {
 2477  return false;
 2478  }
2479 
2480  /// This method query the target whether it is beneficial for dag combiner to
2481  /// promote the specified node. If true, it should return the desired
2482  /// promotion type by reference.
2483  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2484  return false;
2485  }
2486 
2487  /// Return true if the target supports swifterror attribute. It optimizes
2488  /// loads and stores to reading and writing a specific register.
2489  virtual bool supportSwiftError() const {
2490  return false;
2491  }
2492 
2493  /// Return true if the target supports that a subset of CSRs for the given
2494  /// machine function is handled explicitly via copies.
2495  virtual bool supportSplitCSR(MachineFunction *MF) const {
2496  return false;
2497  }
2498 
2499  /// Return true if the MachineFunction contains a COPY which would imply
2500  /// HasCopyImplyingStackAdjustment.
2502  return false;
2503  }
2504 
2505  /// Perform necessary initialization to handle a subset of CSRs explicitly
2506  /// via copies. This function is called at the beginning of instruction
2507  /// selection.
2508  virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
2509  llvm_unreachable("Not Implemented");
2510  }
2511 
2512  /// Insert explicit copies in entry and exit blocks. We copy a subset of
2513  /// CSRs to virtual registers in the entry block, and copy them back to
2514  /// physical registers in the exit blocks. This function is called at the end
2515  /// of instruction selection.
2516  virtual void insertCopiesSplitCSR(
2517  MachineBasicBlock *Entry,
2518  const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
2519  llvm_unreachable("Not Implemented");
2520  }
2521 
2522  //===--------------------------------------------------------------------===//
2523  // Lowering methods - These methods must be implemented by targets so that
2524  // the SelectionDAGBuilder code knows how to lower these.
2525  //
2526 
2527  /// This hook must be implemented to lower the incoming (formal) arguments,
2528  /// described by the Ins array, into the specified DAG. The implementation
2529  /// should fill in the InVals array with legal-type argument values, and
2530  /// return the resulting token chain value.
2531  ///
2533  SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
2534  const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
2535  SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
2536  llvm_unreachable("Not Implemented");
2537  }
2538 
2539  struct ArgListEntry {
2542  bool isSExt : 1;
2543  bool isZExt : 1;
2544  bool isInReg : 1;
2545  bool isSRet : 1;
2546  bool isNest : 1;
2547  bool isByVal : 1;
2548  bool isInAlloca : 1;
2549  bool isReturned : 1;
2550  bool isSwiftSelf : 1;
2551  bool isSwiftError : 1;
2552  uint16_t Alignment;
2553 
2557  Alignment(0) {}
2558 
2559  void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2560  };
2561  typedef std::vector<ArgListEntry> ArgListTy;
2562 
2563  /// This structure contains all information that is necessary for lowering
2564  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2565  /// needs to lower a call, and targets will see this struct in their LowerCall
2566  /// implementation.
2570  bool RetSExt : 1;
2571  bool RetZExt : 1;
2572  bool IsVarArg : 1;
2573  bool IsInReg : 1;
2574  bool DoesNotReturn : 1;
2576  bool IsConvergent : 1;
2577 
2578  // IsTailCall should be modified by implementations of
2579  // TargetLowering::LowerCall that perform tail call conversions.
2581 
2582  unsigned NumFixedArgs;
2594 
2596  : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2599  CallConv(CallingConv::C), DAG(DAG), CS(nullptr), IsPatchPoint(false) {
2600  }
2601 
2603  DL = dl;
2604  return *this;
2605  }
2606 
2608  Chain = InChain;
2609  return *this;
2610  }
2611 
2613  SDValue Target, ArgListTy &&ArgsList) {
2614  RetTy = ResultType;
2615  Callee = Target;
2616  CallConv = CC;
2617  NumFixedArgs = Args.size();
2618  Args = std::move(ArgsList);
2619  return *this;
2620  }
2621 
2623  SDValue Target, ArgListTy &&ArgsList,
2625  RetTy = ResultType;
2626 
2627  IsInReg = Call.paramHasAttr(0, Attribute::InReg);
2628  DoesNotReturn =
2629  Call.doesNotReturn() ||
2630  (!Call.isInvoke() &&
2631  isa<UnreachableInst>(Call.getInstruction()->getNextNode()));
2632  IsVarArg = FTy->isVarArg();
2634  RetSExt = Call.paramHasAttr(0, Attribute::SExt);
2635  RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
2636 
2637  Callee = Target;
2638 
2639  CallConv = Call.getCallingConv();
2640  NumFixedArgs = FTy->getNumParams();
2641  Args = std::move(ArgsList);
2642 
2643  CS = &Call;
2644 
2645  return *this;
2646  }
2647 
2649  IsInReg = Value;
2650  return *this;
2651  }
2652 
2654  DoesNotReturn = Value;
2655  return *this;
2656  }
2657 
2659  IsVarArg = Value;
2660  return *this;
2661  }
2662 
2664  IsTailCall = Value;
2665  return *this;
2666  }
2667 
2670  return *this;
2671  }
2672 
2674  IsConvergent = Value;
2675  return *this;
2676  }
2677 
2679  RetSExt = Value;
2680  return *this;
2681  }
2682 
2684  RetZExt = Value;
2685  return *this;
2686  }
2687 
2689  IsPatchPoint = Value;
2690  return *this;
2691  }
2692 
2694  return Args;
2695  }
2696  };
2697 
2698  /// This function lowers an abstract call to a function into an actual call.
2699  /// This returns a pair of operands. The first element is the return value
2700  /// for the function (if RetTy is not VoidTy). The second element is the
2701  /// outgoing token chain. It calls LowerCall to do the actual lowering.
2702  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
2703 
2704  /// This hook must be implemented to lower calls into the specified
2705  /// DAG. The outgoing arguments to the call are described by the Outs array,
2706  /// and the values to be returned by the call are described by the Ins
2707  /// array. The implementation should fill in the InVals array with legal-type
2708  /// return values from the call, and return the resulting token chain value.
2709  virtual SDValue
2711  SmallVectorImpl<SDValue> &/*InVals*/) const {
2712  llvm_unreachable("Not Implemented");
2713  }
2714 
2715  /// Target-specific cleanup for formal ByVal parameters.
2716  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2717 
2718  /// This hook should be implemented to check whether the return values
2719  /// described by the Outs array can fit into the return registers. If false
2720  /// is returned, an sret-demotion is performed.
2721  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2722  MachineFunction &/*MF*/, bool /*isVarArg*/,
2723  const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2724  LLVMContext &/*Context*/) const
2725  {
2726  // Return true by default to get preexisting behavior.
2727  return true;
2728  }
2729 
2730  /// This hook must be implemented to lower outgoing return values, described
2731  /// by the Outs array, into the specified DAG. The implementation should
2732  /// return the resulting token chain value.
2733  virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2734  bool /*isVarArg*/,
2735  const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
2736  const SmallVectorImpl<SDValue> & /*OutVals*/,
2737  const SDLoc & /*dl*/,
2738  SelectionDAG & /*DAG*/) const {
2739  llvm_unreachable("Not Implemented");
2740  }
2741 
 2742  /// Return true if result of the specified node is used by a return node
 2743  /// only. It also computes and returns the input chain for the tail call.
 2744  ///
 2745  /// This is used to determine whether it is possible to codegen a libcall as
 2746  /// tail call at legalization time.
 2747  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
 2748  return false;
 2749  }
2750 
 2751  /// Return true if the target may be able to emit the call instruction as a
 2752  /// tail call. This is used by optimization passes to determine if it's
 2753  /// profitable to duplicate return instructions to enable tailcall optimization.
 2754  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
 2755  return false;
 2756  }
2757 
 2758  /// Return the builtin name for the __builtin___clear_cache intrinsic.
 2759  /// Default is to invoke the clear cache library call.
 2760  virtual const char * getClearCacheBuiltinName() const {
 2761  return "__clear_cache";
 2762  }
2763 
 2764  /// Return the register ID of the name passed in. Used by named register
 2765  /// global variables extension. There is no target-independent behavior,
 2766  /// so the default action is to bail.
 2767  virtual unsigned getRegisterByName(const char* RegName, EVT VT,
 2768  SelectionDAG &DAG) const {
 2769  report_fatal_error("Named registers not implemented for this target");
 2770  }
2771 
2772  /// Return the type that should be used to zero or sign extend a
2773  /// zeroext/signext integer return value. FIXME: Some C calling conventions
2774  /// require the return type to be promoted, but this is not true all the time,
2775  /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
2776  /// conventions. The frontend should handle this and include all of the
2777  /// necessary information.
2779  ISD::NodeType /*ExtendKind*/) const {
2780  EVT MinVT = getRegisterType(Context, MVT::i32);
2781  return VT.bitsLT(MinVT) ? MinVT : VT;
2782  }
2783 
2784  /// For some targets, an LLVM struct type must be broken down into multiple
2785  /// simple types, but the calling convention specifies that the entire struct
2786  /// must be passed in a block of consecutive registers.
2787  virtual bool
2789  bool isVarArg) const {
2790  return false;
2791  }
2792 
2793  /// Returns a 0 terminated array of registers that can be safely used as
2794  /// scratch registers.
2795  virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
2796  return nullptr;
2797  }
2798 
2799  /// This callback is used to prepare for a volatile or atomic load.
2800  /// It takes a chain node as input and returns the chain for the load itself.
2801  ///
2802  /// Having a callback like this is necessary for targets like SystemZ,
2803  /// which allows a CPU to reuse the result of a previous load indefinitely,
2804  /// even if a cache-coherent store is performed by another CPU. The default
2805  /// implementation does nothing.
2807  SelectionDAG &DAG) const {
2808  return Chain;
2809  }
2810 
2811  /// This callback is invoked by the type legalizer to legalize nodes with an
2812  /// illegal operand type but legal result types. It replaces the
2813  /// LowerOperation callback in the type Legalizer. The reason we can not do
2814  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2815  /// use this callback.
2816  ///
2817  /// TODO: Consider merging with ReplaceNodeResults.
2818  ///
2819  /// The target places new result values for the node in Results (their number
2820  /// and types must exactly match those of the original return values of
2821  /// the node), or leaves Results empty, which indicates that the node is not
2822  /// to be custom lowered after all.
2823  /// The default implementation calls LowerOperation.
2824  virtual void LowerOperationWrapper(SDNode *N,
2826  SelectionDAG &DAG) const;
2827 
2828  /// This callback is invoked for operations that are unsupported by the
2829  /// target, which are registered to use 'custom' lowering, and whose defined
2830  /// values are all legal. If the target has no operations that require custom
2831  /// lowering, it need not implement this. The default implementation of this
2832  /// aborts.
2833  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2834 
2835  /// This callback is invoked when a node result type is illegal for the
2836  /// target, and the operation was registered to use 'custom' lowering for that
2837  /// result type. The target places new result values for the node in Results
2838  /// (their number and types must exactly match those of the original return
2839  /// values of the node), or leaves Results empty, which indicates that the
2840  /// node is not to be custom lowered after all.
2841  ///
2842  /// If the target has no operations that require custom lowering, it need not
2843  /// implement this. The default implementation aborts.
2844  virtual void ReplaceNodeResults(SDNode * /*N*/,
2845  SmallVectorImpl<SDValue> &/*Results*/,
2846  SelectionDAG &/*DAG*/) const {
2847  llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2848  }
2849 
2850  /// This method returns the name of a target specific DAG node.
2851  virtual const char *getTargetNodeName(unsigned Opcode) const;
2852 
2853  /// This method returns a target specific FastISel object, or null if the
2854  /// target does not support "fast" ISel.
2856  const TargetLibraryInfo *) const {
2857  return nullptr;
2858  }
2859 
2860 
2862  SelectionDAG &DAG) const;
2863 
2864  //===--------------------------------------------------------------------===//
2865  // Inline Asm Support hooks
2866  //
2867 
2868  /// This hook allows the target to expand an inline asm call to be explicit
2869  /// llvm code if it wants to. This is useful for turning simple inline asms
2870  /// into LLVM intrinsics, which gives the compiler more information about the
2871  /// behavior of the code.
2872  virtual bool ExpandInlineAsm(CallInst *) const {
2873  return false;
2874  }
2875 
2877  C_Register, // Constraint represents specific register(s).
2878  C_RegisterClass, // Constraint represents any of register(s) in class.
2879  C_Memory, // Memory constraint.
2880  C_Other, // Something else.
2881  C_Unknown // Unsupported constraint.
2882  };
2883 
2885  // Generic weights.
2886  CW_Invalid = -1, // No match.
2887  CW_Okay = 0, // Acceptable.
2888  CW_Good = 1, // Good weight.
2889  CW_Better = 2, // Better weight.
2890  CW_Best = 3, // Best weight.
2891 
2892  // Well-known weights.
2893  CW_SpecificReg = CW_Okay, // Specific register operands.
2894  CW_Register = CW_Good, // Register operands.
2895  CW_Memory = CW_Better, // Memory operands.
2896  CW_Constant = CW_Best, // Constant operand.
2897  CW_Default = CW_Okay // Default or don't know type.
2898  };
2899 
2900  /// This contains information for each constraint that we are lowering.
2902  /// This contains the actual string for the code, like "m". TargetLowering
2903  /// picks the 'best' code from ConstraintInfo::Codes that most closely
2904  /// matches the operand.
2905  std::string ConstraintCode;
2906 
2907  /// Information about the constraint code, e.g. Register, RegisterClass,
2908  /// Memory, Other, Unknown.
2910 
2911  /// If this is the result output operand or a clobber, this is null,
2912  /// otherwise it is the incoming operand to the CallInst. This gets
2913  /// modified as the asm is processed.
2915 
2916  /// The ValueType for the operand value.
2918 
 2919  /// Return true if this is an input operand that is a matching constraint
 2920  /// like "4".
2921  bool isMatchingInputConstraint() const;
2922 
2923  /// If this is an input matching constraint, this method returns the output
2924  /// operand it matches.
2925  unsigned getMatchedOperand() const;
2926 
2927  /// Copy constructor for copying from a ConstraintInfo.
2929  : InlineAsm::ConstraintInfo(std::move(Info)),
2931  ConstraintVT(MVT::Other) {}
2932  };
2933 
2934  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2935 
2936  /// Split up the constraint string from the inline assembly value into the
2937  /// specific constraints and their prefixes, and also tie in the associated
2938  /// operand values. If this returns an empty vector, and if the constraint
2939  /// string itself isn't empty, there was an error parsing.
2941  const TargetRegisterInfo *TRI,
2942  ImmutableCallSite CS) const;
2943 
2944  /// Examine constraint type and operand type and determine a weight value.
2945  /// The operand object must already have been set up with the operand type.
2947  AsmOperandInfo &info, int maIndex) const;
2948 
2949  /// Examine constraint string and operand type and determine a weight value.
2950  /// The operand object must already have been set up with the operand type.
2952  AsmOperandInfo &info, const char *constraint) const;
2953 
2954  /// Determines the constraint code and constraint type to use for the specific
2955  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2956  /// If the actual operand being passed in is available, it can be passed in as
2957  /// Op, otherwise an empty SDValue can be passed.
2958  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2959  SDValue Op,
2960  SelectionDAG *DAG = nullptr) const;
2961 
2962  /// Given a constraint, return the type of constraint it is for this target.
2963  virtual ConstraintType getConstraintType(StringRef Constraint) const;
2964 
2965  /// Given a physical register constraint (e.g. {edx}), return the register
2966  /// number and the register class for the register.
2967  ///
2968  /// Given a register class constraint, like 'r', if this corresponds directly
2969  /// to an LLVM register class, return a register of 0 and the register class
2970  /// pointer.
2971  ///
2972  /// This should only be used for C_Register constraints. On error, this
2973  /// returns a register number of 0 and a null register class pointer.
2974  virtual std::pair<unsigned, const TargetRegisterClass *>
2976  StringRef Constraint, MVT VT) const;
2977 
2978  virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2979  if (ConstraintCode == "i")
2980  return InlineAsm::Constraint_i;
2981  else if (ConstraintCode == "m")
2982  return InlineAsm::Constraint_m;
2984  }
2985 
2986  /// Try to replace an X constraint, which matches anything, with another that
2987  /// has more specific requirements based on the type of the corresponding
2988  /// operand. This returns null if there is no replacement to make.
2989  virtual const char *LowerXConstraint(EVT ConstraintVT) const;
2990 
2991  /// Lower the specified operand into the Ops vector. If it is invalid, don't
2992  /// add anything to Ops.
2993  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
2994  std::vector<SDValue> &Ops,
2995  SelectionDAG &DAG) const;
2996 
2997  //===--------------------------------------------------------------------===//
2998  // Div utility functions
2999  //
3000  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
3001  bool IsAfterLegalization,
3002  std::vector<SDNode *> *Created) const;
3003  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
3004  bool IsAfterLegalization,
3005  std::vector<SDNode *> *Created) const;
3006 
3007  /// Targets may override this function to provide custom SDIV lowering for
3008  /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
3009  /// assumes SDIV is expensive and replaces it with a series of other integer
3010  /// operations.
3011  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
3012  SelectionDAG &DAG,
3013  std::vector<SDNode *> *Created) const;
3014 
3015  /// Indicate whether this target prefers to combine FDIVs with the same
3016  /// divisor. If the transform should never be done, return zero. If the
3017  /// transform should be done, return the minimum number of divisor uses
3018  /// that must exist.
3019  virtual unsigned combineRepeatedFPDivisors() const {
3020  return 0;
3021  }
3022 
3023  /// Hooks for building estimates in place of slower divisions and square
3024  /// roots.
3025 
3026  /// Return either a square root or its reciprocal estimate value for the input
3027  /// operand.
3028  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
3029  /// 'Enabled' as set by a potential default override attribute.
3030  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
3031  /// refinement iterations required to generate a sufficient (though not
3032  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
3033  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
3034  /// algorithm implementation that uses either one or two constants.
3035  /// The boolean Reciprocal is used to select whether the estimate is for the
3036  /// square root of the input operand or the reciprocal of its square root.
3037  /// A target may choose to implement its own refinement within this function.
3038  /// If that's true, then return '0' as the number of RefinementSteps to avoid
3039  /// any further refinement of the estimate.
3040  /// An empty SDValue return means no estimate sequence can be created.
3042  int Enabled, int &RefinementSteps,
3043  bool &UseOneConstNR, bool Reciprocal) const {
3044  return SDValue();
3045  }
3046 
3047  /// Return a reciprocal estimate value for the input operand.
3048  /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
3049  /// 'Enabled' as set by a potential default override attribute.
3050  /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
3051  /// refinement iterations required to generate a sufficient (though not
3052  /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
3053  /// A target may choose to implement its own refinement within this function.
3054  /// If that's true, then return '0' as the number of RefinementSteps to avoid
3055  /// any further refinement of the estimate.
3056  /// An empty SDValue return means no estimate sequence can be created.
3058  int Enabled, int &RefinementSteps) const {
3059  return SDValue();
3060  }
3061 
3062  //===--------------------------------------------------------------------===//
3063  // Legalization utility functions
3064  //
3065 
3066  /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
3067  /// respectively, each computing an n/2-bit part of the result.
3068  /// \param Result A vector that will be filled with the parts of the result
3069  /// in little-endian order.
3070  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
3071  /// if you want to control how low bits are extracted from the LHS.
3072  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
3073  /// \param RL Low bits of the RHS of the MUL. See LL for meaning
3074  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
3075  /// \returns true if the node has been expanded, false if it has not
3076  bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS,
3077  SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
3079  SDValue LL = SDValue(), SDValue LH = SDValue(),
3080  SDValue RL = SDValue(), SDValue RH = SDValue()) const;
3081 
3082  /// Expand a MUL into two nodes. One that computes the high bits of
3083  /// the result and one that computes the low bits.
3084  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
3085  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
3086  /// if you want to control how low bits are extracted from the LHS.
3087  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
3088  /// \param RL Low bits of the RHS of the MUL. See LL for meaning
3089  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
3090  /// \returns true if the node has been expanded. false if it has not
3091  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
3092  SelectionDAG &DAG, MulExpansionKind Kind,
3093  SDValue LL = SDValue(), SDValue LH = SDValue(),
3094  SDValue RL = SDValue(), SDValue RH = SDValue()) const;
3095 
3096  /// Expand float(f32) to SINT(i64) conversion
3097  /// \param N Node to expand
3098  /// \param Result output after conversion
3099  /// \returns True, if the expansion was successful, false otherwise
3100  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
3101 
3102  /// Turn load of vector type into a load of the individual elements.
3103  /// \param LD load to expand
3104  /// \returns MERGE_VALUEs of the scalar loads with their chains.
3105  SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
3106 
3107  /// Turn a store of a vector type into stores of the individual elements.
3108  /// \param ST Store with a vector value type
3109  /// \returns MERGE_VALUEs of the individual store chains.
3110  SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
3111 
3112  /// Expands an unaligned load to 2 half-size loads for an integer, and
3113  /// possibly more for vectors.
3114  std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
3115  SelectionDAG &DAG) const;
3116 
3117  /// Expands an unaligned store to 2 half-size stores for integer values, and
3118  /// possibly more for vectors.
3119  SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
3120 
3121  /// Increments memory address \p Addr according to the type of the value
3122  /// \p DataVT that should be stored. If the data is stored in compressed
3123  /// form, the memory address should be incremented according to the number of
3124  /// the stored elements. This number is equal to the number of '1's bits
3125  /// in the \p Mask.
3126  /// \p DataVT is a vector type. \p Mask is a vector value.
3127  /// \p DataVT and \p Mask have the same number of vector elements.
3128  SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
3129  EVT DataVT, SelectionDAG &DAG,
3130  bool IsCompressedMemory) const;
3131 
3132  /// Get a pointer to vector element \p Idx located in memory for a vector of
3133  /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
3134  /// bounds the returned pointer is unspecified, but will be within the vector
3135  /// bounds.
3136  SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
3137  SDValue Idx) const;
3138 
3139  //===--------------------------------------------------------------------===//
3140  // Instruction Emitting Hooks
3141  //
3142 
3143  /// This method should be implemented by targets that mark instructions with
3144  /// the 'usesCustomInserter' flag. These instructions are special in various
3145  /// ways, which require special support to insert. The specified MachineInstr
3146  /// is created but not inserted into any basic blocks, and this method is
3147  /// called to expand it into a sequence of instructions, potentially also
3148  /// creating new basic blocks and control flow.
3149  /// As long as the returned basic block is different (i.e., we created a new
3150  /// one), the custom inserter is free to modify the rest of \p MBB.
3151  virtual MachineBasicBlock *
3152  EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
3153 
3154  /// This method should be implemented by targets that mark instructions with
3155  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
3156  /// instruction selection by target hooks. e.g. To fill in optional defs for
3157  /// ARM 's' setting instructions.
3158  virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
3159  SDNode *Node) const;
3160 
3161  /// If this function returns true, SelectionDAGBuilder emits a
3162  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
3163  virtual bool useLoadStackGuardNode() const {
3164  return false;
3165  }
3166 
3167  /// Lower TLS global address SDNode for target independent emulated TLS model.
3168  virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
3169  SelectionDAG &DAG) const;
3170 
3171  // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
3172  // If we're comparing for equality to zero and isCtlzFast is true, expose the
3173  // fact that this can be implemented as a ctlz/srl pair, so that the dag
3174  // combiner can fold the new nodes.
3175  SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
3176 
3177 private:
3178  SDValue simplifySetCCWithAnd(EVT VT, SDValue N0, SDValue N1,
3179  ISD::CondCode Cond, DAGCombinerInfo &DCI,
3180  const SDLoc &DL) const;
3181 };
3182 
3183 /// Given an LLVM IR type and return type attributes, compute the return value
3184 /// EVTs and flags, and optionally also the offsets, if the return value is
3185 /// being lowered to memory.
3186 void GetReturnInfo(Type *ReturnType, AttributeSet attr,
3187  SmallVectorImpl<ISD::OutputArg> &Outs,
3188  const TargetLowering &TLI, const DataLayout &DL);
3189 
3190 } // end namespace llvm
3191 
3192 #endif // LLVM_TARGET_TARGETLOWERING_H
virtual bool enableAggressiveFMAFusion(EVT VT) const
Return true if target always beneficiates from combining into FMA for a given value type...
virtual bool hasBitPreservingFPLogic(EVT VT) const
Return true if it is safe to transform an integer-domain bitwise operation into the equivalent floati...
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const
Lower an interleaved store to target specific intrinsics.
static MVT getIntegerVT(unsigned BitWidth)
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:762
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:102
const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:241
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
LLVMContext & Context
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
Atomic ordering constants.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:274
virtual bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const
Return true if the MachineFunction contains a COPY which would imply HasCopyImplyingStackAdjustment.
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
unsigned getPrefFunctionAlignment() const
Return the preferred function alignment.
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
unsigned getNumRegisters(LLVMContext &Context, EVT VT) const
Return the number of registers that this ValueType will eventually require.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual bool storeOfVectorConstantIsCheap(EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of a non-zero vector constant with the give...
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y —> (~X & Y) == 0 (X & Y) != Y —> (~X & Y) !=...
A Module instance is used to store all the information related to an LLVM module. ...
Definition: Module.h:52
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:137
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded)
Check to see if the specified operand of the specified instruction is a constant integer.
const TargetMachine & getTargetMachine() const
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
Definition: Instructions.h:504
virtual void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const
Insert explicit copies in entry and exit blocks.
virtual bool isCheapToSpeculateCttz() const
Return true if it is cheap to speculate a call to intrinsic cttz.
void setJumpBufAlignment(unsigned Align)
Set the target's required jmp_buf buffer alignment (in bytes); default is 0.
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
bool isConstTrueVal(const SDNode *N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
bool isExtended() const
isExtended - Test if the given EVT is extended (as opposed to being simple).
Definition: ValueTypes.h:113
virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
This class represents a function call, abstracting a target machine's calling convention.
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
bool usesUnderscoreSetJmp() const
Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
void setHasFloatingPointExceptions(bool FPExceptions=true)
Tells the code generator that this target supports floating point exceptions and cares about preservi...
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
virtual bool getPreIndexedAddressParts(SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
static ISD::NodeType getExtendForContent(BooleanContent Content)
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Function Alias Analysis Results
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:204
unsigned getSizeInBits() const
This instruction constructs a fixed permutation of two input vectors.
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const
For some targets, an LLVM struct type must be broken down into multiple simple types, but the calling convention specifies that the entire struct must be passed in a block of consecutive registers.
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
CallLoweringInfo & setNoReturn(bool Value=true)
const_iterator begin(StringRef path)
Get begin iterator over path.
Definition: Path.cpp:233
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, unsigned Align=1, bool *=nullptr) const
Determine if the target supports unaligned memory accesses.
An instruction for reading from memory.
Definition: Instructions.h:164
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
Definition: Instructions.h:669
virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT) const
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x...
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND).
virtual bool GetAddrModeArguments(IntrinsicInst *, SmallVectorImpl< Value * > &, Type *&, unsigned AddrSpace=0) const
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the add...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it...
virtual bool isFPExtFree(EVT VT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual void HandleByVal(CCState *, unsigned &, unsigned) const
Target-specific cleanup for formal ByVal parameters.
virtual unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
CallLoweringInfo & setDiscardResult(bool Value=true)
SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, bool IsAfterLegalization, std::vector< SDNode * > *Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual bool isFPImmLegal(const APFloat &, EVT) const
Returns true if the target can instruction select the specified FP immediate natively.
LegalizeTypeAction getTypeAction(MVT VT) const
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
virtual Sched::Preference getSchedulingPreference(SDNode *) const
Some scheduler, e.g.
SDValue getConstTrueVal(SelectionDAG &DAG, EVT VT, const SDLoc &DL) const
Return a constant of type VT that contains a true value that respects getBooleanContents() ...
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:39
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
Definition: ValueTypes.h:212
virtual LoadInst * lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can b...
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool doesNotReturn() const
Determine if the call cannot return.
Definition: CallSite.h:454
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors...
bool enableExtLdPromotion() const
Return true if the target wants to use the optimization that turns ext(promotableInst1(...(promotableInstN(load)))) into promotedInst1(...(promotedInstN(ext(load)))).
bool isVector() const
isVector - Return true if this is a vector value type.
Definition: ValueTypes.h:133
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
lazy value info
bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation has solution on this target.
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely that it saves us from materializing N0 and N1 in an integer register.
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool hasMultipleConditionRegisters() const
Return true if multiple condition registers are available.
void operator=(const TargetLowering &)=delete
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const
Return a reciprocal estimate value for the input operand.
CallLoweringInfo & setVarArg(bool Value=true)
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
virtual void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
CallLoweringInfo & setChain(SDValue InChain)
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:588
virtual ~TargetLoweringBase()=default
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
This file contains the simple types necessary to represent the attributes associated with functions a...
SimpleValueType SimpleTy
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual bool isJumpTableRelative() const
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
unsigned getJumpBufSize() const
Returns the target's jmp_buf size in bytes (if never set, the default is 200)
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool isPositionIndependent() const
virtual Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
ELFYAML::ELF_STO Other
Definition: ELFYAML.cpp:662
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
Definition: ValueTypes.h:123
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
bool isExtFree(const Instruction *I) const
Return true if the extension represented by I is free.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool getPostIndexedAddressParts(SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first...
virtual Value * getIRStackGuard(IRBuilder<> &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
Context object for machine code objects.
Definition: MCContext.h:51
const DenseMap< unsigned int, unsigned int > & getBypassSlowDivWidths() const
Returns map of slow types for division or remainder with corresponding fast types.
virtual bool ShouldShrinkFPConstant(EVT) const
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
virtual bool mayBeEmittedAsTailCall(CallInst *) const
Return true if the target may be able emit the call instruction as a tail call.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Definition: FastISel.h:31
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
Class to represent function types.
Definition: DerivedTypes.h:102
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value...
CallingConv::ID getCallingConv() const
getCallingConv/setCallingConv - get or set the calling convention of the call.
Definition: CallSite.h:308
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
bool usesUnderscoreLongJmp() const
Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const
Returns the target-specific address of the unsafe stack pointer.
void GetReturnInfo(Type *ReturnType, AttributeSet attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags...
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
MachineBasicBlock * MBB
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool Signed) const
Return if N is a True value when extended to VT.
const ValueTypeActionImpl & getValueTypeActions() const
This contains information for each constraint that we are lowering.
Function Alias Analysis false
SmallVector< ISD::OutputArg, 32 > Outs
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
virtual void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
CallLoweringInfo & setZExtResult(bool Value=true)
An instruction for storing to memory.
Definition: Instructions.h:300
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:842
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool isDesirableToTransformToIntegerOp(unsigned, EVT) const
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
virtual Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isOperationLegalOrPromote(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal using promotion...
virtual bool isSelectSupported(SelectSupportKind) const
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself...
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal or custom on this target.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Type * getElementType() const
Definition: DerivedTypes.h:336
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Idx) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool isLegalRC(const TargetRegisterClass *RC) const
Return true if the value types that can be represented by the specified register class are all legal...
virtual bool hasStandaloneRem(EVT VT) const
Return true if the target can handle a standalone remainder operation.
Class to represent pointers.
Definition: DerivedTypes.h:443
This class is used to represent ISD::STORE nodes.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual bool needsFixedCatchObjects() const
virtual bool hasAndNot(SDValue X) const
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
virtual void initializeSplitCSR(MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL) const
Soften the operands of a comparison.
bool isInvoke() const
isInvoke - true if a InvokeInst is enclosed.
Definition: CallSite.h:91
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector< SDNode * > *Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool isMatchingInputConstraint() const
Return true of this is an input operand that is a matching constraint like "4".
virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
virtual FastISel * createFastISel(FunctionLoweringInfo &, const TargetLibraryInfo *) const
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
virtual Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
virtual bool isShuffleMaskLegal(const SmallVectorImpl< int > &, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations, those with specific masks.
BooleanContent getBooleanContents(EVT Type) const
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors...
unsigned const MachineRegisterInfo * MRI
bool hasFloatingPointExceptions() const
Return true if target supports floating point exceptions.
MVT - Machine Value Type.
FenceInst * CreateFence(AtomicOrdering Ordering, SynchronizationScope SynchScope=CrossThread, const Twine &Name="")
Definition: IRBuilder.h:1123
DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:48
void setJumpBufSize(unsigned Size)
Set the target's required jmp_buf buffer size (in bytes); default is 200.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
virtual bool isZExtFree(EVT FromTy, EVT ToTy) const
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:219
virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
virtual EVT getOptimalMemOpType(uint64_t, unsigned, unsigned, bool, bool, bool, MachineFunction &) const
Returns the target specific optimal type for load and store operations as a result of memset...
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
This is an important base class in LLVM.
Definition: Constant.h:42
virtual unsigned getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:818
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
TargetLoweringBase(const TargetMachine &TM)
NOTE: The TargetMachine owns TLOF.
bool isMaskAndBranchFoldingLegal() const
Return if the target supports combining a chain like:
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isDesirableToCommuteWithShift(const SDNode *N) const
Return true if it is profitable to move a following shift through this.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
ConstraintInfo()
Default constructor.
Definition: InlineAsm.cpp:59
CombineLevel
Definition: DAGCombine.h:16
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
uint32_t Offset
bool expandMUL_LOHI(unsigned Opcode, EVT VT, SDLoc dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively, each computing an n/2-bit part of the result.
virtual bool isFMAFasterThanFMulAndFAdd(EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const
virtual unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const
Return the register ID of the name passed in.
bool isPositionIndependent() const
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call...
bool CombineTo(SDValue O, SDValue N)
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual bool useSoftFloat() const
uint64_t getNumElements() const
Definition: DerivedTypes.h:335
Value * getOperand(unsigned i) const
Definition: User.h:145
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool isNarrowingProfitable(EVT, EVT) const
Return true if it's profitable to narrow operations of type VT1 to VT2.
bool isPredictableSelectExpensive() const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
unsigned getMaxAtomicSizeInBitsSupported() const
Returns the maximum atomic operation size (in bits) supported by the backend.
bool SimplifyDemandedBits(SDNode *User, unsigned OpIdx, const APInt &Demanded, DAGCombinerInfo &DCI)
Helper for SimplifyDemandedBits that can simplify an operation with multiple uses.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
bool isConstFalseVal(const SDNode *N) const
Return if the N is a constant or constant vector equal to the false value from getBooleanContents().
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask, APInt &KnownZero, APInt &KnownOne, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, bool IsAfterLegalization, std::vector< SDNode * > *Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS=0) const
Return the cost of the scaling factor used in the addressing mode represented by AM for this target...
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
EVT - Extended Value Type.
Definition: ValueTypes.h:31
bool isSlowDivBypassed() const
Returns true if target has indicated at least one type should be bypassed.
LegalizeTypeAction getTypeAction(MVT VT) const
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array...
std::vector< ArgListEntry > ArgListTy
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
This structure contains all information that is necessary for lowering calls.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual bool isExtFreeImpl(const Instruction *I) const
Return true if the extension represented by I is free.
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
Definition: ValueTypes.h:70
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
void setUseUnderscoreLongJmp(bool Val)
Indicate whether this target prefers to use _longjmp to implement llvm.longjmp or the version without...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
AsmOperandInfo(InlineAsm::ConstraintInfo Info)
Copy constructor for copying from a ConstraintInfo.
std::string ConstraintCode
This contains the actual string for the code, like "m".
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find...
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
static bool isReleaseOrStronger(AtomicOrdering ao)
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero...
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero...
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
void operator=(const TargetLoweringBase &)=delete
virtual Value * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual void ReplaceNodeResults(SDNode *, SmallVectorImpl< SDValue > &, SelectionDAG &) const
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void initActions()
Initialize all of the actions to default values.
std::vector< AsmOperandInfo > AsmOperandInfoVector
int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a division of the given type based on the function's attri...
CCState - This class holds information needed while lowering arguments and return values...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
bool MaskAndBranchFoldingIsLegal
MaskAndBranchFoldingIsLegal - Indicates if the target supports folding a mask of a single bit...
ReciprocalEstimate
Reciprocal estimate status values used by the functions below.
InstrTy * getInstruction() const
Definition: CallSite.h:93
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
virtual bool isVectorClearMaskLegal(const SmallVectorImpl< int > &, EVT) const
Similar to isShuffleMaskLegal.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
Definition: SelectionDAG.h:166
virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:230
Provides information about what library functions are available for the current target.
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:649
static const char * Target
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
CallLoweringInfo & setSExtResult(bool Value=true)
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
Represents one node in the SelectionDAG.
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
bool hasExtractBitsInsn() const
Return true if the target has BitExtract instructions.
int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a square root of the given type based on the function's attribut...
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual bool isProfitableToHoist(Instruction *I) const
Class to represent vector types.
Definition: DerivedTypes.h:369
virtual const char * getClearCacheBuiltinName() const
Return the builtin name for the __builtin___clear_cache intrinsic Default is to invoke the clear cach...
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
Definition: APInt.h:77
virtual bool IsDesirableToPromoteOp(SDValue, EVT &) const
This method queries the target whether it is beneficial for the dag combiner to promote the specified node...
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
std::pair< int, MVT > getTypeLegalizationCost(const DataLayout &DL, Type *Ty) const
Estimate the cost of type-legalization and the legalized type.
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:400
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:403
ValueTypeActionImpl ValueTypeActions
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const
Hooks for building estimates in place of slower divisions and square roots.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const
Return the preferred vector type legalization action.
virtual bool isIntDivCheap(EVT VT, AttributeSet Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
static bool isAcquireOrStronger(AtomicOrdering ao)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
CallLoweringInfo & setTailCall(bool Value=true)
virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On archi...
MVT getRegisterType(LLVMContext &Context, EVT VT) const
Return the type of registers that this ValueType will eventually require.
Representation of each machine instruction.
Definition: MachineInstr.h:52
Basic Alias true
CallLoweringInfo & setConvergent(bool Value=true)
SmallVector< SDValue, 32 > OutVals
bool isValid() const
isValid - Return true if this is a valid simple valuetype.
virtual bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const
Lower an interleaved load to target specific intrinsics.
LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual bool isVectorShiftByScalarCheap(Type *Ty) const
Return true if it's significantly cheaper to shift a vector by a uniform scalar than by an amount whi...
SelectSupportKind
Enum that describes what type of support for selects the target has.
virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
unsigned getJumpBufAlignment() const
Returns the target's jmp_buf alignment in bytes (if never set, the default is 0)
void setTypeAction(MVT VT, LegalizeTypeAction Action)
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
void * PointerTy
Definition: GenericValue.h:24
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const
EVT is not used in-tree, but is used by out-of-tree target.
unsigned getMinFunctionAlignment() const
Return the minimum function alignment.
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
Definition: ISDOpcodes.h:536
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:665
virtual bool shouldInsertFencesForAtomic(const Instruction *I) const
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
Definition: ValueTypes.h:256
#define I(x, y, z)
Definition: MD5.cpp:54
#define N
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool paramHasAttr(unsigned i, Attribute::AttrKind Kind) const
Return true if the call or the callee has the given attribute.
Definition: CallSite.h:359
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:349
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
virtual const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *, const MachineBasicBlock *, unsigned, MCContext &) const
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
virtual bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:309
virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const
Returns true if the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass into a ...
Value * getDefaultSafeStackPointerLocation(IRBuilder<> &IRB, bool UseTLS) const
EVT getValueType() const
Return the ValueType of the referenced return value.
CallLoweringInfo & setCallee(Type *ResultType, FunctionType *FTy, SDValue Target, ArgListTy &&ArgsList, ImmutableCallSite &Call)
LLVM_NODISCARD bool empty() const
Definition: DenseMap.h:80
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded, const SDLoc &dl)
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
bool isVarArg() const
Definition: DerivedTypes.h:122
const unsigned Kind
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
int getDivRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a division of the given type based on the function's attributes...
virtual bool ExpandInlineAsm(CallInst *) const
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
Definition: ValueTypes.h:118
bool use_empty() const
Definition: Value.h:299
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
virtual BranchProbability getPredictableBranchThreshold() const
If a branch or a select condition is skewed in one direction by more than this factor, it is very likely to be predicted correctly.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
bool isFloat(MCInstrInfo const &MCII, MCInst const &MCI)
Return whether it is a floating-point insn.
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, unsigned Alignment=1, bool *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:107
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
CallLoweringInfo & setInRegister(bool Value=true)
TargetLowering(const TargetLowering &)=delete
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
LLVM Value Representation.
Definition: Value.h:71
void setUseUnderscoreSetJmp(bool Val)
Indicate whether this target prefers to use _setjmp to implement llvm.setjmp or the version without _...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:111
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
virtual bool isZExtFree(SDValue Val, EVT VT2) const
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
bool hasTargetDAGCombine(ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified node...
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
virtual uint8_t getRepRegClassCostFor(MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy, Idx).
Primary interface to the complete machine description for the target machine.
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
IRTranslator LLVM IR MI
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:47
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml","ocaml 3.10-compatible collector")
unsigned getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
MVT ConstraintVT
The ValueType for the operand value.
Conversion operators.
Definition: ISDOpcodes.h:397
BooleanContent
Enum that describes how the target represents true/false values.
virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the specified operation is illegal but has a custom lowering on that type...
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal on this target.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
bool isBigEndian() const
Definition: DataLayout.h:221
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
virtual unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool isFNegFree(EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
unsigned getGatherAllAliasesMaxDepth() const
Fast - This calling convention attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:42
virtual bool hasPairedLoad(EVT, unsigned &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
virtual bool shouldAlignPointerArgs(CallInst *, unsigned &, unsigned &) const
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, ImmutableCallSite CS) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:226
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:44
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
virtual bool isCheapToSpeculateCtlz() const
Return true if it is cheap to speculate a call to intrinsic ctlz.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to...
int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a square root of the given type based on the function's at...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:799
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:248
This class is used to represent ISD::LOAD nodes.