//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETLOWERING_H
#define LLVM_CODEGEN_TARGETLOWERING_H

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Type.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace llvm {

class AssumptionCache;
class CCState;
class CCValAssign;
class Constant;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class Loop;
class GISelKnownBits;
class IntrinsicInst;
class IRBuilderBase;
struct KnownBits;
class LLVMContext;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class Module;
class ProfileSummaryInfo;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetTransformInfo;
class Value;

namespace Sched {

enum Preference : uint8_t {
  None,        // No preference
  Source,      // Follow source order.
  RegPressure, // Scheduling for lowest register pressure.
  Hybrid,      // Scheduling for both latency and register pressure.
  ILP,         // Scheduling for ILP in low register pressure mode.
  VLIW,        // Scheduling for VLIW targets.
  Fast,        // Fast suboptimal list scheduling
  Linearize,   // Linearize DAG, no scheduling
  Last = Linearize // Marker for the last Sched::Preference
};

} // end namespace Sched

// MemOp models a memory operation, either memset or memcpy/memmove.
struct MemOp {
private:
  // Shared
  uint64_t Size;
  bool DstAlignCanChange; // true if destination alignment can satisfy any
                          // constraint.
  Align DstAlign;         // Specified alignment of the memory operation.

  bool AllowOverlap;
  // memset only
  bool IsMemset;   // If set, this memory operation is a memset.
  bool ZeroMemset; // If set, the memset clears out memory with zeros.
  // memcpy only
  bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                     // constant so it does not need to be loaded.
  Align SrcAlign;    // Inferred alignment of the source or default value if
                     // the memory operation does not need to load the value.
public:
  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    Align SrcAlign, bool IsVolatile,
                    bool MemcpyStrSrc = false) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = false;
    Op.ZeroMemset = false;
    Op.MemcpyStrSrc = MemcpyStrSrc;
    Op.SrcAlign = SrcAlign;
    return Op;
  }

  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                   bool IsZeroMemset, bool IsVolatile) {
    MemOp Op;
    Op.Size = Size;
    Op.DstAlignCanChange = DstAlignCanChange;
    Op.DstAlign = DstAlign;
    Op.AllowOverlap = !IsVolatile;
    Op.IsMemset = true;
    Op.ZeroMemset = IsZeroMemset;
    Op.MemcpyStrSrc = false;
    return Op;
  }

  uint64_t size() const { return Size; }
  Align getDstAlign() const {
    assert(!DstAlignCanChange);
    return DstAlign;
  }
  bool isFixedDstAlign() const { return !DstAlignCanChange; }
  bool allowOverlap() const { return AllowOverlap; }
  bool isMemset() const { return IsMemset; }
  bool isMemcpy() const { return !IsMemset; }
  bool isMemcpyWithFixedDstAlign() const {
    return isMemcpy() && !DstAlignCanChange;
  }
  bool isZeroMemset() const { return isMemset() && ZeroMemset; }
  bool isMemcpyStrSrc() const {
    assert(isMemcpy() && "Must be a memcpy");
    return MemcpyStrSrc;
  }
  Align getSrcAlign() const {
    assert(isMemcpy() && "Must be a memcpy");
    return SrcAlign;
  }
  bool isSrcAligned(Align AlignCheck) const {
    return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
  }
  bool isDstAligned(Align AlignCheck) const {
    return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
  }
  bool isAligned(Align AlignCheck) const {
    return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
  }
};
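
// A minimal usage sketch (illustrative, not part of the original header):
// how a lowering hook might inspect a MemOp describing a 16-byte
// non-volatile memcpy. The sizes and alignments here are assumptions.
// \code
//   MemOp Op = MemOp::Copy(/*Size=*/16, /*DstAlignCanChange=*/false,
//                          /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   if (Op.isMemcpy() && Op.isAligned(Align(4)) && Op.allowOverlap()) {
//     // Both operands are at least 4-byte aligned and overlapping stores
//     // are permitted, so a target could emit two overlapping 8-byte
//     // accesses instead of four 4-byte ones.
//   }
// \endcode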

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
public:
  /// This enum indicates whether operations are valid for a target, and if
  /// not, what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat,    // Replace this float with a larger one.
    TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
    TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
                                 // While it is theoretically possible to
                                 // legalize operations on scalable types with a
                                 // loop that handles the vscale * #lanes of the
                                 // vector, this is non-trivial at SelectionDAG
                                 // level and these types are better to be
                                 // widened or promoted.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,          // Don't expand the instruction.
    CastToInteger, // Cast the atomic instruction to another type, e.g. from
                   // floating-point to integer type.
    LLSC,          // Expand the instruction into load-linked/store-conditional;
                   // used by ARM/AArch64.
    LLOnly,        // Expand the (load) instruction into just a load-linked,
                   // which has greater atomic guarantees than a normal load.
    CmpXChg,       // Expand the instruction into cmpxchg; used by at least X86.
    MaskedIntrinsic,   // Use a target-specific intrinsic for the LL/SC loop.
    BitTestIntrinsic,  // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
    CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
                       // operations; used by X86.
    Expand,        // Generic expansion in terms of other atomic operations.

    // Rewrite to a non-atomic form for use in a known non-preemptible
    // environment.
    NotAtomic
  };

  /// Enum that specifies when a multiplication should be expanded.
  enum class MulExpansionKind {
    Always,            // Always expand the instruction.
    OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
                       // or custom.
  };

  /// Enum that specifies when a float negation is beneficial.
  enum class NegatibleCost {
    Cheaper = 0,  // Negated expression is cheaper.
    Neutral = 1,  // Negated expression has the same cost.
    Expensive = 2 // Negated expression is more expensive.
  };

  /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
  /// (setcc ...)).
  enum AndOrSETCCFoldKind : uint8_t {
    None = 0,   // No fold is preferable.
    AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
    NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
    ABS = 4,    // Fold with `llvm.abs` op is preferable.
  };

  class ArgListEntry {
  public:
    Value *Val = nullptr;
    SDValue Node = SDValue();
    Type *Ty = nullptr;
    bool IsSExt : 1;
    bool IsZExt : 1;
    bool IsInReg : 1;
    bool IsSRet : 1;
    bool IsNest : 1;
    bool IsByVal : 1;
    bool IsByRef : 1;
    bool IsInAlloca : 1;
    bool IsPreallocated : 1;
    bool IsReturned : 1;
    bool IsSwiftSelf : 1;
    bool IsSwiftAsync : 1;
    bool IsSwiftError : 1;
    bool IsCFGuardTarget : 1;
    MaybeAlign Alignment = std::nullopt;
    Type *IndirectType = nullptr;

    ArgListEntry()
        : IsSExt(false), IsZExt(false), IsInReg(false), IsSRet(false),
          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
          IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
          IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}

    void setAttributes(const CallBase *Call, unsigned ArgIdx);
  };
  using ArgListTy = std::vector<ArgListEntry>;

  virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                     ArgListTy &Args) const {};

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
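
  // A minimal sketch of how the mapping above is used (illustrative): when a
  // boolean is widened from i1 to i32, the content kind selects the extension
  // that preserves the target's encoding of true.
  // \code
  //   getExtendForContent(ZeroOrOneBooleanContent);         // ISD::ZERO_EXTEND
  //   getExtendForContent(ZeroOrNegativeOneBooleanContent); // ISD::SIGN_EXTEND
  //   // i1 true widens to 0x00000001 or 0xFFFFFFFF respectively.
  // \endcode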

  explicit TargetLoweringBase(const TargetMachine &TM);
  TargetLoweringBase(const TargetLoweringBase &) = delete;
  TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
  virtual ~TargetLoweringBase() = default;

  /// Return true if the target supports strict float operations.
  bool isStrictFPEnabled() const {
    return IsStrictFPEnabled;
  }

protected:
  /// Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the in-memory pointer type for the given address space, defaults
  /// to the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// Return the type for frame index, which is determined by
  /// the alloca address space specified through the data layout.
  MVT getFrameIndexTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getAllocaAddrSpace());
  }

  /// Return the type for code pointers, which is determined by the program
  /// address space specified through the data layout.
  MVT getProgramPointerTy(const DataLayout &DL) const {
    return getPointerTy(DL, DL.getProgramAddressSpace());
  }

  /// Return the type for operands of fence.
  /// TODO: Let fence operands be of i32 type and remove this.
  virtual MVT getFenceOperandTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return the type to use for a scalar shift opcode, given the shifted
  /// amount type. Targets should return a legal type if the input type is
  /// legal. Targets can return a type that is too small if the input type is
  /// illegal.
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  /// Returns the type for the shift amount of a shift opcode. For vectors,
  /// returns the input type. For scalars, behavior depends on \p LegalTypes. If
  /// \p LegalTypes is true, calls getScalarShiftAmountTy, otherwise uses
  /// pointer type. If getScalarShiftAmountTy or pointer type cannot represent
  /// all possible shift amounts, returns MVT::i32. In general, \p LegalTypes
  /// should be set to true for calls during type legalization and after type
  /// legalization has been completed.
  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                       bool LegalTypes = true) const;

  /// Return the preferred type to use for a shift opcode, given the shifted
  /// amount type is \p ShiftValueTy.
  LLVM_READONLY
  virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
    return ShiftValueTy;
  }

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Returns the type to be used for the EVL/AVL operand of VP nodes:
  /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
  /// and must be at least as large as i32. The EVL is implicitly zero-extended
  /// to any larger type.
  virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }

  /// This callback is used to inspect load/store instructions and add
  /// target-specific MachineMemOperand flags to them. The default
  /// implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const Instruction &I) const {
    return MachineMemOperand::MONone;
  }

  /// This callback is used to inspect load/store SDNode.
  /// The default implementation does nothing.
  virtual MachineMemOperand::Flags
  getTargetMMOFlags(const MemSDNode &Node) const {
    return MachineMemOperand::MONone;
  }

  MachineMemOperand::Flags
  getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
                         AssumptionCache *AC = nullptr,
                         const TargetLibraryInfo *LibInfo = nullptr) const;
  MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
                                                   const DataLayout &DL) const;
  MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
                                                    const DataLayout &DL) const;

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if the @llvm.get.active.lane.mask intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
    return true;
  }

  virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
                                           bool IsScalable) const {
    return true;
  }

  /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
  /// expanded using generic code in SelectionDAGBuilder.
  virtual bool shouldExpandCttzElements(EVT VT) const { return true; }

  /// Return the minimum number of bits required to hold the maximum possible
  /// number of trailing zero vector elements.
  unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC,
                                      bool ZeroIsPoison,
                                      const ConstantRange *VScaleRange) const;

  // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
  // vecreduce(op(x, y)) for the reduction opcode RedOpc.
  virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
    return true;
  }

  /// Return true if it is profitable to convert a select of FP constants into
  /// a constant pool load whose address depends on the select condition. The
  /// parameter may be used to differentiate a select with FP compare from
  /// integer compare.
  virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorElementCount().isScalar())
      return TypeScalarizeVector;
    // The default action for an odd-width vector is to widen.
    if (!VT.isPow2VectorType())
      return TypeWidenVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }
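
  // Worked examples for the default policy above (illustrative only):
  // \code
  //   getPreferredVectorAction(MVT::v1f32); // TypeScalarizeVector: 1 element.
  //   getPreferredVectorAction(MVT::v3i32); // TypeWidenVector: not a pow-2.
  //   getPreferredVectorAction(MVT::v4i8);  // TypePromoteInteger: default.
  // \endcode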

  // Return true if the half type should be promoted using soft promotion rules
  // where each operation is promoted to f32 individually, then converted to
  // fp16. The default behavior is to promote chains of operations, keeping
  // intermediate results in f32 precision and range.
  virtual bool softPromoteHalfType() const { return false; }

  // Return true if, for soft-promoted half, the half type should be passed
  // to and returned from functions as f32. The default behavior is to
  // pass as i16. If soft-promoted half is not used, this function is ignored
  // and values are always passed and returned as f32.
  virtual bool useFPRegsForHalfType() const { return false; }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }

  /// Return true if the target can handle a standalone remainder operation.
  virtual bool hasStandaloneRem(EVT VT) const {
    return true;
  }

  /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
  virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
    // Default behavior is to replace SQRT(X) with X*RSQRT(X).
    return false;
  }

  /// Reciprocal estimate status values used by the functions below.
  enum ReciprocalEstimate : int {
    Unspecified = -1,
    Disabled = 0,
    Enabled = 1
  };

  /// Return a ReciprocalEstimate enum value for a square root of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;

  /// Return a ReciprocalEstimate enum value for a division of the given type
  /// based on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a square root of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Return the refinement step count for a division of the given type based
  /// on the function's attributes. If the operation is not overridden by
  /// the function's attributes, "Unspecified" is returned and target defaults
  /// are expected to be used for instruction selection.
  int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true only if vscale must be a power of two.
  virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  // Costs parameters used by
  // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
  // shouldKeepJumpConditionsTogether will use these parameter values to
  // determine if two conditions in the form `br (and/or cond1, cond2)` should
  // be split into two branches or left as one.
  //
  // BaseCost is the cost threshold (in latency). If the estimated latency of
  // computing both `cond1` and `cond2` is below the cost of just computing
  // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
  // they will be split.
  //
  // LikelyBias increases BaseCost if branch probability info indicates that it
  // is likely that both `cond1` and `cond2` will be computed.
  //
  // UnlikelyBias decreases BaseCost if branch probability info indicates that
  // it is unlikely that both `cond1` and `cond2` will be computed.
  //
  // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
  // `shouldKeepJumpConditionsTogether` always returning false).
  struct CondMergingParams {
    int BaseCost;
    int LikelyBias;
    int UnlikelyBias;
  };
  // Return params for deciding if we should keep two branch conditions merged
  // or split them into two separate branches.
  // Arg0: The binary op joining the two conditions (and/or).
  // Arg1: The first condition (cond1)
  // Arg2: The second condition (cond2)
  virtual CondMergingParams
  getJumpConditionMergingParams(Instruction::BinaryOps, const Value *,
                                const Value *) const {
    // -1 will always result in splitting.
    return {-1, -1, -1};
  }
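
  // A minimal sketch of a target override (hypothetical tuning values, not
  // from any in-tree target): merge `and` conditions when computing both is
  // at most 2 latency units worse than computing cond1 alone, with a bias of
  // 1 applied from branch probability info as described above.
  // \code
  //   CondMergingParams
  //   getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *,
  //                                 const Value *) const override {
  //     if (Opc == Instruction::And)
  //       return {/*BaseCost=*/2, /*LikelyBias=*/1, /*UnlikelyBias=*/1};
  //     return {-1, -1, -1}; // Always split `or` conditions.
  //   }
  // \endcode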

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  virtual bool fallBackToDAGISel(const Instruction &Inst) const {
    return false;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient; however, this can be undone by optimizations
  /// in dag combiner.
  virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                       const SelectionDAG &DAG,
                                       const MachineMemOperand &MMO) const;

  /// Return true if the following transform is beneficial:
  ///   (store (y (conv x)), y*)) -> (store x, (x*))
  virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
                                        const SelectionDAG &DAG,
                                        const MachineMemOperand &MMO) const {
    // Default to the same logic as loads.
    return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
  }

  /// Return true if it is expected to be cheaper to do a store of vector
  /// constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return IsZero;
  }

  /// Allow store merging for the specified type after legalization in addition
  /// to before legalization. This may transform stores that do not exist
  /// earlier (for example, stores created from intrinsics).
  virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
    return true;
  }

  /// Returns true if it's reasonable to merge stores to MemVT size.
  virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
                                const MachineFunction &MF) const {
    return true;
  }

  /// Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz(Type *Ty) const {
    return false;
  }

  /// Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
    return false;
  }

  /// Return true if ctlz instruction is fast.
  virtual bool isCtlzFast() const {
    return false;
  }

  /// Return true if ctpop instruction is fast.
  virtual bool isCtpopFast(EVT VT) const {
    return isOperationLegal(ISD::CTPOP, VT);
  }

  /// Return the maximum number of "x & (x - 1)" operations that can be done
  /// instead of deferring to a custom CTPOP.
  virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
    return 1;
  }

  /// Return true if instruction generated for equality comparison is folded
  /// with instruction generated for signed comparison.
  virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }

  /// Return true if the heuristic to prefer icmp eq zero should be used in
  /// code gen prepare.
  virtual bool preferZeroCompareBranch() const { return false; }

  /// Return true if it is cheaper to split the store of a merged int val
  /// from a pair of smaller values into multiple stores.
  virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
    return false;
  }

  /// Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #mask
  ///   %icmpResult = icmp %andResult, 0
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   cc = test %register, #mask
  /// \endcode
  virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
    return false;
  }

  /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
  virtual bool
  areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX,
                                      const MemSDNode &NodeY) const {
    return true;
  }

  /// Use bitwise logic to make pairs of compares more efficient. For example:
  ///   and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
  /// This should be true when it takes more than one instruction to lower
  /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
  /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
  virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
    return false;
  }

  /// Return the preferred operand type if the target has a quick way to
  /// compare integer values of the given size. Assume that any legal integer
  /// type can be compared efficiently. Targets may override this to allow
  /// illegal wide types to return a vector type if there is support to compare
  /// that type.
  virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
    MVT VT = MVT::getIntegerVT(NumBits);
    return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
  }

  /// Return true if the target should transform:
  ///   (X & Y) == Y ---> (~X & Y) == 0
  ///   (X & Y) != Y ---> (~X & Y) != 0
  ///
  /// This may be profitable if the target has a bitwise and-not operation that
  /// sets comparison flags. A target may want to limit the transformation based
  /// on the type of Y or if Y is a constant.
  ///
  /// Note that the transform will not occur if Y is known to be a power-of-2
  /// because a mask and compare of a single bit can be handled by inverting the
  /// predicate, for example:
  ///   (X & 8) == 8 ---> (X & 8) != 0
  virtual bool hasAndNotCompare(SDValue Y) const {
    return false;
  }

  /// Return true if the target has a bitwise and-not operation:
  ///   X = ~A & B
  /// This can be used to simplify select or other instructions.
  virtual bool hasAndNot(SDValue X) const {
    // If the target has the more complex version of this operation, assume
    // that it has this operation too.
    return hasAndNotCompare(X);
  }

  /// Return true if the target has a bit-test instruction:
  ///   (X & (1 << Y)) ==/!= 0
  /// This knowledge can be used to prevent breaking the pattern,
  /// or creating it if it could be recognized.
  virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }

  /// There are two ways to clear extreme bits (either low or high):
  ///   Mask:   x & (-1 << y)  (the instcombine canonical form)
  ///   Shifts: x >> y << y
  /// Return true if the variant with 2 variable shifts is preferred.
  /// Return false if there is no preference.
  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Return true if it is profitable to fold a pair of shifts into a mask.
  /// This is usually true on most targets. But some targets, like Thumb1,
  /// have immediate shift instructions, but no immediate "and" instruction;
  /// this makes the fold unprofitable.
  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                                 CombineLevel Level) const {
    return true;
  }

  /// Should we transform the IR-optimal check for whether given truncation
  /// down into KeptBits would be truncating or not:
  ///   (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
  /// Into its more traditional form:
  ///   ((%x << C) a>> C) dstcond %x
  /// Return true if we should transform.
  /// Return false if there is no preference.
  virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
                                                    unsigned KeptBits) const {
    // By default, let's assume that no one prefers shifts.
    return false;
  }

  /// Given the pattern
  ///   (X & (C l>>/<< Y)) ==/!= 0
  /// return true if it should be transformed into:
  ///   ((X <</l>> Y) & C) ==/!= 0
  /// WARNING: if 'X' is a constant, the fold may deadlock!
  /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
  ///        here because it can end up being not linked in.
  virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
      SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
      unsigned OldShiftOpcode, unsigned NewShiftOpcode,
      SelectionDAG &DAG) const {
    if (hasBitTest(X, Y)) {
      // One interesting pattern that we'd want to form is 'bit test':
      //   ((1 << Y) & C) ==/!= 0
      // But we also need to be careful not to try to reverse that fold.

      // Is this '1 << Y' ?
      if (OldShiftOpcode == ISD::SHL && CC->isOne())
        return false; // Keep the 'bit test' pattern.

      // Will it be '1 << Y' after the transform ?
      if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
        return true; // Do form the 'bit test' pattern.
    }

    // If 'X' is a constant, and we transform, then we will immediately
    // try to undo the fold, thus causing endless combine loop.
    // So by default, let's assume everyone prefers the fold
    // iff 'X' is not a constant.
    return !XC;
  }

  // Return true if it is desirable to perform the following transform:
  //   (fmul C, (uitofp Pow2))
  //       -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //   (fdiv C, (uitofp Pow2))
  //       -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
  //
  // This is only queried after we have verified the transform will be bitwise
  // equal.
  //
  // SDNode *N      : The FDiv/FMul node we want to transform.
  // SDValue FPConst: The Float constant operand in `N`.
  // SDValue IntPow2: The Integer power of 2 operand in `N`.
  virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
                                                   SDValue IntPow2) const {
    // Default to avoiding fdiv which is often very expensive.
    return N->getOpcode() == ISD::FDIV;
  }

  // Given:
  //   (icmp eq/ne (and X, C0), (shift X, C1))
  // or
  //   (icmp eq/ne X, (rotate X, CPow2))
  //
  // If C0 is a mask or shifted mask and the shift amount (C1) isolates the
  // remaining bits (i.e. something like `(x64 & UINT32_MAX) == (x64 >> 32)`),
  // do we prefer the shift to be shift-right, shift-left, or rotate?
  // Note: It's only valid to convert the rotate version to the shift version
  // iff the shift amount (`C1`) is a power of 2 (including 0).
  // If ShiftOpc (current Opcode) is returned, do nothing.
  virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(
      EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
      const APInt &ShiftOrRotateAmt,
      const std::optional<APInt> &AndMask) const {
    return ShiftOpc;
  }

  /// These two forms are equivalent:
  ///   sub %y, (xor %x, -1)
  ///   add (add %x, 1), %y
  /// The variant with two add's is IR-canonical.
  /// Some targets may prefer one to the other.
  virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
    // By default, let's assume that everyone prefers the form with two add's.
    return true;
  }

  // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
  // may want to avoid this to prevent loss of sub_nsw pattern.
  virtual bool preferABDSToABSWithNSW(EVT VT) const {
    return true;
  }

  // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
  virtual bool preferScalarizeSplat(SDNode *N) const { return true; }

  // Return true if the target wants to transform:
  //   (TruncVT truncate(sext_in_reg(VT X, ExtVT))
  //    -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
  // Some targets might prefer pre-sextinreg to improve truncation/saturation.
  virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
    return true;
  }

  /// Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if the target shall perform extract vector element and store
  /// given that the vector is known to be splat of constant.
  /// \p Index[out] gives the index of the vector element to be extracted when
  /// this is true.
  virtual bool shallExtractConstSplatVectorElementToStore(
      Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
    return false;
  }

  /// Return true if inserting a scalar into a variable element of an undef
  /// vector is more efficiently handled by splatting the scalar instead.
  virtual bool shouldSplatInsEltVarIndex(EVT) const {
    return false;
  }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }

  /// Return true if target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }

  /// Promote the given target boolean to a target boolean of the given type.
  /// A target boolean is an integer value, not necessarily of type i1, the bits
  /// of which conform to getBooleanContents.
  ///
  /// ValVT is the type of values that produced the boolean.
  SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
                               EVT ValVT) const {
    SDLoc dl(Bool);
    EVT BoolVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
    ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
    return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
  }

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const {
    (void)isDivergent;
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Allows target to decide about the register class of the
  /// specific value that is live outside the defining block.
  /// Returns true if the value needs uniform register class.
  virtual bool requiresUniformRegister(MachineFunction &MF,
                                       const Value *) const {
    return false;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return the preferred strategy to legalize this SHIFT instruction, with
  /// \p ExpansionFactor being the recursion depth - how many expansions are
  /// needed.
  enum class ShiftLegalizationStrategy {
    ExpandToParts,
    ExpandThroughStack,
    LowerToLibcall,
  };
  virtual ShiftLegalizationStrategy
  preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
                                     unsigned ExpansionFactor) const {
    if (ExpansionFactor == 1)
      return ShiftLegalizationStrategy::ExpandToParts;
    return ShiftLegalizationStrategy::ExpandThroughStack;
  }

  /// Return true if the target has native support for the specified value
  /// type. This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return pair that represents the legalization kind (first) that needs to
  /// happen to EVT (second) in order to type-legalize it.
  ///
  /// First: how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  ///
  /// Second: for types supported by the target, this is an identity function.
  /// For types that must be promoted to larger types, this returns the larger
  /// type to promote to. For integer types that are larger than the largest
  /// integer register, this contains one step in the expansion to get to the
  /// smaller register. For illegal floating point types, this returns the
  /// integer type to transform to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
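
  // Worked example (illustrative): on a target whose widest legal integer
  // type is i64, i256 is TypeExpandInteger, and getTypeToTransformTo halves
  // it one step at a time (i256 -> i128 -> i64), so the loop above returns
  // i64 from getTypeToExpandTo(Context, i256).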

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
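
  // A usage sketch (illustrative; `TLI` and `Ctx` are assumed to be a
  // TargetLoweringBase reference and an LLVMContext): breaking down v8f32 on
  // an SSE1-like target where v4f32 is the widest legal vector type.
  // \code
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = TLI.getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, IntermediateVT == v4f32, RegisterVT == v4f32.
  // \endcode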

  /// Certain targets such as MIPS require that some types such as vectors are
  /// always broken down into scalars in some contexts. This occurs even if the
  /// vector type is legal.
  virtual unsigned getVectorTypeBreakdownForCallingConv(
      LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
      unsigned &NumIntermediates, MVT &RegisterVT) const {
    return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
                                  RegisterVT);
  }

  struct IntrinsicInfo {
    unsigned opc = 0; // target opcode
    EVT memVT;        // memory VT

    // value representing memory location
    PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;

    // Fallback address space for use if ptrVal is nullptr. std::nullopt means
    // unknown address space.
    std::optional<unsigned> fallbackAddressSpace;

    int offset = 0;    // offset off of ptrVal
    uint64_t size = 0; // the size of the memory location
                       // (taken from memVT if zero)
    MaybeAlign align = Align(1); // alignment

    MachineMemOperand::Flags flags = MachineMemOperand::MONone;
    IntrinsicInfo() = default;
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to
  /// map to a MemIntrinsicNode (touches memory). If this is the case, it
  /// returns true and stores the intrinsic information into the IntrinsicInfo
  /// that was passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  MachineFunction &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
                            bool ForCodeSize = false) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for integer divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// How to legalize this custom operation?
  virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
    return Legal;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= std::size(OpActions[0]))
      return Custom;
    if (VT.isExtended())
      return Expand;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }
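
  // A usage sketch (illustrative; `TLI` is an assumed TargetLoweringBase
  // reference): dispatching on how a 64-bit signed divide must be handled.
  // \code
  //   switch (TLI.getOperationAction(ISD::SDIV, MVT::i64)) {
  //   case TargetLoweringBase::Legal:  /* emit the node directly */   break;
  //   case TargetLoweringBase::Custom: /* defer to LowerOperation */  break;
  //   default:                         /* expand or emit a libcall */ break;
  //   }
  // \endcode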

  /// Custom method defined by each target to indicate if an operation which
  /// may require a scale is supported natively by the target.
  /// If not, the operation is illegal.
  virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    return false;
  }

  /// Some fixed point operations may be natively supported by the target but
  /// only for specific scales. This method allows for checking
  /// if the width is supported by the target for a given operation that may
  /// depend on scale.
  LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
                                              unsigned Scale) const {
    auto Action = getOperationAction(Op, VT);
    if (Action != Legal)
      return Action;

    // This operation is supported in this type but may only work on specific
    // scales.
    bool Supported;
    switch (Op) {
    default:
      llvm_unreachable("Unexpected fixed point operation.");
    case ISD::SMULFIX:
    case ISD::SMULFIXSAT:
    case ISD::UMULFIX:
    case ISD::UMULFIXSAT:
    case ISD::SDIVFIX:
    case ISD::SDIVFIXSAT:
    case ISD::UDIVFIX:
    case ISD::UDIVFIXSAT:
      Supported = isSupportedFixedPointOperation(Op, VT, Scale);
      break;
    }

    return Supported ? Action : Expand;
  }

  // If Op is a strict floating-point operation, return the result
  // of getOperationAction for the equivalent non-strict operation.
  LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
    unsigned EqOpc;
    switch (Op) {
    default: llvm_unreachable("Unexpected FP pseudo-opcode");
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
    case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
#include "llvm/IR/ConstrainedOps.def"
    }

    return getOperationAction(EqOpc, VT);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions. LegalOnly is an optional convenience for code paths
  /// traversed pre and post legalisation.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT,
                                bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions. LegalOnly is an optional convenience for code paths traversed
  /// pre and post legalisation.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT,
                                 bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering or using promotion. This is used to help
  /// guide high-level lowering decisions. LegalOnly is an optional convenience
  /// for code paths traversed pre and post legalisation.
  bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
                                         bool LegalOnly = false) const {
    if (LegalOnly)
      return isOperationLegal(Op, VT);

    return (VT == MVT::Other || isTypeLegal(VT)) &&
           (getOperationAction(Op, VT) == Legal ||
            getOperationAction(Op, VT) == Custom ||
            getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the operation uses custom lowering, regardless of whether
  /// the type is legal or not.
  bool isOperationCustom(unsigned Op, EVT VT) const {
    return getOperationAction(Op, VT) == Custom;
  }

  /// Return true if lowering to a jump table is allowed.
  virtual bool areJTsAllowed(const Function *Fn) const {
    if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
      return false;

    return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  /// Check whether the range [Low,High] fits in a machine word.
  bool rangeFitsInWord(const APInt &Low, const APInt &High,
                       const DataLayout &DL) const {
    // FIXME: Using the pointer type doesn't seem ideal.
    uint64_t BW = DL.getIndexSizeInBits(0u);
    uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
    return Range <= BW;
  }
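
  // Worked example (illustrative): with a 64-bit index type, Low = 10 and
  // High = 70 give Range = 70 - 10 + 1 = 61 <= 64, so the range fits in a
  // machine word; Low = 0 and High = 100 give Range = 101 > 64, which does
  // not.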

  /// Return true if lowering to a jump table is suitable for a set of case
  /// clusters which may contain \p NumCases cases, \p Range range of values.
  virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
                                      uint64_t Range, ProfileSummaryInfo *PSI,
                                      BlockFrequencyInfo *BFI) const;

  /// Returns preferred type for switch condition.
  virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
                                              EVT ConditionVT) const;

  /// Return true if lowering to a bit test is suitable for a set of case
  /// clusters which contains \p NumDests unique destinations, \p Low and
  /// \p High as its lowest and highest case values, and expects \p NumCmps
  /// case value comparisons. Check if the number of destinations, comparison
  /// metric, and range are all suitable.
  bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
                             const APInt &Low, const APInt &High,
                             const DataLayout &DL) const {
    // FIXME: I don't think NumCmps is the correct metric: a single case and a
    // range of cases both require only one branch to lower. Just looking at the
    // number of clusters and destinations should be enough to decide whether to
    // build bit tests.

    // To lower a range with bit tests, the range must fit the bitwidth of a
    // machine word.
    if (!rangeFitsInWord(Low, High, DL))
      return false;

    // Decide whether it's profitable to lower this range with bit tests. Each
    // destination requires a bit test and branch, and there is an overall range
    // check branch. For a small number of clusters, separate comparisons might
    // be cheaper, and for many destinations, splitting the range might be
    // better.
    return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
           (NumDests == 3 && NumCmps >= 6);
  }
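
  // Worked example (illustrative): 3 destinations and 6 comparisons over case
  // values spanning [2, 57] on a 64-bit target fit in a word, and the clause
  // (NumDests == 3 && NumCmps >= 6) holds, so bit tests are suitable. With
  // only 4 comparisons no profitability clause holds and this returns false.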

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to
  /// some other code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
  }
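
  // For illustration: each LoadExtActions[ValI][MemI] entry packs one 4-bit
  // LegalizeAction per ISD::LoadExtType (NON_EXTLOAD, EXTLOAD, SEXTLOAD,
  // ZEXTLOAD), and ExtType selects a nibble via Shift = 4 * ExtType. With a
  // hypothetical packed entry of 0x0312:
  // \code
  //   //   NON_EXTLOAD -> 0x2 (Expand),  EXTLOAD  -> 0x1 (Promote),
  //   //   SEXTLOAD    -> 0x3 (LibCall), ZEXTLOAD -> 0x0 (Legal).
  //   unsigned Shift = 4 * ISD::SEXTLOAD;                  // == 8
  //   auto A = (LegalizeAction)((0x0312u >> Shift) & 0xf); // LibCall
  // \endcode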

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
           getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
  }

  /// Same as getLoadExtAction, but for atomic loads.
  LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT,
                                        EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
           MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
    unsigned Shift = 4 * ExtType;
    LegalizeAction Action =
        (LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
    assert((Action == Legal || Action == Expand) &&
           "Unsupported atomic load extension action.");
    return Action;
  }

  /// Return true if the specified atomic load with extension is legal on
  /// this target.
  bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return getAtomicLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }
1486
1487 /// Return how this store with truncation should be treated: either it is
1488 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1489 /// other code sequence, or the target has a custom expander for it.
1491 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1492 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1493 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1495 "Table isn't big enough!");
1496 return TruncStoreActions[ValI][MemI];
1497 }
1498
1499 /// Return true if the specified store with truncation is legal on this
1500 /// target.
1501 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1502 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1503 }
1504
1505 /// Return true if the specified store with truncation is legal or has a
1506 /// custom lowering on this target.
1507 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1508 return isTypeLegal(ValVT) &&
1509 (getTruncStoreAction(ValVT, MemVT) == Legal ||
1510 getTruncStoreAction(ValVT, MemVT) == Custom);
1511 }
1512
1513 virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
1514 bool LegalOnly) const {
1515 if (LegalOnly)
1516 return isTruncStoreLegal(ValVT, MemVT);
1517
1518 return isTruncStoreLegalOrCustom(ValVT, MemVT);
1519 }
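// For illustration, a combine that wants to form a truncating store might
// query the hook above like this sketch (the types and the LegalOnly flag's
// source are example assumptions):
//
// \code
//   if (canCombineTruncStore(MVT::i32, MVT::i16,
//                            /*LegalOnly=*/BeforeLegalizeOps))
//     // ... fold (trunc x) + store into a single truncating store ...
// \endcode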
1520
1521 /// Return how the indexed load should be treated: either it is legal, needs
1522 /// to be promoted to a larger size, needs to be expanded to some other code
1523 /// sequence, or the target has a custom expander for it.
1524 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1525 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1526 }
1527
1528 /// Return true if the specified indexed load is legal or custom on this target.
1529 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1530 return VT.isSimple() &&
1531 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1532 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1533 }
1534
1535 /// Return how the indexed store should be treated: either it is legal, needs
1536 /// to be promoted to a larger size, needs to be expanded to some other code
1537 /// sequence, or the target has a custom expander for it.
1538 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1539 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1540 }
1541
1542 /// Return true if the specified indexed store is legal or custom on this target.
1543 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1544 return VT.isSimple() &&
1545 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1546 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1547 }
1548
1549 /// Return how the indexed masked load should be treated: either it is legal, needs
1550 /// to be promoted to a larger size, needs to be expanded to some other code
1551 /// sequence, or the target has a custom expander for it.
1552 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1553 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1554 }
1555
1556 /// Return true if the specified indexed masked load is legal or custom on this target.
1557 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1558 return VT.isSimple() &&
1559 (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1560 getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1561 }
1562
1563 /// Return how the indexed masked store should be treated: either it is legal, needs
1564 /// to be promoted to a larger size, needs to be expanded to some other code
1565 /// sequence, or the target has a custom expander for it.
1566 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1567 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1568 }
1569
1570 /// Return true if the specified indexed masked store is legal or custom on this target.
1571 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1572 return VT.isSimple() &&
1573 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1574 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1575 }
1576
1577 /// Returns true if the index type for a masked gather/scatter requires
1578 /// extending.
1579 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1580
1581 // Returns true if Extend can be folded into the index of a masked
1582 // gather/scatter on this target.
1583 virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const {
1584 return false;
1585 }
1586
1587 // Return true if the target supports a scatter/gather instruction with
1588 // indices which are scaled by the particular value. Note that all targets
1589 // must by definition support scale of 1.
1590 virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
1591 uint64_t ElemSize) const {
1592 // MGATHER/MSCATTER are only required to support scaling by one or by the
1593 // element size.
1594 if (Scale != ElemSize && Scale != 1)
1595 return false;
1596 return true;
1597 }
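// Concretely, for a gather of i32 elements (ElemSize == 4), the default
// implementation above accepts Scale == 1 (byte-indexed) and Scale == 4
// (element-indexed) and rejects every other scale.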
1598
1599 /// Return how the condition code should be treated: either it is legal, needs
1600 /// to be expanded to some other code sequence, or the target has a custom
1601 /// expander for it.
1602 LegalizeAction
1603 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1604 assert((unsigned)CC < std::size(CondCodeActions) &&
1605 ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1606 "Table isn't big enough!");
1607 // See setCondCodeAction for how this is encoded.
1608 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1609 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1610 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1611 assert(Action != Promote && "Can't promote condition code!");
1612 return Action;
1613 }
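// Decoding sketch for the table above: each 32-bit entry holds eight 4-bit
// actions, selected by the low three bits of SimpleTy. A hypothetical query
// (condition code and type chosen only for illustration):
//
// \code
//   if (getCondCodeAction(ISD::SETUGT, MVT::i32) == Expand) {
//     // The legalizer must rewrite the comparison, e.g. via another predicate.
//   }
// \endcode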
1614
1615 /// Return true if the specified condition code is legal on this target.
1616 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1617 return getCondCodeAction(CC, VT) == Legal;
1618 }
1619
1620 /// Return true if the specified condition code is legal or custom on this
1621 /// target.
1622 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1623 return getCondCodeAction(CC, VT) == Legal ||
1624 getCondCodeAction(CC, VT) == Custom;
1625 }
1626
1627 /// If the action for this operation is to promote, this method returns the
1628 /// ValueType to promote to.
1629 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1631 "This operation isn't promoted!");
1632
1633 // See if this has an explicit type specified.
1634 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1635 MVT::SimpleValueType>::const_iterator PTTI =
1636 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1637 if (PTTI != PromoteToType.end()) return PTTI->second;
1638
1639 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1640 "Cannot autopromote this type, add it with AddPromotedToType.");
1641
1642 uint64_t VTBits = VT.getScalarSizeInBits();
1643 MVT NVT = VT;
1644 do {
1645 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1646 assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
1647 "Didn't find type to promote to!");
1648 } while (VTBits >= NVT.getScalarSizeInBits() || !isTypeLegal(NVT) ||
1649 getOperationAction(Op, NVT) == Promote);
1650 return NVT;
1651 }
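// Worked example of the loop above, assuming a hypothetical target where i16
// is not legal but i32 is, and no explicit AddPromotedToType entry exists:
//
// \code
//   MVT NVT = getTypeToPromoteTo(ISD::ADD, MVT::i16); // == MVT::i32
// \endcode
//
// The loop advances SimpleTy until it reaches the first legal integer type
// strictly wider than 16 bits that is not itself marked Promote.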
1652
1653 virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
1654 bool AllowUnknown = false) const {
1655 return getValueType(DL, Ty, AllowUnknown);
1656 }
1657
1658 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1659 /// operations except for the pointer size. If AllowUnknown is true, this
1660 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1661 /// otherwise it will assert.
1662 EVT getValueType(const DataLayout &DL, Type *Ty,
1663 bool AllowUnknown = false) const {
1664 // Lower scalar pointers to native pointer types.
1665 if (auto *PTy = dyn_cast<PointerType>(Ty))
1666 return getPointerTy(DL, PTy->getAddressSpace());
1667
1668 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1669 Type *EltTy = VTy->getElementType();
1670 // Lower vectors of pointers to native pointer types.
1671 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1672 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1673 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1674 }
1675 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1676 VTy->getElementCount());
1677 }
1678
1679 return EVT::getEVT(Ty, AllowUnknown);
1680 }
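// For illustration (assuming 64-bit pointers in address space 0, and Ctx/DL
// as an in-scope LLVMContext and DataLayout), a vector of pointers is
// lowered element-wise through getPointerTy:
//
// \code
//   // <4 x ptr> maps to v4i64 on such a target:
//   EVT VT = getValueType(
//       DL, FixedVectorType::get(PointerType::get(Ctx, /*AS=*/0), 4));
// \endcode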
1681
1682 EVT getMemValueType(const DataLayout &DL, Type *Ty,
1683 bool AllowUnknown = false) const {
1684 // Lower scalar pointers to native pointer types.
1685 if (auto *PTy = dyn_cast<PointerType>(Ty))
1686 return getPointerMemTy(DL, PTy->getAddressSpace());
1687
1688 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1689 Type *EltTy = VTy->getElementType();
1690 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1691 EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
1692 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1693 }
1694 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1695 VTy->getElementCount());
1696 }
1697
1698 return getValueType(DL, Ty, AllowUnknown);
1699 }
1700
1701
1702 /// Return the MVT corresponding to this LLVM type. See getValueType.
1703 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1704 bool AllowUnknown = false) const {
1705 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1706 }
1707
1708 /// Return the desired alignment for ByVal or InAlloca aggregate function
1709 /// arguments in the caller parameter area. This is the actual alignment, not
1710 /// its logarithm.
1711 virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1712
1713 /// Return the type of registers that this ValueType will eventually require.
1714 virtual MVT getRegisterType(MVT VT) const {
1715 assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1716 return RegisterTypeForVT[VT.SimpleTy];
1717 }
1718
1719 /// Return the type of registers that this ValueType will eventually require.
1720 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1721 if (VT.isSimple())
1722 return getRegisterType(VT.getSimpleVT());
1723 if (VT.isVector()) {
1724 EVT VT1;
1725 MVT RegisterVT;
1726 unsigned NumIntermediates;
1727 (void)getVectorTypeBreakdown(Context, VT, VT1,
1728 NumIntermediates, RegisterVT);
1729 return RegisterVT;
1730 }
1731 if (VT.isInteger()) {
1732 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1733 }
1734 llvm_unreachable("Unsupported extended type!");
1735 }
1736
1737 /// Return the number of registers that this ValueType will eventually
1738 /// require.
1739 ///
1740 /// This is one for any types promoted to live in larger registers, but may be
1741 /// more than one for types (like i64) that are split into pieces. For types
1742 /// like i140, which are first promoted then expanded, it is the number of
1743 /// registers needed to hold all the bits of the original type. For an i140
1744 /// on a 32-bit machine this means 5 registers.
1745 ///
1746 /// RegisterVT may be passed as a way to override the default settings, for
1747 /// instance with i128 inline assembly operands on SystemZ.
1748 virtual unsigned
1749 getNumRegisters(LLVMContext &Context, EVT VT,
1750 std::optional<MVT> RegisterVT = std::nullopt) const {
1751 if (VT.isSimple()) {
1752 assert((unsigned)VT.getSimpleVT().SimpleTy <
1753 std::size(NumRegistersForVT));
1754 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1755 }
1756 if (VT.isVector()) {
1757 EVT VT1;
1758 MVT VT2;
1759 unsigned NumIntermediates;
1760 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1761 }
1762 if (VT.isInteger()) {
1763 unsigned BitWidth = VT.getSizeInBits();
1764 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1765 return (BitWidth + RegWidth - 1) / RegWidth;
1766 }
1767 llvm_unreachable("Unsupported extended type!");
1768 }
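// Worked example of the integer path above: on a hypothetical 32-bit target,
// an i140 value has BitWidth == 140 and RegWidth == 32, so the result is
// (140 + 32 - 1) / 32 == 5 registers, matching the i140 example in the
// comment before this function.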
1769
1770 /// Certain combinations of ABIs, Targets and features require that types
1771 /// are legal for some operations and not for other operations.
1772 /// For MIPS all vector types must be passed through the integer register set.
1773 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1774 CallingConv::ID CC, EVT VT) const {
1775 return getRegisterType(Context, VT);
1776 }
1777
1778 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1779 /// this occurs when a vector type is used, as vectors are passed through the
1780 /// integer register set.
1781 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1782 CallingConv::ID CC,
1783 EVT VT) const {
1784 return getNumRegisters(Context, VT);
1785 }
1786
1787 /// Certain targets have context sensitive alignment requirements, where one
1788 /// type has the alignment requirement of another type.
1789 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1790 const DataLayout &DL) const {
1791 return DL.getABITypeAlign(ArgTy);
1792 }
1793
1794 /// If true, then instruction selection should seek to shrink the FP constant
1795 /// of the specified type to a smaller type in order to save space and / or
1796 /// reduce runtime.
1797 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1798
1799 /// Return true if it is profitable to reduce a load to a smaller type.
1800 /// Example: (i16 (trunc (i32 (load x)))) -> i16 load x
1801 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1802 EVT NewVT) const {
1803 // By default, assume that it is cheaper to extract a subvector from a wide
1804 // vector load rather than creating multiple narrow vector loads.
1805 if (NewVT.isVector() && !Load->hasOneUse())
1806 return false;
1807
1808 return true;
1809 }
1810
1811 /// Return true (the default) if it is profitable to remove a sext_inreg(x)
1812 /// where the sext is redundant, and use x directly.
1813 virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }
1814
1815 /// Indicates if any padding is guaranteed to go at the most significant bits
1816 /// when storing the type to memory and the type size isn't equal to the store
1817 /// size.
1818 bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const {
1819 return VT.isScalarInteger() && !VT.isByteSized();
1820 }
1821
1822 /// When splitting a value of the specified type into parts, does the Lo
1823 /// or Hi part come first? This usually follows the endianness, except
1824 /// for ppcf128, where the Hi part always comes first.
1825 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1826 return DL.isBigEndian() || VT == MVT::ppcf128;
1827 }
1828
1829 /// If true, the target has custom DAG combine transformations that it can
1830 /// perform for the specified node.
1831 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1832 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1833 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1834 }
1835
1836 virtual unsigned getGatherAllAliasesMaxDepth() const {
1837 return GatherAllAliasesMaxDepth;
1838 }
1839
1840 /// Returns the size of the platform's va_list object.
1841 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1842 return getPointerTy(DL).getSizeInBits();
1843 }
1844
1845 /// Get maximum # of store operations permitted for llvm.memset
1846 ///
1847 /// This function returns the maximum number of store operations permitted
1848 /// to replace a call to llvm.memset. The value is set by the target at the
1849 /// performance threshold for such a replacement. If OptSize is true,
1850 /// return the limit for functions that have OptSize attribute.
1851 unsigned getMaxStoresPerMemset(bool OptSize) const {
1852 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1853 }
1854
1855 /// Get maximum # of store operations permitted for llvm.memcpy
1856 ///
1857 /// This function returns the maximum number of store operations permitted
1858 /// to replace a call to llvm.memcpy. The value is set by the target at the
1859 /// performance threshold for such a replacement. If OptSize is true,
1860 /// return the limit for functions that have OptSize attribute.
1861 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1862 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1863 }
1864
1865 /// \brief Get maximum # of store operations to be glued together
1866 ///
1867 /// This function returns the maximum number of store operations permitted
1868 /// to glue together during lowering of llvm.memcpy. The value is set by
1869 /// the target at the performance threshold for such a replacement.
1870 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1871 return MaxGluedStoresPerMemcpy;
1872 }
1873
1874 /// Get maximum # of load operations permitted for memcmp
1875 ///
1876 /// This function returns the maximum number of load operations permitted
1877 /// to replace a call to memcmp. The value is set by the target at the
1878 /// performance threshold for such a replacement. If OptSize is true,
1879 /// return the limit for functions that have OptSize attribute.
1880 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1881 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1882 }
1883
1884 /// Get maximum # of store operations permitted for llvm.memmove
1885 ///
1886 /// This function returns the maximum number of store operations permitted
1887 /// to replace a call to llvm.memmove. The value is set by the target at the
1888 /// performance threshold for such a replacement. If OptSize is true,
1889 /// return the limit for functions that have OptSize attribute.
1890 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1891 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1892 }
1893
1894 /// Determine if the target supports unaligned memory accesses.
1895 ///
1896 /// This function returns true if the target allows unaligned memory accesses
1897 /// of the specified type in the given address space. If true, it also returns
1898 /// a relative speed of the unaligned memory access in the last argument by
1899 /// reference. The higher the speed number, the faster the operation compared
1900 /// to a number returned by another such call. This is used, for example, in
1901 /// situations where an array copy/move/set is converted to a sequence of
1902 /// store operations. Its use helps to ensure that such replacements don't
1903 /// generate code that causes an alignment error (trap) on the target machine.
1904 virtual bool allowsMisalignedMemoryAccesses(
1905 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1906 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1907 unsigned * /*Fast*/ = nullptr) const {
1908 return false;
1909 }
1910
1911 /// LLT handling variant.
1912 virtual bool allowsMisalignedMemoryAccesses(
1913 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1914 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1915 unsigned * /*Fast*/ = nullptr) const {
1916 return false;
1917 }
1918
1919 /// This function returns true if the memory access is aligned or if the
1920 /// target allows this specific unaligned memory access. If the access is
1921 /// allowed, the optional final parameter returns a relative speed of the
1922 /// access (as defined by the target).
1923 bool allowsMemoryAccessForAlignment(
1924 LLVMContext &Context, const DataLayout &DL, EVT VT,
1925 unsigned AddrSpace = 0, Align Alignment = Align(1),
1926 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1927 unsigned *Fast = nullptr) const;
1928
1929 /// Return true if the memory access of this type is aligned or if the target
1930 /// allows this specific unaligned access for the given MachineMemOperand.
1931 /// If the access is allowed, the optional final parameter returns a relative
1932 /// speed of the access (as defined by the target).
1933 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1934 const DataLayout &DL, EVT VT,
1935 const MachineMemOperand &MMO,
1936 unsigned *Fast = nullptr) const;
1937
1938 /// Return true if the target supports a memory access of this type for the
1939 /// given address space and alignment. If the access is allowed, the optional
1940 /// final parameter returns the relative speed of the access (as defined by
1941 /// the target).
1942 virtual bool
1943 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1944 unsigned AddrSpace = 0, Align Alignment = Align(1),
1945 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1946 unsigned *Fast = nullptr) const;
1947
1948 /// Return true if the target supports a memory access of this type for the
1949 /// given MachineMemOperand. If the access is allowed, the optional
1950 /// final parameter returns the relative access speed (as defined by the
1951 /// target).
1952 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1953 const MachineMemOperand &MMO,
1954 unsigned *Fast = nullptr) const;
1955
1956 /// LLT handling variant.
1957 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1958 const MachineMemOperand &MMO,
1959 unsigned *Fast = nullptr) const;
1960
1961 /// Returns the target specific optimal type for load and store operations as
1962 /// a result of memset, memcpy, and memmove lowering.
1963 /// It returns EVT::Other if the type should be determined using generic
1964 /// target-independent logic.
1965 virtual EVT
1966 getOptimalMemOpType(const MemOp &Op,
1967 const AttributeList & /*FuncAttributes*/) const {
1968 return MVT::Other;
1969 }
1970
1971 /// LLT returning variant.
1972 virtual LLT
1973 getOptimalMemOpLLT(const MemOp &Op,
1974 const AttributeList & /*FuncAttributes*/) const {
1975 return LLT();
1976 }
1977
1978 /// Returns true if it's safe to use load / store of the specified type to
1979 /// expand memcpy / memset inline.
1980 ///
1981 /// This is mostly true for all types except for some special cases. For
1982 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1983 /// fstpl which also does type conversion. Note the specified type doesn't
1984 /// have to be legal as the hook is used before type legalization.
1985 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
1986
1987 /// Return lower limit for number of blocks in a jump table.
1988 virtual unsigned getMinimumJumpTableEntries() const;
1989
1990 /// Return lower limit of the density in a jump table.
1991 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
1992
1993 /// Return upper limit for number of entries in a jump table.
1994 /// Zero if no limit.
1995 unsigned getMaximumJumpTableSize() const;
1996
1997 virtual bool isJumpTableRelative() const;
1998
1999 /// If a physical register, this specifies the register that
2000 /// llvm.savestack/llvm.restorestack should save and restore.
2001 Register getStackPointerRegisterToSaveRestore() const {
2002 return StackPointerRegisterToSaveRestore;
2003 }
2004
2005 /// If a physical register, this returns the register that receives the
2006 /// exception address on entry to an EH pad.
2007 virtual Register
2008 getExceptionPointerRegister(const Constant *PersonalityFn) const {
2009 return Register();
2010 }
2011
2012 /// If a physical register, this returns the register that receives the
2013 /// exception typeid on entry to a landing pad.
2014 virtual Register
2015 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
2016 return Register();
2017 }
2018
2019 virtual bool needsFixedCatchObjects() const {
2020 report_fatal_error("Funclet EH is not implemented for this target");
2021 }
2022
2023 /// Return the minimum stack alignment of an argument.
2024 Align getMinStackArgumentAlignment() const {
2025 return MinStackArgumentAlignment;
2026 }
2027
2028 /// Return the minimum function alignment.
2029 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
2030
2031 /// Return the preferred function alignment.
2032 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
2033
2034 /// Return the preferred loop alignment.
2035 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
2036
2037 /// Return the maximum number of bytes allowed to be emitted when padding for
2038 /// alignment.
2039 virtual unsigned
2040 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
2041
2042 /// Should loops be aligned even when the function is marked OptSize (but not
2043 /// MinSize).
2044 virtual bool alignLoopsWithOptSize() const { return false; }
2045
2046 /// If the target has a standard location for the stack protector guard,
2047 /// returns the address of that location. Otherwise, returns nullptr.
2048 /// DEPRECATED: please override useLoadStackGuardNode and customize
2049 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
2050 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
2051
2052 /// Inserts necessary declarations for SSP (stack protection) purpose.
2053 /// Should be used only when getIRStackGuard returns nullptr.
2054 virtual void insertSSPDeclarations(Module &M) const;
2055
2056 /// Return the variable that's previously inserted by insertSSPDeclarations,
2057 /// if any, otherwise return nullptr. Should be used only when
2058 /// getIRStackGuard returns nullptr.
2059 virtual Value *getSDagStackGuard(const Module &M) const;
2060
2061 /// If this function returns true, stack protection checks should XOR the
2062 /// frame pointer (or whichever pointer is used to address locals) into the
2063 /// stack guard value before checking it. getIRStackGuard must return nullptr
2064 /// if this returns true.
2065 virtual bool useStackGuardXorFP() const { return false; }
2066
2067 /// If the target has a standard stack protection check function that
2068 /// performs validation and error handling, returns the function. Otherwise,
2069 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
2070 /// Should be used only when getIRStackGuard returns nullptr.
2071 virtual Function *getSSPStackGuardCheck(const Module &M) const;
2072
2073protected:
2074 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
2075 bool UseTLS) const;
2076
2077public:
2078 /// Returns the target-specific address of the unsafe stack pointer.
2079 virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
2080
2081 /// Returns the name of the symbol used to emit stack probes or the empty
2082 /// string if not applicable.
2083 virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }
2084
2085 virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }
2086
2087 virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
2088 return "";
2089 }
2090
2091 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
2092 /// are happy to sink it into basic blocks. A cast may be free, but not
2093 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
2094 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
2095
2096 /// Return true if the pointer arguments to CI should be aligned by aligning
2097 /// the object whose address is being passed. If so then MinSize is set to the
2098 /// minimum size the object must be to be aligned and PrefAlign is set to the
2099 /// preferred alignment.
2100 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
2101 Align & /*PrefAlign*/) const {
2102 return false;
2103 }
2104
2105 //===--------------------------------------------------------------------===//
2106 /// \name Helpers for TargetTransformInfo implementations
2107 /// @{
2108
2109 /// Get the ISD node that corresponds to the Instruction class opcode.
2110 int InstructionOpcodeToISD(unsigned Opcode) const;
2111
2112 /// @}
2113
2114 //===--------------------------------------------------------------------===//
2115 /// \name Helpers for atomic expansion.
2116 /// @{
2117
2118 /// Returns the maximum atomic operation size (in bits) supported by
2119 /// the backend. Atomic operations greater than this size (as well
2120 /// as ones that are not naturally aligned), will be expanded by
2121 /// AtomicExpandPass into an __atomic_* library call.
2122 unsigned getMaxAtomicSizeInBitsSupported() const {
2123 return MaxAtomicSizeInBitsSupported;
2124 }
2125
2126 /// Returns the size in bits of the maximum div/rem the backend supports.
2127 /// Larger operations will be expanded by ExpandLargeDivRem.
2128 unsigned getMaxDivRemBitWidthSupported() const {
2129 return MaxDivRemBitWidthSupported;
2130 }
2131
2132 /// Returns the size in bits of the maximum fp convert the backend
2133 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
2134 unsigned getMaxLargeFPConvertBitWidthSupported() const {
2135 return MaxLargeFPConvertBitWidthSupported;
2136 }
2137
2138 /// Returns the size of the smallest cmpxchg or ll/sc instruction
2139 /// the backend supports. Any smaller operations are widened in
2140 /// AtomicExpandPass.
2141 ///
2142 /// Note that *unlike* operations above the maximum size, atomic ops
2143 /// are still natively supported below the minimum; they just
2144 /// require a more complex expansion.
2145 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
2146
2147 /// Whether the target supports unaligned atomic operations.
2148 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
2149
2150 /// Whether AtomicExpandPass should automatically insert fences and reduce
2151 /// ordering for this atomic. This should be true for most architectures with
2152 /// weak memory ordering. Defaults to false.
2153 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
2154 return false;
2155 }
2156
2157 /// Whether AtomicExpandPass should automatically insert a trailing fence
2158 /// without reducing the ordering for this atomic. Defaults to false.
2159 virtual bool
2160 shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
2161 return false;
2162 }
2163
2164 /// Perform a load-linked operation on Addr, returning a "Value *" with the
2165 /// corresponding pointee type. This may entail some non-trivial operations to
2166 /// truncate or reconstruct types that will be illegal in the backend. See
2167 /// ARMISelLowering for an example implementation.
2168 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
2169 Value *Addr, AtomicOrdering Ord) const {
2170 llvm_unreachable("Load linked unimplemented on this target");
2171 }
2172
2173 /// Perform a store-conditional operation to Addr. Return the status of the
2174 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2175 virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
2176 Value *Addr, AtomicOrdering Ord) const {
2177 llvm_unreachable("Store conditional unimplemented on this target");
2178 }
2179
2180 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2181 /// represents the core LL/SC loop which will be lowered at a late stage by
2182 /// the backend. The target-specific intrinsic returns the loaded value and
2183 /// is not responsible for masking and shifting the result.
2184 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
2185 AtomicRMWInst *AI,
2186 Value *AlignedAddr, Value *Incr,
2187 Value *Mask, Value *ShiftAmt,
2188 AtomicOrdering Ord) const {
2189 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2190 }
2191
2192 /// Perform an atomicrmw expansion in a target-specific way. This is
2193 /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2194 /// work, and the target supports another way to lower atomicrmw.
2195 virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2197 "Generic atomicrmw expansion unimplemented on this target");
2198 }
2199
2200 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2201 /// represents the combined bit test intrinsic which will be lowered at a late
2202 /// stage by the backend.
2205 "Bit test atomicrmw expansion unimplemented on this target");
2206 }
2207
2208 /// Perform an atomicrmw whose result is only used by comparison, using a
2209 /// target-specific intrinsic. This represents the combined atomic and compare
2210 /// intrinsic which will be lowered at a late stage by the backend.
2213 "Compare arith atomicrmw expansion unimplemented on this target");
2214 }
2215
2216 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2217 /// represents the core LL/SC loop which will be lowered at a late stage by
2218 /// the backend. The target-specific intrinsic returns the loaded value and
2219 /// is not responsible for masking and shifting the result.
2221 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2222 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2223 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2224 }
2225
2226 //===--------------------------------------------------------------------===//
2227 /// \name KCFI check lowering.
2228 /// @{
2229
2230 virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
2231 MachineBasicBlock::instr_iterator &MBBI,
2232 const TargetInstrInfo *TII) const {
2233 llvm_unreachable("KCFI is not supported on this target");
2234 }
2235
2236 /// @}
2237
2238 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2239 /// It is called by AtomicExpandPass before expanding an
2240 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2241 /// if shouldInsertFencesForAtomic returns true.
2242 ///
2243 /// Inst is the original atomic instruction, prior to other expansions that
2244 /// may be performed.
2245 ///
2246 /// This function should either return a nullptr, or a pointer to an IR-level
2247 /// Instruction*. Even complex fence sequences can be represented by a
2248 /// single Instruction* through an intrinsic to be lowered later.
2249 ///
2250 /// The default implementation emits an IR fence before any release (or
2251 /// stronger) operation that stores, and after any acquire (or stronger)
2252 /// operation. This is generally a correct implementation, but backends may
2253 /// override if they wish to use alternative schemes (e.g. the PowerPC
2254 /// standard ABI uses a fence before a seq_cst load instead of after a
2255 /// seq_cst store).
2256 /// @{
2257 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2258 Instruction *Inst,
2259 AtomicOrdering Ord) const;
2260
2261 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2262 Instruction *Inst,
2263 AtomicOrdering Ord) const;
2264 /// @}
2265
2266 // Emits code that executes when the comparison result in the ll/sc
2267 // expansion of a cmpxchg instruction is such that the store-conditional will
2268 // not execute. This makes it possible to balance out the load-linked with
2269 // a dedicated instruction, if desired.
2270 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2271 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2272 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2273
2274 /// Returns true if arguments should be sign-extended in lib calls.
2275 virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
2276 return IsSigned;
2277 }
2278
2279 /// Returns true if arguments should be extended in lib calls.
2280 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2281 return true;
2282 }
2283
2284 /// Returns how the given (atomic) load should be expanded by the
2285 /// IR-level AtomicExpand pass.
2286 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2287 return AtomicExpansionKind::None;
2288 }
2289
2290 /// Returns how the given (atomic) load should be cast by the IR-level
2291 /// AtomicExpand pass.
2292 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2293 if (LI->getType()->isFloatingPointTy())
2294 return AtomicExpansionKind::CastToInteger;
2295 return AtomicExpansionKind::None;
2296 }
2297
2298 /// Returns how the given (atomic) store should be expanded by the IR-level
2299 /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
2300 /// to use an atomicrmw xchg.
2301 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2302 return AtomicExpansionKind::None;
2303 }
2304
2305 /// Returns how the given (atomic) store should be cast by the IR-level
2306 /// AtomicExpand pass. For instance AtomicExpansionKind::CastToInteger
2307 /// will try to cast the operands to integer values.
2308 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2309 if (SI->getValueOperand()->getType()->isFloatingPointTy())
2310 return AtomicExpansionKind::CastToInteger;
2311 return AtomicExpansionKind::None;
2312 }
2313
2314 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2315 /// AtomicExpand pass.
2316 virtual AtomicExpansionKind
2317 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2318 return AtomicExpansionKind::None;
2319 }
2320
2321 /// Returns how the IR-level AtomicExpand pass should expand the given
2322 /// AtomicRMW, if at all. Default is to never expand.
2324 return RMW->isFloatingPointOperation() ?
2326 }
2327
2328 /// Returns how the given atomic atomicrmw should be cast by the IR-level
2329 /// AtomicExpand pass.
2330 virtual AtomicExpansionKind
2331 shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2332 if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2333 (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2334 RMWI->getValOperand()->getType()->isPointerTy()))
2335 return AtomicExpansionKind::CastToInteger;
2336
2337 return AtomicExpansionKind::None;
2338 }
2339
2340 /// On some platforms, an AtomicRMW that never actually modifies the value
2341 /// (such as fetch_add of 0) can be turned into a fence followed by an
2342 /// atomic load. This may sound useless, but it makes it possible for the
2343 /// processor to keep the cacheline shared, dramatically improving
2344 /// performance. And such idempotent RMWs are useful for implementing some
2345 /// kinds of locks, see for example (justification + benchmarks):
2346 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2347 /// This method tries doing that transformation, returning the atomic load if
2348 /// it succeeds, and nullptr otherwise.
2349 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2350 /// another round of expansion.
2351 virtual LoadInst *
2352 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2353 return nullptr;
2354 }
2355
2356 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2357 /// SIGN_EXTEND, or ANY_EXTEND).
2358 virtual ISD::NodeType getExtendForAtomicOps() const {
2359 return ISD::ZERO_EXTEND;
2360 }
2361
2362 /// Returns how the platform's atomic compare and swap expects its comparison
2363 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2364 /// separate from getExtendForAtomicOps, which is concerned with the
2365 /// sign-extension of the instruction's output, whereas here we are concerned
2366 /// with the sign-extension of the input. For targets with compare-and-swap
2367 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2368 /// the input can be ANY_EXTEND, but the output will still have a specific
2369 /// extension.
2370 virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2371 return ISD::ANY_EXTEND;
2372 }
2373
2374 /// @}
2375
2376 /// Returns true if we should normalize
2377 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2378 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2379 /// that it saves us from materializing N0 and N1 in an integer register.
2380 /// Targets that are able to perform and/or on flags should return false here.
2381 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2382 EVT VT) const {
2383 // If a target has multiple condition registers, then it likely has logical
2384 // operations on those registers.
2385 if (hasMultipleConditionRegisters())
2386 return false;
2387 // Only do the transform if the value won't be split into multiple
2388 // registers.
2389 LegalizeTypeAction Action = getTypeAction(Context, VT);
2390 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2391 Action != TypeSplitVector;
2392 }
2393
2394 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2395
2396 /// Return true if a select of constants (select Cond, C1, C2) should be
2397 /// transformed into simple math ops with the condition value. For example:
2398 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2399 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2400 return false;
2401 }
2402
2403 /// Return true if it is profitable to transform an integer
2404 /// multiplication-by-constant into simpler operations like shifts and adds.
2405 /// This may be true if the target does not directly support the
2406 /// multiplication operation for the specified type or the sequence of simpler
2407 /// ops is faster than the multiply.
2408 virtual bool decomposeMulByConstant(LLVMContext &Context,
2409 EVT VT, SDValue C) const {
2410 return false;
2411 }
2412
2413 /// Return true if it may be profitable to transform
2414 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2415 /// This may not be true if c1 and c2 can be represented as immediates but
2416 /// c1*c2 cannot, for example.
2417 /// The target should check if c1, c2 and c1*c2 can be represented as
2418 /// immediates, or have to be materialized into registers. If it is not sure
2419 /// about some cases, a default true can be returned to let the DAGCombiner
2420 /// decide.
2421 /// AddNode is (add x, c1), and ConstNode is c2.
2422 virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2423 SDValue ConstNode) const {
2424 return true;
2425 }
2426
2427 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2428 /// conversion operations - canonicalizing the FP source value instead of
2429 /// converting all cases and then selecting based on value.
2430 /// This may be true if the target throws exceptions for out of bounds
2431 /// conversions or has fast FP CMOV.
2432 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2433 bool IsSigned) const {
2434 return false;
2435 }
2436
2437 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2438 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2439 /// considered beneficial.
2440 /// If optimizing for size, expansion is only considered beneficial for up to
2441 /// 5 multiplies and a divide (if the exponent is negative).
2442 bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2443 if (Exponent < 0)
2444 Exponent = -Exponent;
2445 uint64_t E = static_cast<uint64_t>(Exponent);
2446 return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2447 }
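// Worked example of the heuristic above: for an exponent of 19 (0b10011),
// popcount(19) + Log2_64(19) == 3 + 4 == 7, which is not < 7, so expansion
// is rejected when optimizing for size (it would take 4 squarings plus 2
// extra multiplies). For an exponent of 16, 1 + 4 == 5 < 7, so expansion is
// considered beneficial.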
2448
2449 //===--------------------------------------------------------------------===//
2450 // TargetLowering Configuration Methods - These methods should be invoked by
2451 // the derived class constructor to configure this object for the target.
2452 //
2453protected:
2454 /// Specify how the target extends the result of integer and floating point
2455 /// boolean values from i1 to a wider type. See getBooleanContents.
2456 void setBooleanContents(BooleanContent Ty) {
2457 BooleanContents = Ty;
2458 BooleanFloatContents = Ty;
2459 }
2460
2461 /// Specify how the target extends the result of integer and floating point
2462 /// boolean values from i1 to a wider type. See getBooleanContents.
2463 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2464 BooleanContents = IntTy;
2465 BooleanFloatContents = FloatTy;
2466 }
2467
2468 /// Specify how the target extends the result of a vector boolean value from a
2469 /// vector of i1 to a wider type. See getBooleanContents.
2470 void setBooleanVectorContents(BooleanContent Ty) {
2471 BooleanVectorContents = Ty;
2472 }
2473
2474 /// Specify the target scheduling preference.
2475 void setSchedulingPreference(Sched::Preference Pref) {
2476 SchedPreferenceInfo = Pref;
2477 }
2478
2479 /// Indicate the minimum number of blocks to generate jump tables.
2480 void setMinimumJumpTableEntries(unsigned Val);
2481
2482 /// Indicate the maximum number of entries in jump tables.
2483 /// Set to zero to generate unlimited jump tables.
2484 void setMaximumJumpTableSize(unsigned);
2485
2486 /// If set to a physical register, this specifies the register that
2487 /// llvm.savestack/llvm.restorestack should save and restore.
2488 void setStackPointerRegisterToSaveRestore(Register R) {
2489 StackPointerRegisterToSaveRestore = R;
2490 }
2491
2492 /// Tells the code generator that the target has multiple (allocatable)
2493 /// condition registers that can be used to store the results of comparisons
2494 /// for use by selects and conditional branches. With multiple condition
2495 /// registers, the code generator will not aggressively sink comparisons into
2496 /// the blocks of their users.
2497 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2498 HasMultipleConditionRegisters = hasManyRegs;
2499 }
2500
2501 /// Tells the code generator that the target has BitExtract instructions.
2502 /// The code generator will aggressively sink "shift"s into the blocks of
2503 /// their users if the users will generate "and" instructions which can be
2504 /// combined with "shift" to BitExtract instructions.
2505 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2506 HasExtractBitsInsn = hasExtractInsn;
2507 }
2508
2509 /// Tells the code generator not to expand logic operations on comparison
2510 /// predicates into separate sequences that increase the amount of flow
2511 /// control.
2512 void setJumpIsExpensive(bool isExpensive = true);
2513
2514 /// Tells the code generator which bitwidths to bypass.
2515 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2516 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2517 }
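// For illustration, a target whose 64-bit divider is slow might request a
// 32-bit bypass from its TargetLowering constructor (the widths here are an
// example, not a recommendation):
//
// \code
//   addBypassSlowDiv(64, 32); // try a 32-bit divide when the operands fit
// \endcode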
2518
2519 /// Add the specified register class as an available regclass for the
2520 /// specified value type. This indicates the selector can handle values of
2521 /// that class natively.
2522 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2523 assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2524 RegClassForVT[VT.SimpleTy] = RC;
2525 }
2526
2527 /// Return the largest legal super-reg register class of the register class
2528 /// for the specified type and its associated "cost".
2529 virtual std::pair<const TargetRegisterClass *, uint8_t>
2530 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2531
2532 /// Once all of the register classes are added, this allows us to compute
2533 /// derived properties we expose.
2534 void computeRegisterProperties(const TargetRegisterInfo *TRI);
2535
2536 /// Indicate that the specified operation does not work with the specified
2537 /// type and indicate what to do about it. Note that VT may refer to either
2538 /// the type of a result or that of an operand of Op.
2539 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2540 assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2541 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2542 }
2543 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2544 LegalizeAction Action) {
2545 for (auto Op : Ops)
2546 setOperationAction(Op, VT, Action);
2547 }
2548 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2549 LegalizeAction Action) {
2550 for (auto VT : VTs)
2551 setOperationAction(Ops, VT, Action);
2552 }
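// A typical configuration sequence in a derived TargetLowering constructor
// might look like this sketch (the opcodes, types, and actions are chosen
// only to illustrate the API, not taken from any backend):
//
// \code
//   setOperationAction(ISD::SDIV, MVT::i32, Expand);    // no divide insn
//   setOperationAction(ISD::SELECT, MVT::f64, Custom);  // see LowerOperation
//   setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Expand);
// \endcode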
2553
2554 /// Indicate that the specified load with extension does not work with the
2555 /// specified type and indicate what to do about it.
2556 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2557 LegalizeAction Action) {
2558 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2559 MemVT.isValid() && "Table isn't big enough!");
2560 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2561 unsigned Shift = 4 * ExtType;
2562 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2563 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2564 }
2565 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2566 LegalizeAction Action) {
2567 for (auto ExtType : ExtTypes)
2568 setLoadExtAction(ExtType, ValVT, MemVT, Action);
2569 }
2570 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2571 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2572 for (auto MemVT : MemVTs)
2573 setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2574 }
2575
2576 /// Let target indicate that an extending atomic load of the specified type
2577 /// is legal.
2578 void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2579 LegalizeAction Action) {
2580 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2581 MemVT.isValid() && "Table isn't big enough!");
2582 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2583 unsigned Shift = 4 * ExtType;
2584 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &=
2585 ~((uint16_t)0xF << Shift);
2586 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |=
2587 ((uint16_t)Action << Shift);
2588 }
2589 void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2590 LegalizeAction Action) {
2591 for (auto ExtType : ExtTypes)
2592 setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
2593 }
2594 void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2595 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2596 for (auto MemVT : MemVTs)
2597 setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2598 }
2599
2600 /// Indicate that the specified truncating store does not work with the
2601 /// specified type and indicate what to do about it.
2602 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2603 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2604 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2605 }
2606
2607 /// Indicate that the specified indexed load does or does not work with the
2608 /// specified type and indicate what to do about it.
2609 ///
2610 /// NOTE: All indexed mode loads are initialized to Expand in
2611 /// TargetLowering.cpp
2612 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2613 LegalizeAction Action) {
2614 for (auto IdxMode : IdxModes)
2615 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2616 }
2617
2618 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2619 LegalizeAction Action) {
2620 for (auto VT : VTs)
2621 setIndexedLoadAction(IdxModes, VT, Action);
2622 }
2623
2624 /// Indicate that the specified indexed store does or does not work with the
2625 /// specified type and indicate what to do about it.
2626 ///
2627 /// NOTE: All indexed mode stores are initialized to Expand in
2628 /// TargetLowering.cpp
2629 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2630 LegalizeAction Action) {
2631 for (auto IdxMode : IdxModes)
2632 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2633 }
2634
2635 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2636 LegalizeAction Action) {
2637 for (auto VT : VTs)
2638 setIndexedStoreAction(IdxModes, VT, Action);
2639 }
2640
2641 /// Indicate that the specified indexed masked load does or does not work with
2642 /// the specified type and indicate what to do about it.
2643 ///
2644 /// NOTE: All indexed mode masked loads are initialized to Expand in
2645 /// TargetLowering.cpp
2646 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2647 LegalizeAction Action) {
2648 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2649 }
2650
2651 /// Indicate that the specified indexed masked store does or does not work
2652 /// with the specified type and indicate what to do about it.
2653 ///
2654 /// NOTE: All indexed mode masked stores are initialized to Expand in
2655 /// TargetLowering.cpp
2656 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2657 LegalizeAction Action) {
2658 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2659 }
2660
2661 /// Indicate that the specified condition code is or isn't supported on the
2662 /// target and indicate what to do about it.
2663 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2664 LegalizeAction Action) {
2665 for (auto CC : CCs) {
2666 assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2667 "Table isn't big enough!");
2668 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2669 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the
2670 /// 32-bit value and the upper 29 bits index into the second dimension of
2671 /// the array to select what 32-bit value to use.
2672 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2673 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2674 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2675 }
2676 }
2677 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2678 LegalizeAction Action) {
2679 for (auto VT : VTs)
2680 setCondCodeAction(CCs, VT, Action);
2681 }
2682
2683 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2684 /// to trying a larger integer/fp until it can find one that works. If that
2685 /// default is insufficient, this method can be used by the target to override
2686 /// the default.
2687 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2688 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2689 }
2690
2691 /// Convenience method to set an operation to Promote and specify the type
2692 /// in a single call.
2693 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2694 setOperationAction(Opc, OrigVT, Promote);
2695 AddPromotedToType(Opc, OrigVT, DestVT);
2696 }
2697 void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT,
2698 MVT DestVT) {
2699 for (auto Op : Ops) {
2700 setOperationAction(Op, OrigVT, Promote);
2701 AddPromotedToType(Op, OrigVT, DestVT);
2702 }
2703 }
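// For illustration, a target without an i8 count-leading-zeros instruction
// could promote it in a single call (the types are an example assumption,
// not a rule of any target):
//
// \code
//   // Perform i8 CTLZ in an i32 register, then fix up the result:
//   setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
// \endcode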
2704
2705 /// Targets should invoke this method for each target independent node that
2706 /// they want to provide a custom DAG combiner for by implementing the
2707 /// PerformDAGCombine virtual method.
2708 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2709 for (auto NT : NTs) {
2710 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2711 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2712 }
2713 }
2714
2715 /// Set the target's minimum function alignment.
2716 void setMinFunctionAlignment(Align Alignment) {
2717 MinFunctionAlignment = Alignment;
2718 }
2719
2720 /// Set the target's preferred function alignment. This should be set if
2721 /// there is a performance benefit to higher-than-minimum alignment.
2722 void setPrefFunctionAlignment(Align Alignment) {
2723 PrefFunctionAlignment = Alignment;
2724 }
2725
2726 /// Set the target's preferred loop alignment. The default alignment of one
2727 /// means the target does not care about loop alignment. The target may also
2728 /// override getPrefLoopAlignment to provide per-loop values.
2729 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2730 void setMaxBytesForAlignment(unsigned MaxBytes) {
2731 MaxBytesForAlignment = MaxBytes;
2732 }
2733
2734 /// Set the minimum stack alignment of an argument.
2735 void setMinStackArgumentAlignment(Align Alignment) {
2736 MinStackArgumentAlignment = Alignment;
2737 }
2738
2739 /// Set the maximum atomic operation size supported by the
2740 /// backend. Atomic operations greater than this size (as well as
2741 /// ones that are not naturally aligned), will be expanded by
2742 /// AtomicExpandPass into an __atomic_* library call.
2743 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2744 MaxAtomicSizeInBitsSupported = SizeInBits;
2745 }
2746
2747 /// Set the size in bits of the maximum div/rem the backend supports.
2748 /// Larger operations will be expanded by ExpandLargeDivRem.
2749 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2750 MaxDivRemBitWidthSupported = SizeInBits;
2751 }
2752
2753 /// Set the size in bits of the maximum fp convert the backend supports.
2754 /// Larger operations will be expanded by ExpandLargeFPConvert.
2755 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2756 MaxLargeFPConvertBitWidthSupported = SizeInBits;
2757 }
2758
2759 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2760 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2761 MinCmpXchgSizeInBits = SizeInBits;
2762 }
2763
2764 /// Sets whether unaligned atomic operations are supported.
2765 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2766 SupportsUnalignedAtomics = UnalignedSupported;
2767 }
2768
2769public:
2770 //===--------------------------------------------------------------------===//
2771 // Addressing mode description hooks (used by LSR etc).
2772 //
2773
2774 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2775 /// instructions reading the address. This allows as much computation as
2776 /// possible to be done in the address mode for that operand. This hook lets
2777 /// targets also indicate when this should be done on intrinsics that
2778 /// load/store.
2779 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2780 SmallVectorImpl<Value*> &/*Ops*/,
2781 Type *&/*AccessTy*/) const {
2782 return false;
2783 }
2784
2785 /// This represents an addressing mode of:
2786 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale
2787 /// If BaseGV is null, there is no BaseGV.
2788 /// If BaseOffs is zero, there is no base offset.
2789 /// If HasBaseReg is false, there is no base register.
2790 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2791 /// no scale.
2792 /// If ScalableOffset is zero, there is no scalable offset.
2793 struct AddrMode {
2794 GlobalValue *BaseGV = nullptr;
2795 int64_t BaseOffs = 0;
2796 bool HasBaseReg = false;
2797 int64_t Scale = 0;
2798 int64_t ScalableOffset = 0;
2799 AddrMode() = default;
2800 };
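// For illustration, an ARM-style address such as [r0, r1, lsl #2] would be
// modelled as: BaseGV == nullptr, BaseOffs == 0, HasBaseReg == true,
// Scale == 4 (the scaled index register), and ScalableOffset == 0.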
2801
2802 /// Return true if the addressing mode represented by AM is legal for this
2803 /// target, for a load/store of the specified type.
2804 ///
2805 /// The type may be VoidTy, in which case only return true if the addressing
2806 /// mode is legal for a load/store of any legal type. TODO: Handle
2807 /// pre/postinc as well.
2808 ///
2809 /// If the address space cannot be determined, it will be -1.
2810 ///
2811 /// TODO: Remove default argument
2812 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2813 Type *Ty, unsigned AddrSpace,
2814 Instruction *I = nullptr) const;
2815
2816 /// Returns true if the target's addressing mode can target thread local
2817 /// storage (TLS).
2818 virtual bool addressingModeSupportsTLS(const GlobalValue &) const {
2819 return false;
2820 }
2821
2822 /// Return the preferred common base offset.
2823 virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
2824 int64_t MaxOffset) const {
2825 return 0;
2826 }
2827
2828 /// Return true if the specified immediate is a legal icmp immediate, that is
2829 /// the target has icmp instructions which can compare a register against the
2830 /// immediate without having to materialize the immediate into a register.
2831 virtual bool isLegalICmpImmediate(int64_t) const {
2832 return true;
2833 }
2834
2835 /// Return true if the specified immediate is a legal add immediate, that is the
2836 /// target has add instructions which can add a register with the immediate
2837 /// without having to materialize the immediate into a register.
2838 virtual bool isLegalAddImmediate(int64_t) const {
2839 return true;
2840 }
2841
2842 /// Return true if adding the specified scalable immediate is legal, that is
2843 /// the target has add instructions which can add a register with the
2844 /// immediate (multiplied by vscale) without having to materialize the
2845 /// immediate into a register.
2846 virtual bool isLegalAddScalableImmediate(int64_t) const { return false; }
2847
2848 /// Return true if the specified immediate is legal for the value input of a
2849 /// store instruction.
2850 virtual bool isLegalStoreImmediate(int64_t Value) const {
2851 // Default implementation assumes that at least 0 works since it is likely
2852 // that a zero register exists or a zero immediate is allowed.
2853 return Value == 0;
2854 }
2855
2856 /// Return true if it's significantly cheaper to shift a vector by a uniform
2857 /// scalar than by an amount which will vary across each lane. On x86 before
2858 /// AVX2 for example, there is a "psllw" instruction for the former case, but
2859 /// no simple instruction for a general "a << b" operation on vectors.
2860 /// This should also apply to lowering for vector funnel shifts (rotates).
2861 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
2862 return false;
2863 }
2864
2865 /// Given a shuffle vector SVI representing a vector splat, return a new
2866 /// scalar type of size equal to SVI's scalar type if the new type is more
2867 /// profitable. Returns nullptr otherwise. For example under MVE float splats
2868 /// are converted to integer to prevent the need to move from SPR to GPR
2869 /// registers.
2870 virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
2871 return nullptr;
2872 }
2873
2874 /// Given a set of interconnected phis of type 'From' that are loaded/stored
2875 /// or bitcast to type 'To', return true if the set should be converted to
2876 /// 'To'.
2877 virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2878 return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2879 (To->isIntegerTy() || To->isFloatingPointTy());
2880 }
2881
2882 /// Returns true if the opcode is a commutative binary operation.
2883 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2884 // FIXME: This should get its info from the td file.
2885 switch (Opcode) {
2886 case ISD::ADD:
2887 case ISD::SMIN:
2888 case ISD::SMAX:
2889 case ISD::UMIN:
2890 case ISD::UMAX:
2891 case ISD::MUL:
2892 case ISD::MULHU:
2893 case ISD::MULHS:
2894 case ISD::SMUL_LOHI:
2895 case ISD::UMUL_LOHI:
2896 case ISD::FADD:
2897 case ISD::FMUL:
2898 case ISD::AND:
2899 case ISD::OR:
2900 case ISD::XOR:
2901 case ISD::SADDO:
2902 case ISD::UADDO:
2903 case ISD::ADDC:
2904 case ISD::ADDE:
2905 case ISD::SADDSAT:
2906 case ISD::UADDSAT:
2907 case ISD::FMINNUM:
2908 case ISD::FMAXNUM:
2909 case ISD::FMINNUM_IEEE:
2910 case ISD::FMAXNUM_IEEE:
2911 case ISD::FMINIMUM:
2912 case ISD::FMAXIMUM:
2913 case ISD::AVGFLOORS:
2914 case ISD::AVGFLOORU:
2915 case ISD::AVGCEILS:
2916 case ISD::AVGCEILU:
2917 case ISD::ABDS:
2918 case ISD::ABDU:
2919 return true;
2920 default: return false;
2921 }
2922 }
2923
2924 /// Return true if the node is a math/logic binary operator.
2925 virtual bool isBinOp(unsigned Opcode) const {
2926 // A commutative binop must be a binop.
2927 if (isCommutativeBinOp(Opcode))
2928 return true;
2929 // These are non-commutative binops.
2930 switch (Opcode) {
2931 case ISD::SUB:
2932 case ISD::SHL:
2933 case ISD::SRL:
2934 case ISD::SRA:
2935 case ISD::ROTL:
2936 case ISD::ROTR:
2937 case ISD::SDIV:
2938 case ISD::UDIV:
2939 case ISD::SREM:
2940 case ISD::UREM:
2941 case ISD::SSUBSAT:
2942 case ISD::USUBSAT:
2943 case ISD::FSUB:
2944 case ISD::FDIV:
2945 case ISD::FREM:
2946 return true;
2947 default:
2948 return false;
2949 }
2950 }
2951
2952 /// Return true if it's free to truncate a value of type FromTy to type
2953 /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
2954 /// by referencing its sub-register AX.
2955 /// Targets must return false when FromTy <= ToTy.
2956 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2957 return false;
2958 }
2959
2960 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2961 /// whether a call is in tail position. Typically this means that both results
2962 /// would be assigned to the same register or stack slot, but it could mean
2963 /// the target performs adequate checks of its own before proceeding with the
2964 /// tail call. Targets must return false when FromTy <= ToTy.
2965 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2966 return false;
2967 }
2968
2969 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
2970 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
2971 LLVMContext &Ctx) const {
2972 return isTruncateFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
2973 getApproximateEVTForLLT(ToTy, DL, Ctx));
2974 }
2975
2976 /// Return true if truncating the specific node Val to type VT2 is free.
2977 virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
2978 // Fallback to type matching.
2979 return isTruncateFree(Val.getValueType(), VT2);
2980 }
2981
2982 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2983
2984 /// Return true if the extension represented by \p I is free.
2985 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2986 /// this method can use the context provided by \p I to decide
2987 /// whether or not \p I is free.
2988 /// This method extends the behavior of the is[Z|FP]ExtFree family.
2989 /// In other words, if is[Z|FP]ExtFree returns true, then this method
2990 /// returns true as well. The converse is not true.
2991 /// The target can perform the adequate checks by overriding isExtFreeImpl.
2992 /// \pre \p I must be a sign, zero, or fp extension.
2993 bool isExtFree(const Instruction *I) const {
2994 switch (I->getOpcode()) {
2995 case Instruction::FPExt:
2996 if (isFPExtFree(EVT::getEVT(I->getType()),
2997 EVT::getEVT(I->getOperand(0)->getType())))
2998 return true;
2999 break;
3000 case Instruction::ZExt:
3001 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
3002 return true;
3003 break;
3004 case Instruction::SExt:
3005 break;
3006 default:
3007 llvm_unreachable("Instruction is not an extension");
3008 }
3009 return isExtFreeImpl(I);
3010 }
3011
3012 /// Return true if \p Load and \p Ext can form an ExtLoad.
3013 /// For example, in AArch64
3014 /// %L = load i8, i8* %ptr
3015 /// %E = zext i8 %L to i32
3016 /// can be lowered into one load instruction
3017 /// ldrb w0, [x0]
3018 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
3019 const DataLayout &DL) const {
3020 EVT VT = getValueType(DL, Ext->getType());
3021 EVT LoadVT = getValueType(DL, Load->getType());
3022
3023 // If the load has other users and the truncate is not free, the ext
3024 // probably isn't free.
3025 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
3026 !isTruncateFree(Ext->getType(), Load->getType()))
3027 return false;
3028
3029 // Check whether the target supports casts folded into loads.
3030 unsigned LType;
3031 if (isa<ZExtInst>(Ext))
3032 LType = ISD::ZEXTLOAD;
3033 else {
3034 assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
3035 LType = ISD::SEXTLOAD;
3036 }
3037
3038 return isLoadExtLegal(LType, VT, LoadVT);
3039 }
3040
3041 /// Return true if any actual instruction that defines a value of type FromTy
3042 /// implicitly zero-extends the value to ToTy in the result register.
3043 ///
3044 /// The function should return true when it is likely that the truncate can
3045 /// be freely folded with an instruction defining a value of FromTy. If
3046 /// the defining instruction is unknown (because you're looking at a
3047 /// function argument, PHI, etc.) then the target may require an
3048 /// explicit truncate, which is not necessarily free, but this function
3049 /// does not deal with those cases.
3050 /// Targets must return false when FromTy >= ToTy.
3051 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
3052 return false;
3053 }
3054
3055 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
3056 virtual bool isZExtFree(LLT FromTy, LLT ToTy, const DataLayout &DL,
3057 LLVMContext &Ctx) const {
3058 return isZExtFree(getApproximateEVTForLLT(FromTy, DL, Ctx),
3059 getApproximateEVTForLLT(ToTy, DL, Ctx));
3060 }
3061
3062 /// Return true if zero-extending the specific node Val to type VT2 is free
3063 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
3064 /// because it's folded such as X86 zero-extending loads).
3065 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
3066 return isZExtFree(Val.getValueType(), VT2);
3067 }
3068
3069 /// Return true if sign-extension from FromTy to ToTy is cheaper than
3070 /// zero-extension.
3071 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
3072 return false;
3073 }
3074
3075 /// Return true if this constant should be sign extended when promoting to
3076 /// a larger type.
3077 virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
3078
3079 /// Return true if sinking I's operands to the same basic block as I is
3080 /// profitable, e.g. because the operands can be folded into a target
3081 /// instruction during instruction selection. After calling the function
3082 /// \p Ops contains the Uses to sink ordered by dominance (dominating users
3083 /// come first).
3084 virtual bool shouldSinkOperands(Instruction *I,
3085 SmallVectorImpl<Use *> &Ops) const {
3086 return false;
3087 }
3088
3089 /// Try to optimize extending or truncating conversion instructions (like
3090 /// zext, trunc, fptoui, uitofp) for the target.
3091 virtual bool
3092 optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
3093 const TargetTransformInfo &TTI) const {
3094 return false;
3095 }
3096
3097 /// Return true if the target supplies and combines to a paired load
3098 /// two loaded values of type LoadedType next to each other in memory.
3099 /// RequiredAlignment gives the minimal alignment constraints that must be met
3100 /// to be able to select this paired load.
3101 ///
3102 /// This information is *not* used to generate actual paired loads, but it is
3103 /// used to generate a sequence of loads that is easier to combine into a
3104 /// paired load.
3105 /// For instance, something like this:
3106 /// a = load i64* addr
3107 /// b = trunc i64 a to i32
3108 /// c = lshr i64 a, 32
3109 /// d = trunc i64 c to i32
3110 /// will be optimized into:
3111 /// b = load i32* addr1
3112 /// d = load i32* addr2
3113 /// Where addr1 = addr2 +/- sizeof(i32).
3114 ///
3115 /// In other words, unless the target performs a post-isel load combining,
3116 /// this information should not be provided because it will generate more
3117 /// loads.
3118 virtual bool hasPairedLoad(EVT /*LoadedType*/,
3119 Align & /*RequiredAlignment*/) const {
3120 return false;
3121 }
3122
3123 /// Return true if the target has a vector blend instruction.
3124 virtual bool hasVectorBlend() const { return false; }
3125
3126 /// Get the maximum supported factor for interleaved memory accesses.
3127 /// Default to be the minimum interleave factor: 2.
3128 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
3129
3130 /// Lower an interleaved load to target specific intrinsics. Return
3131 /// true on success.
3132 ///
3133 /// \p LI is the vector load instruction.
3134 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3135 /// \p Indices is the corresponding indices for each shufflevector.
3136 /// \p Factor is the interleave factor.
3137 virtual bool lowerInterleavedLoad(LoadInst *LI,
3138 ArrayRef<ShuffleVectorInst *> Shuffles,
3139 ArrayRef<unsigned> Indices,
3140 unsigned Factor) const {
3141 return false;
3142 }
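 // Editor's sketch of the de-interleave pattern this hook is handed, assuming
 // Factor == 2 (value names are for exposition only):
 //   %wide.vec = load <8 x i32>, ptr %ptr
 //   %v0 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison,
 //                       <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 //   %v1 = shufflevector <8 x i32> %wide.vec, <8 x i32> poison,
 //                       <4 x i32> <i32 1, i32 3, i32 5, i32 7>
 // Here Shuffles = {%v0, %v1} and Indices = {0, 1}; a target such as AArch64
 // can select a single ld2 for the whole group.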
3143
3144 /// Lower an interleaved store to target specific intrinsics. Return
3145 /// true on success.
3146 ///
3147 /// \p SI is the vector store instruction.
3148 /// \p SVI is the shufflevector to RE-interleave the stored vector.
3149 /// \p Factor is the interleave factor.
3150 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
3151 unsigned Factor) const {
3152 return false;
3153 }
3154
3155 /// Lower a deinterleave intrinsic to a target specific load intrinsic.
3156 /// Return true on success. Currently only supports
3157 /// llvm.vector.deinterleave2
3158 ///
3159 /// \p DI is the deinterleave intrinsic.
3160 /// \p LI is the accompanying load instruction
3161 virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI,
3162 LoadInst *LI) const {
3163 return false;
3164 }
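 // Editor's sketch of the intrinsic form involved (scalable vectors are the
 // common case):
 //   %wide = load <vscale x 4 x i32>, ptr %ptr
 //   %res = call { <vscale x 2 x i32>, <vscale x 2 x i32> }
 //          @llvm.vector.deinterleave2.nxv4i32(<vscale x 4 x i32> %wide)
 // A target with structured loads (e.g. SVE ld2) can lower both instructions
 // into one operation when this hook returns true.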
3165
3166 /// Lower an interleave intrinsic to a target specific store intrinsic.
3167 /// Return true on success. Currently only supports
3168 /// llvm.vector.interleave2
3169 ///
3170 /// \p II is the interleave intrinsic.
3171 /// \p SI is the accompanying store instruction
3172 virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II,
3173 StoreInst *SI) const {
3174 return false;
3175 }
3176
3177 /// Return true if an fpext operation is free (for instance, because
3178 /// single-precision floating-point numbers are implicitly extended to
3179 /// double-precision).
3180 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
3181 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
3182 "invalid fpext types");
3183 return false;
3184 }
3185
3186 /// Return true if an fpext operation input to an \p Opcode operation is free
3187 /// (for instance, because half-precision floating-point numbers are
3188 /// implicitly extended to single precision) for an FMA instruction.
3189 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
3190 LLT DestTy, LLT SrcTy) const {
3191 return false;
3192 }
3193
3194 /// Return true if an fpext operation input to an \p Opcode operation is free
3195 /// (for instance, because half-precision floating-point numbers are
3196 /// implicitly extended to single precision) for an FMA instruction.
3197 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
3198 EVT DestVT, EVT SrcVT) const {
3199 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
3200 "invalid fpext types");
3201 return isFPExtFree(DestVT, SrcVT);
3202 }
3203
3204 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
3205 /// extend node) is profitable.
3206 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
3207
3208 /// Return true if an fneg operation is free to the point where it is never
3209 /// worthwhile to replace it with a bitwise operation.
3210 virtual bool isFNegFree(EVT VT) const {
3211 assert(VT.isFloatingPoint());
3212 return false;
3213 }
3214
3215 /// Return true if an fabs operation is free to the point where it is never
3216 /// worthwhile to replace it with a bitwise operation.
3217 virtual bool isFAbsFree(EVT VT) const {
3218 assert(VT.isFloatingPoint());
3219 return false;
3220 }
3221
3222 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3223 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3224 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3225 ///
3226 /// NOTE: This may be called before legalization on types for which FMAs are
3227 /// not legal, but should return true if those types will eventually legalize
3228 /// to types that support FMAs. After legalization, it will only be called on
3229 /// types that support FMAs (via Legal or Custom actions)
3230 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3231 EVT) const {
3232 return false;
3233 }
3234
3235 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3236 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3237 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3238 ///
3239 /// NOTE: This may be called before legalization on types for which FMAs are
3240 /// not legal, but should return true if those types will eventually legalize
3241 /// to types that support FMAs. After legalization, it will only be called on
3242 /// types that support FMAs (via Legal or Custom actions)
3243 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3244 LLT) const {
3245 return false;
3246 }
3247
3248 /// IR version
3249 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
3250 return false;
3251 }
3252
3253 /// Returns true if \p MI can be combined with another instruction to
3254 /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
3255 /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
3256 /// distributed into an fadd/fsub.
3257 virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3258 assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3259 MI.getOpcode() == TargetOpcode::G_FSUB ||
3260 MI.getOpcode() == TargetOpcode::G_FMUL) &&
3261 "unexpected node in FMAD forming combine");
3262 switch (Ty.getScalarSizeInBits()) {
3263 case 16:
3264 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3265 case 32:
3266 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3267 case 64:
3268 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3269 default:
3270 break;
3271 }
3272
3273 return false;
3274 }
3275
3276 /// Returns true if \p N can be combined with another node to form an
3277 /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
3278 /// will be distributed into an fadd/fsub.
3279 virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
3280 assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3281 N->getOpcode() == ISD::FMUL) &&
3282 "unexpected node in FMAD forming combine");
3283 return isOperationLegal(ISD::FMAD, N->getValueType(0));
3284 }
3285
3286 // Return true when the decision to generate FMAs (or FMS, FMLA, etc.) rather
3287 // than FMUL and ADD is delegated to the machine combiner.
3288 virtual bool generateFMAsInMachineCombiner(EVT VT,
3289 CodeGenOptLevel OptLevel) const {
3290 return false;
3291 }
3292
3293 /// Return true if it's profitable to narrow operations of type SrcVT to
3294 /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3295 /// i32 to i16.
3296 virtual bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
3297 return false;
3298 }
3299
3300 /// Return true if pulling a binary operation into a select with an identity
3301 /// constant is profitable. This is the inverse of an IR transform.
3302 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3303 virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
3304 EVT VT) const {
3305 return false;
3306 }
3307
3308 /// Return true if it is beneficial to convert a load of a constant to
3309 /// just the constant itself.
3310 /// On some targets it might be more efficient to use a combination of
3311 /// arithmetic instructions to materialize the constant instead of loading it
3312 /// from a constant pool.
3313 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
3314 Type *Ty) const {
3315 return false;
3316 }
3317
3318 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3319 /// from this source type with this index. This is needed because
3320 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3321 /// the first element, and only the target knows which lowering is cheap.
3322 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3323 unsigned Index) const {
3324 return false;
3325 }
3326
3327 /// Try to convert an extract element of a vector binary operation into an
3328 /// extract element followed by a scalar operation.
3329 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3330 return false;
3331 }
3332
3333 /// Return true if extraction of a scalar element from the given vector type
3334 /// at the given index is cheap. For example, if scalar operations occur on
3335 /// the same register file as vector operations, then an extract element may
3336 /// be a sub-register rename rather than an actual instruction.
3337 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3338 return false;
3339 }
3340
3341 /// Try to convert math with an overflow comparison into the corresponding DAG
3342 /// node operation. Targets may want to override this independently of whether
3343 /// the operation is legal/custom for the given type because it may obscure
3344 /// matching of other patterns.
3345 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3346 bool MathUsed) const {
3347 // TODO: The default logic is inherited from code in CodeGenPrepare.
3348 // The opcode should not make a difference by default?
3349 if (Opcode != ISD::UADDO)
3350 return false;
3351
3352 // Allow the transform as long as we have an integer type that is not
3353 // obviously illegal and unsupported and if the math result is used
3354 // besides the overflow check. On some targets (e.g. SPARC), it is
3355 // not profitable to form an overflow op if the math result has no
3356 // concrete users.
3357 if (VT.isVector())
3358 return false;
3359 return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3360 }
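 // Editor's sketch of the IR shape this transform targets:
 //   %sum = add i32 %x, %y
 //   %ovf = icmp ult i32 %sum, %x   ; unsigned-overflow test
 // When this hook returns true, the pair becomes a single ISD::UADDO node
 // producing both the sum and the overflow flag.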
3361
3362 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3363 // even if the vector itself has multiple uses.
3364 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3365 return false;
3366 }
3367
3368 // Return true if CodeGenPrepare should consider splitting large offset of a
3369 // GEP to make the GEP fit into the addressing mode and can be sunk into the
3370 // same blocks of its users.
3371 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3372
3373 /// Return true if creating a shift of the type by the given
3374 /// amount is not profitable.
3375 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3376 return false;
3377 }
3378
3379 // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3380 // A) where y has a single bit set?
3381 virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
3382 const APInt &AndMask) const {
3383 unsigned ShCt = AndMask.getBitWidth() - 1;
3384 return !shouldAvoidTransformToShift(VT, ShCt);
3385 }
3386
3387 /// Does this target require the clearing of high-order bits in a register
3388 /// passed to the fp16 to fp conversion library function.
3389 virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3390
3391 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3392 /// from min(max(fptoi)) saturation patterns.
3393 virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3394 return isOperationLegalOrCustom(Op, VT);
3395 }
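 // Editor's sketch of the saturation pattern referred to above (roughly):
 //   %i   = fptosi float %f to i64
 //   %lo  = call i64 @llvm.smax.i64(i64 %i, i64 -2147483648)
 //   %sat = call i64 @llvm.smin.i64(i64 %lo, i64 2147483647)
 // followed by a truncate, which can collapse to a single
 // ISD::FP_TO_SINT_SAT node when permitted here.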
3396
3397 /// Does this target support complex deinterleaving
3398 virtual bool isComplexDeinterleavingSupported() const { return false; }
3399
3400 /// Does this target support complex deinterleaving with the given operation
3401 /// and type
3402 virtual bool isComplexDeinterleavingOperationSupported(
3403 ComplexDeinterleavingOperation Operation, Type *Ty) const {
3404 return false;
3405 }
3406
3407 /// Create the IR node for the given complex deinterleaving operation.
3408 /// If one cannot be created using all the given inputs, nullptr should be
3409 /// returned.
3410 virtual Value *createComplexDeinterleavingIR(
3411 IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
3412 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3413 Value *Accumulator = nullptr) const {
3414 return nullptr;
3415 }
3416
3417 //===--------------------------------------------------------------------===//
3418 // Runtime Library hooks
3419 //
3420
3421 /// Rename the default libcall routine name for the specified libcall.
3422 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
3423 LibcallRoutineNames[Call] = Name;
3424 }
3425 void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
3426 for (auto Call : Calls)
3427 setLibcallName(Call, Name);
3428 }
3429
3430 /// Get the libcall routine name for the specified libcall.
3431 const char *getLibcallName(RTLIB::Libcall Call) const {
3432 return LibcallRoutineNames[Call];
3433 }
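 // Editor's note -- typical use from a target's constructor (illustrative
 // routine name, hypothetical target):
 //   setLibcallName(RTLIB::SREM_I64, "__my_srem64");
 //   setLibcallName(RTLIB::MULO_I128, nullptr); // mark as unavailable
 // A null result from getLibcallName() means no library routine exists for
 // that operation.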
3434
3435 /// Override the default CondCode to be used to test the result of the
3436 /// comparison libcall against zero.
3437 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
3438 CmpLibcallCCs[Call] = CC;
3439 }
3440
3441 /// Get the CondCode that's to be used to test the result of the comparison
3442 /// libcall against zero.
3443 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
3444 return CmpLibcallCCs[Call];
3445 }
3446
3447 /// Set the CallingConv that should be used for the specified libcall.
3448 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
3449 LibcallCallingConvs[Call] = CC;
3450 }
3451
3452 /// Get the CallingConv that should be used for the specified libcall.
3453 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
3454 return LibcallCallingConvs[Call];
3455 }
3456
3457 /// Execute target specific actions to finalize target lowering.
3458 /// This is used to set extra flags in MachineFrameInformation and to freeze
3459 /// the set of reserved registers.
3460 /// The default implementation just freezes the set of reserved registers.
3461 virtual void finalizeLowering(MachineFunction &MF) const;
3462
3463 //===----------------------------------------------------------------------===//
3464 // GlobalISel Hooks
3465 //===----------------------------------------------------------------------===//
3466 /// Check whether or not \p MI needs to be moved close to its uses.
3467 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3468
3469
3470private:
3471 const TargetMachine &TM;
3472
3473 /// Tells the code generator that the target has multiple (allocatable)
3474 /// condition registers that can be used to store the results of comparisons
3475 /// for use by selects and conditional branches. With multiple condition
3476 /// registers, the code generator will not aggressively sink comparisons into
3477 /// the blocks of their users.
3478 bool HasMultipleConditionRegisters;
3479
3480 /// Tells the code generator that the target has BitExtract instructions.
3481 /// The code generator will aggressively sink "shift"s into the blocks of
3482 /// their users if the users will generate "and" instructions which can be
3483 /// combined with "shift" to BitExtract instructions.
3484 bool HasExtractBitsInsn;
3485
3486 /// Tells the code generator to bypass slow divide or remainder
3487 /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
3488 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3489 /// div/rem when the operands are positive and less than 256.
3490 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
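 // Editor's example: an entry of BypassSlowDivWidths[32] = 8 asks for a
 // run-time guard that performs a 32-bit div/rem as an 8-bit unsigned
 // div/rem whenever both operands fit, as some x86 subtargets request.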
3491
3492 /// Tells the code generator that it shouldn't generate extra flow control
3493 /// instructions and should attempt to combine flow control instructions via
3494 /// predication.
3495 bool JumpIsExpensive;
3496
3497 /// Information about the contents of the high-bits in boolean values held in
3498 /// a type wider than i1. See getBooleanContents.
3499 BooleanContent BooleanContents;
3500
3501 /// Information about the contents of the high-bits in boolean values held in
3502 /// a type wider than i1. See getBooleanContents.
3503 BooleanContent BooleanFloatContents;
3504
3505 /// Information about the contents of the high-bits in boolean vector values
3506 /// when the element type is wider than i1. See getBooleanContents.
3507 BooleanContent BooleanVectorContents;
3508
3509 /// The target scheduling preference: shortest possible total cycles or lowest
3510 /// register usage.
3511 Sched::Preference SchedPreferenceInfo;
3512
3513 /// The minimum alignment that any argument on the stack needs to have.
3514 Align MinStackArgumentAlignment;
3515
3516 /// The minimum function alignment (used when optimizing for size, and to
3517 /// prevent explicitly provided alignment from leading to incorrect code).
3518 Align MinFunctionAlignment;
3519
3520 /// The preferred function alignment (used when alignment unspecified and
3521 /// optimizing for speed).
3522 Align PrefFunctionAlignment;
3523
3524 /// The preferred loop alignment (in log2, not in bytes).
3525 Align PrefLoopAlignment;
3526 /// The maximum amount of bytes permitted to be emitted for alignment.
3527 unsigned MaxBytesForAlignment;
3528
3529 /// Size in bits of the maximum atomics size the backend supports.
3530 /// Accesses larger than this will be expanded by AtomicExpandPass.
3531 unsigned MaxAtomicSizeInBitsSupported;
3532
3533 /// Size in bits of the maximum div/rem size the backend supports.
3534 /// Larger operations will be expanded by ExpandLargeDivRem.
3535 unsigned MaxDivRemBitWidthSupported;
3536
3537 /// Size in bits of the maximum fp convert size the backend
3538 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
3539 unsigned MaxLargeFPConvertBitWidthSupported;
3540
3541 /// Size in bits of the minimum cmpxchg or ll/sc operation the
3542 /// backend supports.
3543 unsigned MinCmpXchgSizeInBits;
3544
3545 /// This indicates if the target supports unaligned atomic operations.
3546 bool SupportsUnalignedAtomics;
3547
3548 /// If set to a physical register, this specifies the register that
3549 /// llvm.stacksave/llvm.stackrestore should save and restore.
3550 Register StackPointerRegisterToSaveRestore;
3551
3552 /// This indicates the default register class to use for each ValueType the
3553 /// target supports natively.
3554 const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3555 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3556 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3557
3558 /// This indicates the "representative" register class to use for each
3559 /// ValueType the target supports natively. This information is used by the
3560 /// scheduler to track register pressure. By default, the representative
3561 /// register class is the largest legal super-reg register class of the
3562 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3563 /// representative class would be GR32.
3564 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};
3565
3566 /// This indicates the "cost" of the "representative" register class for each
3567 /// ValueType. The cost is used by the scheduler to approximate register
3568 /// pressure.
3569 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3570
3571 /// For any value types we are promoting or expanding, this contains the value
3572 /// type that we are changing to. For Expanded types, this contains one step
3573 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3574 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3575 /// the same type (e.g. i32 -> i32).
3576 MVT TransformToType[MVT::VALUETYPE_SIZE];
3577
3578 /// For each operation and each value type, keep a LegalizeAction that
3579 /// indicates how instruction selection should deal with the operation. Most
3580 /// operations are Legal (aka, supported natively by the target), but
3581 /// operations that are not should be described. Note that operations on
3582 /// non-legal value types are not described here.
3583 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3584
3585 /// For each load extension type and each value type, keep a LegalizeAction
3586 /// that indicates how instruction selection should deal with a load of a
3587 /// specific value type and extension type. Uses 4-bits to store the action
3588 /// for each of the 4 load ext types.
3589 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3590
3591 /// Similar to LoadExtActions, but for atomic loads. Only Legal or Expand
3592 /// (default) values are supported.
3593 uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3594
3595 /// For each value type pair keep a LegalizeAction that indicates whether a
3596 /// truncating store of a specific value type and truncating type is legal.
3597 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3598
3599 /// For each indexed mode and each value type, keep a quad of LegalizeAction
3600 /// that indicates how instruction selection should deal with the load /
3601 /// store / maskedload / maskedstore.
3602 ///
3603 /// The first dimension is the value_type for the reference. The second
3604 /// dimension represents the various modes for load store.
3605 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3606
3607 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3608 /// indicates how instruction selection should deal with the condition code.
3609 ///
3610 /// Because each CC action takes up 4 bits, we need to have the array size be
3611 /// large enough to fit all of the value types. This can be done by rounding
3612 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3613 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
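 // Editor's note on the layout described above: the action for condition
 // code CC on simple type VT lives in CondCodeActions[CC][VT.SimpleTy >> 3],
 // in the 4-bit field at bit offset (VT.SimpleTy & 0x7) * 4 -- eight value
 // types per 32-bit word.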
3614
3615 ValueTypeActionImpl ValueTypeActions;
3616
3617private:
3618 /// Targets can specify ISD nodes that they would like PerformDAGCombine
3619 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3620 /// array.
3621 unsigned char
3622 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3623
3624 /// For operations that must be promoted to a specific type, this holds the
3625 /// destination type. This map should be sparse, so don't hold it as an
3626 /// array.
3627 ///
3628 /// Targets add entries to this map with AddPromotedToType(..), clients access
3629 /// this with getTypeToPromoteTo(..).
3630 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3631 PromoteToType;
3632
3633 /// Stores the name of each libcall.
3634 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL + 1];
3635
3636 /// The ISD::CondCode that should be used to test the result of each of the
3637 /// comparison libcall against zero.
3638 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3639
3640 /// Stores the CallingConv that should be used for each libcall.
3641 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
3642
3643 /// Set default libcall names and calling conventions.
3644 void InitLibcalls(const Triple &TT);
3645
3646 /// The bits of IndexedModeActions used to store the legalisation actions
3647 /// We store the data as | ML | MS | L | S | each taking 4 bits.
3648 enum IndexedModeActionsBits {
3649 IMAB_Store = 0,
3650 IMAB_Load = 4,
3651 IMAB_MaskedStore = 8,
3652 IMAB_MaskedLoad = 12
3653 };
3654
3655 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3656 LegalizeAction Action) {
3657 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3658 (unsigned)Action < 0xf && "Table isn't big enough!");
3659 unsigned Ty = (unsigned)VT.SimpleTy;
3660 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3661 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3662 }
3663
3664 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3665 unsigned Shift) const {
3666 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3667 "Table isn't big enough!");
3668 unsigned Ty = (unsigned)VT.SimpleTy;
3669 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3670 }
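 // Editor's worked example of the packing above: the masked-load action for
 // a given mode occupies bits 12..15 (Shift == IMAB_MaskedLoad) of
 // IndexedModeActions[VT.SimpleTy][IdxMode], while a plain store's action
 // occupies bits 0..3 (Shift == IMAB_Store).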
3671
3672protected:
3673 /// Return true if the extension represented by \p I is free.
3674 /// \pre \p I is a sign, zero, or fp extension and
3675 /// is[Z|FP]ExtFree of the related types is not true.
3676 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3677
3678 /// Depth that GatherAllAliases should continue looking for chain
3679 /// dependencies when trying to find a more preferable chain. As an
3680 /// approximation, this should be more than the number of consecutive stores
3681 /// expected to be merged.
3682 unsigned GatherAllAliasesMaxDepth;
3683
3684 /// \brief Specify maximum number of store instructions per memset call.
3685 ///
3686 /// When lowering \@llvm.memset this field specifies the maximum number of
3687 /// store operations that may be substituted for the call to memset. Targets
3688 /// must set this value based on the cost threshold for that target. Targets
3689 /// should assume that the memset will be done using as many of the largest
3690 /// store operations first, followed by smaller ones, if necessary, per
3691 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3692 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3693 /// store. This only applies to setting a constant array of a constant size.
3694 unsigned MaxStoresPerMemset;
3695 /// Likewise for functions with the OptSize attribute.
3696 unsigned MaxStoresPerMemsetOptSize;
3697
3698 /// \brief Specify maximum number of store instructions per memcpy call.
3699 ///
3700 /// When lowering \@llvm.memcpy this field specifies the maximum number of
3701 /// store operations that may be substituted for a call to memcpy. Targets
3702 /// must set this value based on the cost threshold for that target. Targets
3703 /// should assume that the memcpy will be done using as many of the largest
3704 /// store operations first, followed by smaller ones, if necessary, per
3705 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3706 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
3707 /// and one 1-byte store. This only applies to copying a constant array of
3708 /// constant size.
3709 unsigned MaxStoresPerMemcpy;
3710 /// Likewise for functions with the OptSize attribute.
3711 unsigned MaxStoresPerMemcpyOptSize;
3712 /// \brief Specify max number of store instructions to glue in inlined memcpy.
3713 ///
3714 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3715 /// of store instructions to keep together. This helps in pairing and
3716 /// vectorization later on.
3717 unsigned MaxGluedStoresPerMemcpy = 0;
3718
3719 /// \brief Specify maximum number of load instructions per memcmp call.
3720 ///
3721 /// When lowering \@llvm.memcmp this field specifies the maximum number of
3722 /// pairs of load operations that may be substituted for a call to memcmp.
3723 /// Targets must set this value based on the cost threshold for that target.
3724 /// Targets should assume that the memcmp will be done using as many of the
3725 /// largest load operations first, followed by smaller ones, if necessary, per
3726 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3727 /// with 32-bit alignment would result in one 4-byte load, one 2-byte load
3728 /// and one 1-byte load. This only applies to comparing a constant array of
3729 /// constant size.
3730 unsigned MaxLoadsPerMemcmp;
3731 /// Likewise for functions with the OptSize attribute.
3732 unsigned MaxLoadsPerMemcmpOptSize;
3733
3734 /// \brief Specify maximum number of store instructions per memmove call.
3735 ///
3736 /// When lowering \@llvm.memmove this field specifies the maximum number of
3737 /// store instructions that may be substituted for a call to memmove. Targets
3738 /// must set this value based on the cost threshold for that target. Targets
3739 /// should assume that the memmove will be done using as many of the largest
3740 /// store operations first, followed by smaller ones, if necessary, per
3741 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3742 /// with 8-bit alignment would result in nine 1-byte stores. This only
3743 /// applies to copying a constant array of constant size.
3744 unsigned MaxStoresPerMemmove;
3745 /// Likewise for functions with the OptSize attribute.
3746 unsigned MaxStoresPerMemmoveOptSize;
3747
3748 /// Tells the code generator that select is more expensive than a branch if
3749 /// the branch is usually predicted right.
3750 bool PredictableSelectIsExpensive;
3751
3752 /// \see enableExtLdPromotion.
3753 bool EnableExtLdPromotion;
3754
3755 /// Return true if the value types that can be represented by the specified
3756 /// register class are all legal.
3757 bool isLegalRC(const TargetRegisterInfo &TRI,
3758 const TargetRegisterClass &RC) const;
3759
3760 /// Replace/modify any TargetFrameIndex operands with a target-dependent
3761 /// sequence of memory operands that is recognized by PrologEpilogInserter.
3762 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3763 MachineBasicBlock *MBB) const;
3764
3765 bool IsStrictFPEnabled;
3766};
3767
3768/// This class defines information used to lower LLVM code to legal SelectionDAG
3769/// operators that the target instruction selector can accept natively.
3770///
3771/// This class also defines callbacks that targets must implement to lower
3772/// target-specific constructs to SelectionDAG operators.
3773 class TargetLowering : public TargetLoweringBase {
3774public:
3775 struct DAGCombinerInfo;
3776 struct MakeLibCallOptions;
3777
3778 TargetLowering(const TargetLowering &) = delete;
3779 TargetLowering &operator=(const TargetLowering &) = delete;
3780
3781 explicit TargetLowering(const TargetMachine &TM);
3782
3783 bool isPositionIndependent() const;
3784
3785 virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3786 FunctionLoweringInfo *FLI,
3787 UniformityInfo *UA) const {
3788 return false;
3789 }
3790
3791 // Lets the target control the following reassociation of operands: (op (op x,
3792 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3793 // default, any case where N0 has a single use is considered profitable. This
3794 // behavior reflects the condition replaced by this target hook call in the
3795 // DAGCombiner. Any particular target can implement its own heuristic to
3796 // restrict the common combiner.
3797 virtual bool isReassocProfitable(const SelectionDAG &DAG, SDValue N0,
3798 SDValue N1) const {
3799 return N0.hasOneUse();
3800 }
3801
3802 // Lets the target control the following reassociation of operands: (op (op x,
3803 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3804 // default, any case where N0 has a single use is considered profitable. This
3805 // behavior reflects the condition replaced by this target hook call in the
3806 // combiner. Any particular target can implement its own heuristic to
3807 // restrict the common combiner.
3808 virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
3809 Register N1) const {
3810 return MRI.hasOneNonDBGUse(N0);
3811 }
3812
3813 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3814 return false;
3815 }
3816
3817 /// Returns true by value, base pointer and offset pointer and addressing mode
3818 /// by reference if the node's address can be legally represented as
3819 /// pre-indexed load / store address.
3820 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3821 SDValue &/*Offset*/,
3822 ISD::MemIndexedMode &/*AM*/,
3823 SelectionDAG &/*DAG*/) const {
3824 return false;
3825 }
3826
3827 /// Returns true by value, base pointer and offset pointer and addressing mode
3828 /// by reference if this node can be combined with a load / store to form a
3829 /// post-indexed load / store.
3830 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3831 SDValue &/*Base*/,
3832 SDValue &/*Offset*/,
3833 ISD::MemIndexedMode &/*AM*/,
3834 SelectionDAG &/*DAG*/) const {
3835 return false;
3836 }
3837
3838 /// Returns true if the specified base+offset is a legal indexed addressing
3839 /// mode for this target. \p MI is the load or store instruction that is being
3840 /// considered for transformation.
3841 virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3842 bool IsPre, MachineRegisterInfo &MRI) const {
3843 return false;
3844 }
3845
3846 /// Return the entry encoding for a jump table in the current function. The
3847 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3848 virtual unsigned getJumpTableEncoding() const;
3849
3850 virtual const MCExpr *
3851 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3852 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3853 MCContext &/*Ctx*/) const {
3854 llvm_unreachable("Need to implement this hook if target has custom JTIs");
3855 }
3856
3857 /// Returns relocation base for the given PIC jumptable.
3858 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3859 SelectionDAG &DAG) const;
3860
3861 /// This returns the relocation base for the given PIC jumptable, the same as
3862 /// getPICJumpTableRelocBase, but as an MCExpr.
3863 virtual const MCExpr *
3864 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3865 unsigned JTI, MCContext &Ctx) const;
3866
3867 /// Return true if folding a constant offset with the given GlobalAddress is
3868 /// legal. It is frequently not legal in PIC relocation models.
3869 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3870
3871 /// On x86, return true if the operand with index OpNo is a CALL or JUMP
3872 /// instruction, which can use either a memory constraint or an address
3873 /// constraint. -fasm-blocks "__asm call foo" lowers to
3874 /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
3875 ///
3876 /// This function is used by a hack to choose the address constraint,
3877 /// lowering to a direct call.
3878 virtual bool
3879 isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
3880 unsigned OpNo) const {
3881 return false;
3882 }
3883
3884 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3885 SDValue &Chain) const;
3886
3887 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3888 SDValue &NewRHS, ISD::CondCode &CCCode,
3889 const SDLoc &DL, const SDValue OldLHS,
3890 const SDValue OldRHS) const;
3891
3892 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3893 SDValue &NewRHS, ISD::CondCode &CCCode,
3894 const SDLoc &DL, const SDValue OldLHS,
3895 const SDValue OldRHS, SDValue &Chain,
3896 bool IsSignaling = false) const;
3897
3898 /// Returns a pair of (return value, chain).
3899 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3900 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3901 EVT RetVT, ArrayRef<SDValue> Ops,
3902 MakeLibCallOptions CallOptions,
3903 const SDLoc &dl,
3904 SDValue Chain = SDValue()) const;
3905
3906 /// Check whether parameters to a call that are passed in callee saved
3907 /// registers are the same as from the calling function. This needs to be
3908 /// checked for tail call eligibility.
3909 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3910 const uint32_t *CallerPreservedMask,
3911 const SmallVectorImpl<CCValAssign> &ArgLocs,
3912 const SmallVectorImpl<SDValue> &OutVals) const;
3913
3914 //===--------------------------------------------------------------------===//
3915 // TargetLowering Optimization Methods
3916 //
3917
3918 /// A convenience struct that encapsulates a DAG, and two SDValues for
3919 /// returning information from TargetLowering to its clients that want to
3920 /// combine.
3921 struct TargetLoweringOpt {
3922 SelectionDAG &DAG;
3923 bool LegalTys;
3924 bool LegalOps;
3925 SDValue Old;
3926 SDValue New;
3927
3928 explicit TargetLoweringOpt(SelectionDAG &InDAG,
3929 bool LT, bool LO) :
3930 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3931
3932 bool LegalTypes() const { return LegalTys; }
3933 bool LegalOperations() const { return LegalOps; }
3934
3935 bool CombineTo(SDValue O, SDValue N) {
3936 Old = O;
3937 New = N;
3938 return true;
3939 }
3940 };
3941
3942 /// Determines the optimal series of memory ops to replace the memset / memcpy.
3943 /// Return true if the number of memory ops is below the threshold (Limit).
3944 /// Note that this is always the case when Limit is ~0.
3945 /// It returns the types of the sequence of memory ops to perform
3946 /// memset / memcpy by reference.
3947 virtual bool
3948 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3949 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3950 const AttributeList &FuncAttributes) const;
3951
3952 /// Check to see if the specified operand of the specified instruction is a
3953 /// constant integer. If so, check to see if there are any bits set in the
3954 /// constant that are not demanded. If so, shrink the constant and return
3955 /// true.
3956 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3957 const APInt &DemandedElts,
3958 TargetLoweringOpt &TLO) const;
3959
3960 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3961 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3962 TargetLoweringOpt &TLO) const;
3963
3964 // Target hook to do target-specific const optimization, which is called by
3965 // ShrinkDemandedConstant. This function should return true if the target
3966 // doesn't want ShrinkDemandedConstant to further optimize the constant.
3967 virtual bool targetShrinkDemandedConstant(SDValue Op,
3968 const APInt &DemandedBits,
3969 const APInt &DemandedElts,
3970 TargetLoweringOpt &TLO) const {
3971 return false;
3972 }
3973
3974 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
3975 /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast,
3976 /// but it could be generalized for targets with other types of implicit
3977 /// widening casts.
3978 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
3979 const APInt &DemandedBits,
3980 TargetLoweringOpt &TLO) const;
3981
3982 /// Look at Op. At this point, we know that only the DemandedBits bits of the
3983 /// result of Op are ever used downstream. If we can use this information to
3984 /// simplify Op, create a new simplified DAG node and return true, returning
3985 /// the original and new nodes in Old and New. Otherwise, analyze the
3986 /// expression and return a mask of KnownOne and KnownZero bits for the
3987 /// expression (used to simplify the caller). The KnownZero/One bits may only
3988 /// be accurate for those bits in the Demanded masks.
3989 /// \p AssumeSingleUse When this parameter is true, this function will
3990 /// attempt to simplify \p Op even if there are multiple uses.
3991 /// Callers are responsible for correctly updating the DAG based on the
3992 /// results of this function, because simply replacing TLO.Old
3993 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
3994 /// has multiple uses.
3995 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
3996 const APInt &DemandedElts, KnownBits &Known,
3997 TargetLoweringOpt &TLO, unsigned Depth = 0,
3998 bool AssumeSingleUse = false) const;
3999
4000 /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
4001 /// Adds Op back to the worklist upon success.
4002 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4003 KnownBits &Known, TargetLoweringOpt &TLO,
4004 unsigned Depth = 0,
4005 bool AssumeSingleUse = false) const;
4006
4007 /// Helper wrapper around SimplifyDemandedBits.
4008 /// Adds Op back to the worklist upon success.
4009 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4010 DAGCombinerInfo &DCI) const;
4011
4012 /// Helper wrapper around SimplifyDemandedBits.
4013 /// Adds Op back to the worklist upon success.
4014 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4015 const APInt &DemandedElts,
4016 DAGCombinerInfo &DCI) const;
4017
4018 /// More limited version of SimplifyDemandedBits that can be used to "look
4019 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4020 /// bitwise ops etc.
4021 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4022 const APInt &DemandedElts,
4023 SelectionDAG &DAG,
4024 unsigned Depth = 0) const;
4025
4026 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4027 /// elements.
4028 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4029 SelectionDAG &DAG,
4030 unsigned Depth = 0) const;
4031
4032 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4033 /// bits from only some vector elements.
4034 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
4035 const APInt &DemandedElts,
4036 SelectionDAG &DAG,
4037 unsigned Depth = 0) const;
4038
4039 /// Look at Vector Op. At this point, we know that only the DemandedElts
4040 /// elements of the result of Op are ever used downstream. If we can use
4041 /// this information to simplify Op, create a new simplified DAG node and
4042 /// return true, storing the original and new nodes in TLO.
4043 /// Otherwise, analyze the expression and return a mask of KnownUndef and
4044 /// KnownZero elements for the expression (used to simplify the caller).
4045 /// The KnownUndef/Zero elements may only be accurate for those bits
4046 /// in the DemandedMask.
4047 /// \p AssumeSingleUse When this parameter is true, this function will
4048 /// attempt to simplify \p Op even if there are multiple uses.
4049 /// Callers are responsible for correctly updating the DAG based on the
4050 /// results of this function, because simply replacing TLO.Old
4051 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4052 /// has multiple uses.
4053 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
4054 APInt &KnownUndef, APInt &KnownZero,
4055 TargetLoweringOpt &TLO, unsigned Depth = 0,
4056 bool AssumeSingleUse = false) const;
4057
4058 /// Helper wrapper around SimplifyDemandedVectorElts.
4059 /// Adds Op back to the worklist upon success.
4060 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
4061 DAGCombinerInfo &DCI) const;
4062
4063 /// Return true if the target supports simplifying demanded vector elements by
4064 /// converting them to undefs.
4065 virtual bool
4066 shouldSimplifyDemandedVectorElts(SDValue Op,
4067 const TargetLoweringOpt &TLO) const {
4068 return true;
4069 }
4070
4071 /// Determine which of the bits specified in Mask are known to be either zero
4072 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4073 /// argument allows us to only collect the known bits that are shared by the
4074 /// requested vector elements.
4075 virtual void computeKnownBitsForTargetNode(const SDValue Op,
4076 KnownBits &Known,
4077 const APInt &DemandedElts,
4078 const SelectionDAG &DAG,
4079 unsigned Depth = 0) const;
4080
4081 /// Determine which of the bits specified in Mask are known to be either zero
4082 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4083 /// argument allows us to only collect the known bits that are shared by the
4084 /// requested vector elements. This is for GISel.
4085 virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
4086 Register R, KnownBits &Known,
4087 const APInt &DemandedElts,
4088 const MachineRegisterInfo &MRI,
4089 unsigned Depth = 0) const;
4090
4091 /// Determine the known alignment for the pointer value \p R. This is can
4092 /// typically be inferred from the number of low known 0 bits. However, for a
4093 /// pointer with a non-integral address space, the alignment value may be
4094 /// independent from the known low bits.
4095 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
4096 Register R,
4097 const MachineRegisterInfo &MRI,
4098 unsigned Depth = 0) const;
4099
4100 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
4101 /// Default implementation computes low bits based on alignment
4102 /// information. This should preserve known bits passed into it.
4103 virtual void computeKnownBitsForFrameIndex(int FIOp,
4104 KnownBits &Known,
4105 const MachineFunction &MF) const;
4106
4107 /// This method can be implemented by targets that want to expose additional
4108 /// information about sign bits to the DAG Combiner. The DemandedElts
4109 /// argument allows us to only collect the minimum sign bits that are shared
4110 /// by the requested vector elements.
4111 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
4112 const APInt &DemandedElts,
4113 const SelectionDAG &DAG,
4114 unsigned Depth = 0) const;
4115
4116 /// This method can be implemented by targets that want to expose additional
4117 /// information about sign bits to GlobalISel combiners. The DemandedElts
4118 /// argument allows us to only collect the minimum sign bits that are shared
4119 /// by the requested vector elements.
4120 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
4121 Register R,
4122 const APInt &DemandedElts,
4123 const MachineRegisterInfo &MRI,
4124 unsigned Depth = 0) const;
4125
4126 /// Attempt to simplify any target nodes based on the demanded vector
4127 /// elements, returning true on success. Otherwise, analyze the expression and
4128 /// return a mask of KnownUndef and KnownZero elements for the expression
4129 /// (used to simplify the caller). The KnownUndef/Zero elements may only be
4130 /// accurate for those bits in the DemandedMask.
4131 virtual bool SimplifyDemandedVectorEltsForTargetNode(
4132 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
4133 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
4134
4135 /// Attempt to simplify any target nodes based on the demanded bits/elts,
4136 /// returning true on success. Otherwise, analyze the
4137 /// expression and return a mask of KnownOne and KnownZero bits for the
4138 /// expression (used to simplify the caller). The KnownZero/One bits may only
4139 /// be accurate for those bits in the Demanded masks.
4140 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
4141 const APInt &DemandedBits,
4142 const APInt &DemandedElts,
4143 KnownBits &Known,
4144 TargetLoweringOpt &TLO,
4145 unsigned Depth = 0) const;
4146
4147 /// More limited version of SimplifyDemandedBits that can be used to "look
4148 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4149 /// bitwise ops etc.
4150 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
4151 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
4152 SelectionDAG &DAG, unsigned Depth) const;
4153
4154 /// Return true if this function can prove that \p Op is never poison
4155 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
4156 /// argument limits the check to the requested vector elements.
4157 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4158 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4159 bool PoisonOnly, unsigned Depth) const;
4160
4161 /// Return true if Op can create undef or poison from non-undef & non-poison
4162 /// operands. The DemandedElts argument limits the check to the requested
4163 /// vector elements.
4164 virtual bool
4165 canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
4166 const SelectionDAG &DAG, bool PoisonOnly,
4167 bool ConsiderFlags, unsigned Depth) const;
4168
4169 /// Tries to build a legal vector shuffle using the provided parameters
4170 /// or equivalent variations. The Mask argument may be modified as the
4171 /// function tries different variations.
4172 /// Returns an empty SDValue if the operation fails.
4173 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
4174 SDValue N1, MutableArrayRef<int> Mask,
4175 SelectionDAG &DAG) const;
4176
4177 /// This method returns the constant pool value that will be loaded by LD.
4178 /// NOTE: You must check for implicit extensions of the constant by LD.
4179 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
4180
4181 /// If \p SNaN is false, \returns true if \p Op is known to never be any
4182 /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a signaling
4183 /// NaN.
4184 virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
4185 const SelectionDAG &DAG,
4186 bool SNaN = false,
4187 unsigned Depth = 0) const;
4188
4189 /// Return true if vector \p Op has the same value across all \p DemandedElts,
4190 /// indicating any elements which may be undef in the output \p UndefElts.
4191 virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
4192 APInt &UndefElts,
4193 const SelectionDAG &DAG,
4194 unsigned Depth = 0) const;
4195
4196 /// Returns true if the given Opc is considered a canonical constant for the
4197 /// target, which should not be transformed back into a BUILD_VECTOR.
4198 virtual bool isTargetCanonicalConstantNode(SDValue Op) const {
4199 return Op.getOpcode() == ISD::SPLAT_VECTOR ||
4200 Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
4201 }
4202
4203 struct DAGCombinerInfo {
4204 void *DC; // The DAG Combiner object.
4205 CombineLevel Level;
4206 bool CalledByLegalizer;
4207
4208 public:
4209 SelectionDAG &DAG;
4210
4211 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
4212 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
4213
4214 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
4215 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
4216 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
4217 CombineLevel getDAGCombineLevel() { return Level; }
4218 bool isCalledByLegalizer() const { return CalledByLegalizer; }
4219
4220 void AddToWorklist(SDNode *N);
4221 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
4222 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
4223 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
4224
4225 bool recursivelyDeleteUnusedNodes(SDNode *N);
4226
4227 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
4228 };
4229
4230 /// Return if the N is a constant or constant vector equal to the true value
4231 /// from getBooleanContents().
4232 bool isConstTrueVal(SDValue N) const;
4233
4234 /// Return if the N is a constant or constant vector equal to the false value
4235 /// from getBooleanContents().
4236 bool isConstFalseVal(SDValue N) const;
4237
4238 /// Return if \p N is a True value when extended to \p VT.
4239 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
4240
4241 /// Try to simplify a setcc built with the specified operands and cc. If it is
4242 /// unable to simplify it, return a null SDValue.
4243 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
4244 bool foldBooleans, DAGCombinerInfo &DCI,
4245 const SDLoc &dl) const;
4246
4247 // For targets which wrap address, unwrap for analysis.
4248 virtual SDValue unwrapAddress(SDValue N) const { return N; }
4249
4250 /// Returns true (and the GlobalValue and the offset) if the node is a
4251 /// GlobalAddress + offset.
4252 virtual bool
4253 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
4254
4255 /// This method will be invoked for all target nodes and for any
4256 /// target-independent nodes that the target has registered (via
4257 /// setTargetDAGCombine) to be invoked for.
4258 ///
4259 /// The semantics are as follows:
4260 /// Return Value:
4261 /// SDValue.Val == 0 - No change was made
4262 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
4263 /// otherwise - N should be replaced by the returned Operand.
4264 ///
4265 /// In addition, methods provided by DAGCombinerInfo may be used to perform
4266 /// more complex transformations.
4267 ///
4268 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
4269
4270 /// Return true if it is profitable to move this shift by a constant amount
4271 /// through its operand, adjusting any immediate operands as necessary to
4272 /// preserve semantics. This transformation may not be desirable if it
4273 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
4274 /// extraction in AArch64). By default, it returns true.
4275 ///
4276 /// @param N the shift node
4277 /// @param Level the current DAGCombine legalization level.
4278 virtual bool isDesirableToCommuteWithShift(const SDNode *N,
4279 CombineLevel Level) const {
4280 return true;
4281 }
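 // Editor's sketch of the combine this hook gates, schematically:
 //   (shl (add x, c1), c2)  -->  (add (shl x, c2), c1 << c2)
 // A target that selects shifted operands or bitfield extracts from the
 // original shape (e.g. AArch64) may return false to preserve it.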
4282
4283 /// GlobalISel - return true if it is profitable to move this shift by a
4284 /// constant amount through its operand, adjusting any immediate operands as
4285 /// necessary to preserve semantics. This transformation may not be desirable
4286 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4287 /// bitfield extraction in AArch64). By default, it returns true.
4288 ///
4289 /// @param MI the shift instruction
4290 /// @param IsAfterLegal true if running after legalization.
4291 virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
4292 bool IsAfterLegal) const {
4293 return true;
4294 }
4295
4296 /// GlobalISel - return true if it's profitable to perform the combine:
4297 /// shl ([sza]ext x), y => zext (shl x, y)
4298 virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
4299 return true;
4300 }
4301
4302 // Return AndOrSETCCFoldKind::{AddAnd, ABS} if its desirable to try and
4303 // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
4304 // writing this) is:
4305 // With C as a power of 2 and C != 0 and C != INT_MIN:
4306 // AddAnd:
4307 // (icmp eq A, C) | (icmp eq A, -C)
4308 // -> (icmp eq and(add(A, C), ~(C + C)), 0)
4309 // (icmp ne A, C) & (icmp ne A, -C)
4310 // -> (icmp ne and(add(A, C), ~(C + C)), 0)
4311 // ABS:
4312 // (icmp eq A, C) | (icmp eq A, -C)
4313 // -> (icmp eq Abs(A), C)
4314 // (icmp ne A, C) & (icmp ne A, -C)
4315 // -> (icmp ne Abs(A), C)
4316 //
4317 // @param LogicOp the logic op
4318 // @param SETCC0 the first of the SETCC nodes
4319 // @param SETCC1 the second of the SETCC nodes
4320 virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
4321 const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
4322 return AndOrSETCCFoldKind::None;
4323 }
4324
4325 /// Return true if it is profitable to combine an XOR of a logical shift
4326 /// to create a logical shift of NOT. This transformation may not be desirable
4327 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4328 /// BIC on ARM/AArch64). By default, it returns true.
4329 virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
4330 return true;
4331 }
4332
4333 /// Return true if the target has native support for the specified value type
4334 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
4335 /// i16 is legal, but undesirable since i16 instruction encodings are longer
4336 /// and some i16 instructions are slow.
4337 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4338 // By default, assume all legal types are desirable.
4339 return isTypeLegal(VT);
4340 }
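
A sketch loosely modeled on the x86 situation described above (MyTargetLowering is hypothetical): i16 stays legal, but the combiner is steered away from it for common arithmetic.

bool MyTargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (VT == MVT::i16) {
    switch (Opc) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      return false; // Legal but slow/long-encoded; prefer i32.
    }
  }
  return isTypeLegal(VT);
}
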
4341
4342 /// Return true if it is profitable for the DAG combiner to transform a
4343 /// floating-point op of the specified opcode to an equivalent op of an
4344 /// integer type. e.g. f32 load -> i32 load can be profitable on ARM.
4345 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
4346 EVT /*VT*/) const {
4347 return false;
4348 }
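
A hedged sketch mirroring the ARM note in the comment (not ARM's verbatim implementation): report f32 loads and stores as profitable to perform as integer ops.

bool MyTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                         EVT VT) const {
  // On this hypothetical target, moving f32 loads/stores through integer
  // registers avoids a costly FP<->GPR transfer.
  return (Opc == ISD::LOAD || Opc == ISD::STORE) && VT == MVT::f32;
}
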
4349
4350 /// This method queries the target whether it is beneficial for the DAG
4351 /// combiner to promote the specified node. If true, it should return the
4352 /// desired promotion type by reference.
4353 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
4354 return false;
4355 }
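
A sketch of the by-reference contract (hypothetical target): narrow integer ops are promoted, and the preferred type is reported through PVT.

bool MyTargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i8 && VT != MVT::i16)
    return false;
  PVT = MVT::i32; // Desired promotion type, returned by reference.
  return true;
}
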
4356
4357 /// Return true if the target supports the swifterror attribute. It optimizes
4358 /// loads and stores to reads and writes of a specific register.
4359 virtual bool supportSwiftError() const {
4360 return false;
4361 }
4362
4363 /// Return true if the target supports that a subset of CSRs for the given
4364 /// machine function is handled explicitly via copies.
4365 virtual bool supportSplitCSR(MachineFunction *MF) const {
4366 return false;
4367 }
4368
4369 /// Return true if the target supports kcfi operand bundles.
4370 virtual bool supportKCFIBundles() const { return false; }
4371
4372 /// Return true if the target supports ptrauth operand bundles.
4373 virtual bool supportPtrAuthBundles() const { return false; }
4374
4375 /// Perform necessary initialization to handle a subset of CSRs explicitly
4376 /// via copies. This function is called at the beginning of instruction
4377 /// selection.
4378 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
4379 llvm_unreachable("Not Implemented");
4380 }
4381
4382 /// Insert explicit copies in entry and exit blocks. We copy a subset of
4383 /// CSRs to virtual registers in the entry block, and copy them back to
4384 /// physical registers in the exit blocks. This function is called at the end
4385 /// of instruction selection.
4386 virtual void insertCopiesSplitCSR(
4387 MachineBasicBlock *Entry,
4388 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
4389 llvm_unreachable("Not Implemented");
4390 }
4391
4392 /// Return the newly negated expression if the cost is not expensive, and
4393 /// set \p Cost to indicate whether the negation is cheaper than or neutral
4394 /// to the original expression.
4395 virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
4396 bool LegalOps, bool OptForSize,
4397 NegatibleCost &Cost,
4398 unsigned Depth = 0) const;
4399
4400 SDValue getCheaperOrNeutralNegatedExpression(
4401 SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
4402 const NegatibleCost CostThreshold = NegatibleCost::Neutral,
4403 unsigned Depth = 0) const {
4404 NegatibleCost Cost = NegatibleCost::Expensive;
4405 SDValue Neg =
4406 getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4407 if (!Neg)
4408 return SDValue();
4409
4410 if (Cost <= CostThreshold)
4411 return Neg;
4412
4413 // Remove the newly created node to avoid a side effect on the DAG.
4414 if (Neg->use_empty())