LLVM 20.0.0git
TargetLowering.h
1//===- llvm/CodeGen/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file describes how to lower LLVM code to machine code. This has three
11/// main components:
12///
13/// 1. Which ValueTypes are natively supported by the target.
14/// 2. Which operations are supported for supported ValueTypes.
15/// 3. Cost thresholds for alternative implementations of certain operations.
16///
17/// In addition it has a few other components, like information about FP
18/// immediates.
19///
20//===----------------------------------------------------------------------===//
21
22#ifndef LLVM_CODEGEN_TARGETLOWERING_H
23#define LLVM_CODEGEN_TARGETLOWERING_H
24
25#include "llvm/ADT/APInt.h"
26#include "llvm/ADT/ArrayRef.h"
27#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/StringRef.h"
40#include "llvm/IR/Attributes.h"
41#include "llvm/IR/CallingConv.h"
42#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/Function.h"
45#include "llvm/IR/InlineAsm.h"
46#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Type.h"
54#include <algorithm>
55#include <cassert>
56#include <climits>
57#include <cstdint>
58#include <iterator>
59#include <map>
60#include <string>
61#include <utility>
62#include <vector>
63
64namespace llvm {
65
66class AssumptionCache;
67class CCState;
68class CCValAssign;
71class Constant;
72class FastISel;
73class FunctionLoweringInfo;
74class GlobalValue;
75class Loop;
76class GISelKnownBits;
77class IntrinsicInst;
78class IRBuilderBase;
79struct KnownBits;
80class LLVMContext;
81class MachineBasicBlock;
82class MachineFunction;
83class MachineInstr;
84class MachineJumpTableInfo;
85class MachineLoop;
86class MachineRegisterInfo;
87class MCContext;
88class MCExpr;
89class Module;
90class ProfileSummaryInfo;
91class TargetLibraryInfo;
92class TargetMachine;
93class TargetRegisterClass;
94class TargetRegisterInfo;
95class TargetTransformInfo;
96class Value;
97
98namespace Sched {
99
100enum Preference {
101 None, // No preference
102 Source, // Follow source order.
103 RegPressure, // Scheduling for lowest register pressure.
104 Hybrid, // Scheduling for both latency and register pressure.
105 ILP, // Scheduling for ILP in low register pressure mode.
106 VLIW, // Scheduling for VLIW targets.
107 Fast, // Fast suboptimal list scheduling
108 Linearize, // Linearize DAG, no scheduling
109 Last = Linearize // Marker for the last Sched::Preference
110};
111
112} // end namespace Sched
113
114// MemOp models a memory operation, either memset or memcpy/memmove.
115struct MemOp {
116private:
117 // Shared
118 uint64_t Size;
119 bool DstAlignCanChange; // true if destination alignment can satisfy any
120 // constraint.
121 Align DstAlign; // Specified alignment of the memory operation.
122
123 bool AllowOverlap;
124 // memset only
125 bool IsMemset; // If set, this memory operation is a memset.
126 bool ZeroMemset; // If set, clears out memory with zeros.
127 // memcpy only
128 bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
129 // constant so it does not need to be loaded.
130 Align SrcAlign; // Inferred alignment of the source or default value if the
131 // memory operation does not need to load the value.
132public:
133 static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
134 Align SrcAlign, bool IsVolatile,
135 bool MemcpyStrSrc = false) {
136 MemOp Op;
137 Op.Size = Size;
138 Op.DstAlignCanChange = DstAlignCanChange;
139 Op.DstAlign = DstAlign;
140 Op.AllowOverlap = !IsVolatile;
141 Op.IsMemset = false;
142 Op.ZeroMemset = false;
143 Op.MemcpyStrSrc = MemcpyStrSrc;
144 Op.SrcAlign = SrcAlign;
145 return Op;
146 }
147
148 static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
149 bool IsZeroMemset, bool IsVolatile) {
150 MemOp Op;
151 Op.Size = Size;
152 Op.DstAlignCanChange = DstAlignCanChange;
153 Op.DstAlign = DstAlign;
154 Op.AllowOverlap = !IsVolatile;
155 Op.IsMemset = true;
156 Op.ZeroMemset = IsZeroMemset;
157 Op.MemcpyStrSrc = false;
158 return Op;
159 }
160
161 uint64_t size() const { return Size; }
162 Align getDstAlign() const {
163 assert(!DstAlignCanChange);
164 return DstAlign;
165 }
166 bool isFixedDstAlign() const { return !DstAlignCanChange; }
167 bool allowOverlap() const { return AllowOverlap; }
168 bool isMemset() const { return IsMemset; }
169 bool isMemcpy() const { return !IsMemset; }
170 bool isMemcpyWithFixedDstAlign() const {
171 return isMemcpy() && !DstAlignCanChange;
172 }
173 bool isZeroMemset() const { return isMemset() && ZeroMemset; }
174 bool isMemcpyStrSrc() const {
175 assert(isMemcpy() && "Must be a memcpy");
176 return MemcpyStrSrc;
177 }
178 Align getSrcAlign() const {
179 assert(isMemcpy() && "Must be a memcpy");
180 return SrcAlign;
181 }
182 bool isSrcAligned(Align AlignCheck) const {
183 return isMemset() || llvm::isAligned(AlignCheck, SrcAlign.value());
184 }
185 bool isDstAligned(Align AlignCheck) const {
186 return DstAlignCanChange || llvm::isAligned(AlignCheck, DstAlign.value());
187 }
188 bool isAligned(Align AlignCheck) const {
189 return isSrcAligned(AlignCheck) && isDstAligned(AlignCheck);
190 }
191};
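// For illustration (hypothetical values): lowering an 8-byte memcpy whose
// destination alignment is free to be raised could be described as:
// \code
//   MemOp Op = MemOp::Copy(/*Size=*/8, /*DstAlignCanChange=*/true,
//                          /*DstAlign=*/Align(4), /*SrcAlign=*/Align(4),
//                          /*IsVolatile=*/false);
//   bool UseWideOps = Op.isAligned(Align(4)); // true here
// \endcode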
192
193/// This base class for TargetLowering contains the SelectionDAG-independent
194/// parts that can be used from the rest of CodeGen.
195class TargetLoweringBase {
196public:
197 /// This enum indicates whether operations are valid for a target, and if not,
198 /// what action should be used to make them valid.
199 enum LegalizeAction : uint8_t {
200 Legal, // The target natively supports this operation.
201 Promote, // This operation should be executed in a larger type.
202 Expand, // Try to expand this to other ops, otherwise use a libcall.
203 LibCall, // Don't try to expand this to other ops, always use a libcall.
204 Custom // Use the LowerOperation hook to implement custom lowering.
205 };
206
207 /// This enum indicates whether types are legal for a target, and if not,
208 /// what action should be used to make them valid.
209 enum LegalizeTypeAction : uint8_t {
210 TypeLegal, // The target natively supports this type.
211 TypePromoteInteger, // Replace this integer with a larger one.
212 TypeExpandInteger, // Split this integer into two of half the size.
213 TypeSoftenFloat, // Convert this float to a same size integer type.
214 TypeExpandFloat, // Split this float into two of half the size.
215 TypeScalarizeVector, // Replace this one-element vector with its element.
216 TypeSplitVector, // Split this vector into two of half the size.
217 TypeWidenVector, // This vector should be widened into a larger vector.
218 TypePromoteFloat, // Replace this float with a larger one.
219 TypeSoftPromoteHalf, // Soften half to i16 and use float to do arithmetic.
220 TypeScalarizeScalableVector, // This action is explicitly left unimplemented.
221 // While it is theoretically possible to
222 // legalize operations on scalable types with a
223 // loop that handles the vscale * #lanes of the
224 // vector, this is non-trivial at SelectionDAG
225 // level and these types are better to be
226 // widened or promoted.
227 };
228
229 /// LegalizeKind holds the legalization kind that needs to happen to EVT
230 /// in order to type-legalize it.
231 using LegalizeKind = std::pair<LegalizeTypeAction, EVT>;
232
233 /// Enum that describes how the target represents true/false values.
234 enum BooleanContent {
235 UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
236 ZeroOrOneBooleanContent, // All bits zero except for bit 0.
237 ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
238 };
239
240 /// Enum that describes what type of support for selects the target has.
241 enum SelectSupportKind {
242 ScalarValSelect, // The target supports scalar selects (ex: cmov).
243 ScalarCondVectorVal, // The target supports selects with a scalar condition
244 // and vector values (ex: cmov).
245 VectorMaskSelect // The target supports vector selects with a vector
246 // mask (ex: x86 blends).
247 };
248
249 /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
250 /// to, if at all. Exists because different targets have different levels of
251 /// support for these atomic instructions, and also have different options
252 /// w.r.t. what they should expand to.
253 enum class AtomicExpansionKind {
254 None, // Don't expand the instruction.
255 CastToInteger, // Cast the atomic instruction to another type, e.g. from
256 // floating-point to integer type.
257 LLSC, // Expand the instruction into load-linked/store-conditional; used
258 // by ARM/AArch64.
259 LLOnly, // Expand the (load) instruction into just a load-linked, which has
260 // greater atomic guarantees than a normal load.
261 CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
262 MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
263 BitTestIntrinsic, // Use a target-specific intrinsic for special bit
264 // operations; used by X86.
265 CmpArithIntrinsic,// Use a target-specific intrinsic for special compare
266 // operations; used by X86.
267 Expand, // Generic expansion in terms of other atomic operations.
268
269 // Rewrite to a non-atomic form for use in a known non-preemptible
270 // environment.
271 NotAtomic,
272 };
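// For illustration, a hypothetical target that only has LL/SC instructions
// up to 64 bits wide might choose its expansion kind like this in a
// shouldExpandAtomicRMWInIR override (the 64-bit cutoff is an assumption):
// \code
//   TargetLowering::AtomicExpansionKind
//   MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
//     uint64_t Bits = AI->getType()->getPrimitiveSizeInBits().getFixedValue();
//     return Bits <= 64 ? AtomicExpansionKind::LLSC
//                       : AtomicExpansionKind::Expand;
//   }
// \endcode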
273
274 /// Enum that specifies when a multiplication should be expanded.
275 enum class MulExpansionKind {
276 Always, // Always expand the instruction.
277 OnlyLegalOrCustom, // Only expand when the resulting instructions are legal
278 // or custom.
279 };
280
281 /// Enum that specifies when a float negation is beneficial.
282 enum class NegatibleCost {
283 Cheaper = 0, // Negated expression is cheaper.
284 Neutral = 1, // Negated expression has the same cost.
285 Expensive = 2 // Negated expression is more expensive.
286 };
287
288 /// Enum of different potentially desirable ways to fold (and/or (setcc ...),
289 /// (setcc ...)).
290 enum class AndOrSETCCFoldKind : uint8_t {
291 None = 0, // No fold is preferable.
292 AddAnd = 1, // Fold with `Add` op and `And` op is preferable.
293 NotAnd = 2, // Fold with `Not` op and `And` op is preferable.
294 ABS = 4, // Fold with `llvm.abs` op is preferable.
295 };
296
297 class ArgListEntry {
298 public:
299 Value *Val = nullptr;
300 SDValue Node = SDValue();
301 Type *Ty = nullptr;
302 bool IsSExt : 1;
303 bool IsZExt : 1;
304 bool IsNoExt : 1;
305 bool IsInReg : 1;
306 bool IsSRet : 1;
307 bool IsNest : 1;
308 bool IsByVal : 1;
309 bool IsByRef : 1;
310 bool IsInAlloca : 1;
311 bool IsPreallocated : 1;
312 bool IsReturned : 1;
313 bool IsSwiftSelf : 1;
314 bool IsSwiftAsync : 1;
315 bool IsSwiftError : 1;
316 bool IsCFGuardTarget : 1;
317 MaybeAlign Alignment = std::nullopt;
318 Type *IndirectType = nullptr;
319
320 ArgListEntry()
321 : IsSExt(false), IsZExt(false), IsNoExt(false), IsInReg(false),
322 IsSRet(false), IsNest(false), IsByVal(false), IsByRef(false),
323 IsInAlloca(false), IsPreallocated(false), IsReturned(false),
324 IsSwiftSelf(false), IsSwiftAsync(false), IsSwiftError(false),
325 IsCFGuardTarget(false) {}
326
327 void setAttributes(const CallBase *Call, unsigned ArgIdx);
328 };
329 using ArgListTy = std::vector<ArgListEntry>;
330
331 virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC,
332 ArgListTy &Args) const {};
333
334 static ISD::NodeType getExtendForContent(BooleanContent Content) {
335 switch (Content) {
336 case UndefinedBooleanContent:
337 // Extend by adding rubbish bits.
338 return ISD::ANY_EXTEND;
339 case ZeroOrOneBooleanContent:
340 // Extend by adding zero bits.
341 return ISD::ZERO_EXTEND;
342 case ZeroOrNegativeOneBooleanContent:
343 // Extend by copying the sign bit.
344 return ISD::SIGN_EXTEND;
345 }
346 llvm_unreachable("Invalid content kind");
347 }
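// For example, under ZeroOrNegativeOneBooleanContent the chosen extension
// is ISD::SIGN_EXTEND, so an i1 'true' widens to an all-ones i32:
// \code
//   ISD::NodeType Ext = TargetLoweringBase::getExtendForContent(
//       TargetLoweringBase::ZeroOrNegativeOneBooleanContent);
//   // Ext == ISD::SIGN_EXTEND; true becomes 0xFFFFFFFF
// \endcode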
348
349 explicit TargetLoweringBase(const TargetMachine &TM);
350 TargetLoweringBase(const TargetLoweringBase &) = delete;
351 TargetLoweringBase &operator=(const TargetLoweringBase &) = delete;
352 virtual ~TargetLoweringBase() = default;
353
354 /// Return true if the target supports strict float operations.
355 bool isStrictFPEnabled() const {
356 return IsStrictFPEnabled;
357 }
358
359protected:
360 /// Initialize all of the actions to default values.
361 void initActions();
362
363public:
364 const TargetMachine &getTargetMachine() const { return TM; }
365
366 virtual bool useSoftFloat() const { return false; }
367
368 /// Return the pointer type for the given address space, defaults to
369 /// the pointer type from the data layout.
370 /// FIXME: The default needs to be removed once all the code is updated.
371 virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
372 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
373 }
374
375 /// Return the in-memory pointer type for the given address space, defaults to
376 /// the pointer type from the data layout.
377 /// FIXME: The default needs to be removed once all the code is updated.
378 virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS = 0) const {
379 return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
380 }
381
382 /// Return the type for frame index, which is determined by
383 /// the alloca address space specified through the data layout.
384 MVT getFrameIndexTy(const DataLayout &DL) const {
385 return getPointerTy(DL, DL.getAllocaAddrSpace());
386 }
387
388 /// Return the type for code pointers, which is determined by the program
389 /// address space specified through the data layout.
390 MVT getProgramPointerTy(const DataLayout &DL) const {
391 return getPointerTy(DL, DL.getProgramAddressSpace());
392 }
393
394 /// Return the type for operands of fence.
395 /// TODO: Let fence operands be of i32 type and remove this.
396 virtual MVT getFenceOperandTy(const DataLayout &DL) const {
397 return getPointerTy(DL);
398 }
399
400 /// Return the type to use for a scalar shift opcode, given the shifted amount
401 /// type. Targets should return a legal type if the input type is legal.
402 /// Targets can return a type that is too small if the input type is illegal.
403 virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
404
405 /// Returns the type for the shift amount of a shift opcode. For vectors,
406 /// returns the input type. For scalars, calls getScalarShiftAmountTy.
407 /// If getScalarShiftAmountTy type cannot represent all possible shift
408 /// amounts, returns MVT::i32.
409 EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
410
411 /// Return the preferred type to use for a shift opcode, given the shifted
412 /// amount type is \p ShiftValueTy.
413 LLVM_READONLY
414 virtual LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const {
415 return ShiftValueTy;
416 }
417
418 /// Returns the type to be used for the index operand of:
419 /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
420 /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
421 virtual MVT getVectorIdxTy(const DataLayout &DL) const {
422 return getPointerTy(DL);
423 }
424
425 /// Returns the type to be used for the EVL/AVL operand of VP nodes:
426 /// ISD::VP_ADD, ISD::VP_SUB, etc. It must be a legal scalar integer type,
427 /// and must be at least as large as i32. The EVL is implicitly zero-extended
428 /// to any larger type.
429 virtual MVT getVPExplicitVectorLengthTy() const { return MVT::i32; }
430
431 /// This callback is used to inspect load/store instructions and add
432 /// target-specific MachineMemOperand flags to them. The default
433 /// implementation does nothing.
434 virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const {
435 return MachineMemOperand::MONone;
436 }
437
438 /// This callback is used to inspect load/store SDNode.
439 /// The default implementation does nothing.
440 virtual MachineMemOperand::Flags
441 getTargetMMOFlags(const MemSDNode &Node) const {
442 return MachineMemOperand::MONone;
443 }
444
445 MachineMemOperand::Flags
446 getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL,
447 AssumptionCache *AC = nullptr,
448 const TargetLibraryInfo *LibInfo = nullptr) const;
449 MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI,
450 const DataLayout &DL) const;
451 MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI,
452 const DataLayout &DL) const;
453
454 virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
455 return true;
456 }
457
458 /// Return true if the @llvm.experimental.vector.partial.reduce.* intrinsic
459 /// should be expanded using generic code in SelectionDAGBuilder.
460 virtual bool
461 shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const {
462 return true;
463 }
464
465 /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded
466 /// using generic code in SelectionDAGBuilder.
467 virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const {
468 return true;
469 }
470
471 virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF,
472 bool IsScalable) const {
473 return true;
474 }
475
476 /// Return true if the @llvm.experimental.cttz.elts intrinsic should be
477 /// expanded using generic code in SelectionDAGBuilder.
478 virtual bool shouldExpandCttzElements(EVT VT) const { return true; }
479
480 /// Return the minimum number of bits required to hold the maximum possible
481 /// number of trailing zero vector elements.
482 unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC,
483 bool ZeroIsPoison,
484 const ConstantRange *VScaleRange) const;
485
486 /// Return true if the @llvm.experimental.vector.match intrinsic should be
487 /// expanded for vector type `VT' and search size `SearchSize' using generic
488 /// code in SelectionDAGBuilder.
489 virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const {
490 return true;
491 }
492
493 // Return true if op(vecreduce(x), vecreduce(y)) should be reassociated to
494 // vecreduce(op(x, y)) for the reduction opcode RedOpc.
495 virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const {
496 return true;
497 }
498
499 /// Return true if it is profitable to convert a select of FP constants into
500 /// a constant pool load whose address depends on the select condition. The
501 /// parameter may be used to differentiate a select with FP compare from
502 /// integer compare.
503 virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
504 return true;
505 }
506
507 /// Return true if multiple condition registers are available.
508 bool hasMultipleConditionRegisters() const {
509 return HasMultipleConditionRegisters;
510 }
511
512 /// Return true if the target has BitExtract instructions.
513 bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
514
515 /// Return the preferred vector type legalization action.
516 virtual LegalizeTypeAction
517 getPreferredVectorAction(MVT VT) const {
518 // The default action for one element vectors is to scalarize
519 if (VT.getVectorElementCount().isScalar())
520 return TypeScalarizeVector;
521 // The default action for an odd-width vector is to widen.
522 if (!VT.isPow2VectorType())
523 return TypeWidenVector;
524 // The default action for other vectors is to promote
525 return TypePromoteInteger;
526 }
527
528 // Return true if the half type should be promoted using soft promotion rules
529 // where each operation is promoted to f32 individually, then converted to
530 // fp16. The default behavior is to promote chains of operations, keeping
531 // intermediate results in f32 precision and range.
532 virtual bool softPromoteHalfType() const { return false; }
533
534 // Return true if, for soft-promoted half, the half type should be
535 // passed to and returned from functions as f32. The default behavior is to
536 // pass as i16. If soft-promoted half is not used, this function is ignored
537 // and values are always passed and returned as f32.
538 virtual bool useFPRegsForHalfType() const { return false; }
539
540 // There are two general methods for expanding a BUILD_VECTOR node:
541 // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
542 // them together.
543 // 2. Build the vector on the stack and then load it.
544 // If this function returns true, then method (1) will be used, subject to
545 // the constraint that all of the necessary shuffles are legal (as determined
546 // by isShuffleMaskLegal). If this function returns false, then method (2) is
547 // always used. The vector type, and the number of defined values, are
548 // provided.
549 virtual bool
550 shouldExpandBuildVectorWithShuffles(EVT /*VT*/,
551 unsigned DefinedValues) const {
552 return DefinedValues < 3;
553 }
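// For illustration, a target with cheap 128-bit shuffles might loosen the
// default threshold in its override (the numbers are hypothetical):
// \code
//   bool shouldExpandBuildVectorWithShuffles(
//       EVT VT, unsigned DefinedValues) const override {
//     return VT.is128BitVector() ? DefinedValues < 6 : DefinedValues < 3;
//   }
// \endcode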
554
555 /// Return true if integer divide is usually cheaper than a sequence of
556 /// several shifts, adds, and multiplies for this target.
557 /// The definition of "cheaper" may depend on whether we're optimizing
558 /// for speed or for size.
559 virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const { return false; }
560
561 /// Return true if the target can handle a standalone remainder operation.
562 virtual bool hasStandaloneRem(EVT VT) const {
563 return true;
564 }
565
566 /// Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
567 virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const {
568 // Default behavior is to replace SQRT(X) with X*RSQRT(X).
569 return false;
570 }
571
572 /// Reciprocal estimate status values used by the functions below.
573 enum ReciprocalEstimate : int {
574 Unspecified = -1,
575 Disabled = 0,
576 Enabled = 1
577 };
578
579 /// Return a ReciprocalEstimate enum value for a square root of the given type
580 /// based on the function's attributes. If the operation is not overridden by
581 /// the function's attributes, "Unspecified" is returned and target defaults
582 /// are expected to be used for instruction selection.
583 int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const;
584
585 /// Return a ReciprocalEstimate enum value for a division of the given type
586 /// based on the function's attributes. If the operation is not overridden by
587 /// the function's attributes, "Unspecified" is returned and target defaults
588 /// are expected to be used for instruction selection.
589 int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const;
590
591 /// Return the refinement step count for a square root of the given type based
592 /// on the function's attributes. If the operation is not overridden by
593 /// the function's attributes, "Unspecified" is returned and target defaults
594 /// are expected to be used for instruction selection.
595 int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const;
596
597 /// Return the refinement step count for a division of the given type based
598 /// on the function's attributes. If the operation is not overridden by
599 /// the function's attributes, "Unspecified" is returned and target defaults
600 /// are expected to be used for instruction selection.
601 int getDivRefinementSteps(EVT VT, MachineFunction &MF) const;
602
603 /// Returns true if target has indicated at least one type should be bypassed.
604 bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
605
606 /// Returns a map of slow types for division or remainder with corresponding
607 /// fast types.
608 const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
609 return BypassSlowDivWidths;
610 }
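// Targets populate this map via addBypassSlowDiv() in their constructors;
// for example, a target with slow 64-bit but fast 32-bit division would
// register the pair as some x86 subtargets do:
// \code
//   addBypassSlowDiv(/*SlowBitWidth=*/64, /*FastBitWidth=*/32);
// \endcode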
611
612 /// Return true only if vscale must be a power of two.
613 virtual bool isVScaleKnownToBeAPowerOfTwo() const { return false; }
614
615 /// Return true if Flow Control is an expensive operation that should be
616 /// avoided.
617 bool isJumpExpensive() const { return JumpIsExpensive; }
618
619 // Costs parameters used by
620 // SelectionDAGBuilder::shouldKeepJumpConditionsTogether.
621 // shouldKeepJumpConditionsTogether will use these parameter value to
622 // determine if two conditions in the form `br (and/or cond1, cond2)` should
623 // be split into two branches or left as one.
624 //
625 // BaseCost is the cost threshold (in latency). If the estimated latency of
626 // computing both `cond1` and `cond2` is below the cost of just computing
627 // `cond1` + BaseCost, the two conditions will be kept together. Otherwise
628 // they will be split.
629 //
630 // LikelyBias increases BaseCost if branch probability info indicates that it
631 // is likely that both `cond1` and `cond2` will be computed.
632 //
633 // UnlikelyBias decreases BaseCost if branch probability info indicates that
634 // it is unlikely that both `cond1` and `cond2` will be computed.
635 //
636 // Set any field to -1 to make it ignored (setting BaseCost to -1 results in
637 // `shouldKeepJumpConditionsTogether` always returning false).
638 struct CondMergingParams {
639 int BaseCost;
640 int LikelyBias;
641 int UnlikelyBias;
642 };
643 // Return params for deciding if we should keep two branch conditions merged
644 // or split them into two separate branches.
645 // Arg0: The binary op joining the two conditions (and/or).
646 // Arg1: The first condition (cond1)
647 // Arg2: The second condition (cond2)
648 virtual CondMergingParams
649 getJumpConditionMergingParams(Instruction::BinaryOps, const Value *,
650 const Value *) const {
651 // -1 will always result in splitting.
652 return {-1, -1, -1};
653 }
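// For illustration, a target that wants cheap, likely-taken condition
// pairs kept merged could instead return (hypothetical numbers):
// \code
//   return {/*BaseCost=*/2, /*LikelyBias=*/1, /*UnlikelyBias=*/-1};
// \endcode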
654
655 /// Return true if selects are only cheaper than branches if the branch is
656 /// unlikely to be predicted right.
657 bool isPredictableSelectExpensive() const {
658 return PredictableSelectIsExpensive;
659 }
660
661 virtual bool fallBackToDAGISel(const Instruction &Inst) const {
662 return false;
663 }
664
665 /// Return true if the following transform is beneficial:
666 /// fold (conv (load x)) -> (load (conv*)x)
667 /// On architectures that don't natively support some vector loads
668 /// efficiently, casting the load to a smaller vector of larger types and
669 /// loading is more efficient; however, this can be undone by optimizations in
670 /// the DAG combiner.
671 virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
672 const SelectionDAG &DAG,
673 const MachineMemOperand &MMO) const;
674
675 /// Return true if the following transform is beneficial:
676 /// (store (y (conv x)), y*) -> (store x, (x*))
677 virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT,
678 const SelectionDAG &DAG,
679 const MachineMemOperand &MMO) const {
680 // Default to the same logic as loads.
681 return isLoadBitCastBeneficial(StoreVT, BitcastVT, DAG, MMO);
682 }
683
684 /// Return true if it is expected to be cheaper to do a store of vector
685 /// constant with the given size and type for the address space than to
686 /// store the individual scalar element constants.
687 virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
688 unsigned NumElem,
689 unsigned AddrSpace) const {
690 return IsZero;
691 }
692
693 /// Allow store merging for the specified type after legalization in addition
694 /// to before legalization. This may transform stores that do not exist
695 /// earlier (for example, stores created from intrinsics).
696 virtual bool mergeStoresAfterLegalization(EVT MemVT) const {
697 return true;
698 }
699
700 /// Returns true if it is reasonable to merge stores to MemVT size.
701 virtual bool canMergeStoresTo(unsigned AS, EVT MemVT,
702 const MachineFunction &MF) const {
703 return true;
704 }
705
706 /// Return true if it is cheap to speculate a call to intrinsic cttz.
707 virtual bool isCheapToSpeculateCttz(Type *Ty) const {
708 return false;
709 }
710
711 /// Return true if it is cheap to speculate a call to intrinsic ctlz.
712 virtual bool isCheapToSpeculateCtlz(Type *Ty) const {
713 return false;
714 }
715
716 /// Return true if ctlz instruction is fast.
717 virtual bool isCtlzFast() const {
718 return false;
719 }
720
721 /// Return true if ctpop instruction is fast.
722 virtual bool isCtpopFast(EVT VT) const {
723 return isOperationLegal(ISD::CTPOP, VT);
724 }
725
726 /// Return the maximum number of "x & (x - 1)" operations that can be done
727 /// instead of deferring to a custom CTPOP.
728 virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const {
729 return 1;
730 }
731
732 /// Return true if instruction generated for equality comparison is folded
733 /// with instruction generated for signed comparison.
734 virtual bool isEqualityCmpFoldedWithSignedCmp() const { return true; }
735
736 /// Return true if the heuristic to prefer icmp eq zero should be used in code
737 /// gen prepare.
738 virtual bool preferZeroCompareBranch() const { return false; }
739
740 /// Return true if it is cheaper to split the store of a merged int val
741 /// from a pair of smaller values into multiple stores.
742 virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const {
743 return false;
744 }
745
746 /// Return if the target supports combining a
747 /// chain like:
748 /// \code
749 /// %andResult = and %val1, #mask
750 /// %icmpResult = icmp %andResult, 0
751 /// \endcode
752 /// into a single machine instruction of a form like:
753 /// \code
754 /// cc = test %register, #mask
755 /// \endcode
756 virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
757 return false;
758 }
759
760 /// Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
761 virtual bool
763 const MemSDNode &NodeY) const {
764 return true;
765 }
766
767 /// Use bitwise logic to make pairs of compares more efficient. For example:
768 /// and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0
769 /// This should be true when it takes more than one instruction to lower
770 /// setcc (cmp+set on x86 scalar), when bitwise ops are faster than logic on
771 /// condition bits (crand on PowerPC), and/or when reducing cmp+br is a win.
772 virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const {
773 return false;
774 }
775
776 /// Return the preferred operand type if the target has a quick way to compare
777 /// integer values of the given size. Assume that any legal integer type can
778 /// be compared efficiently. Targets may override this to allow illegal wide
779 /// types to return a vector type if there is support to compare that type.
780 virtual MVT hasFastEqualityCompare(unsigned NumBits) const {
781 MVT VT = MVT::getIntegerVT(NumBits);
782 return isTypeLegal(VT) ? VT : MVT::INVALID_SIMPLE_VALUE_TYPE;
783 }
784
785 /// Return true if the target should transform:
786 /// (X & Y) == Y ---> (~X & Y) == 0
787 /// (X & Y) != Y ---> (~X & Y) != 0
788 ///
789 /// This may be profitable if the target has a bitwise and-not operation that
790 /// sets comparison flags. A target may want to limit the transformation based
791 /// on the type of Y or if Y is a constant.
792 ///
793 /// Note that the transform will not occur if Y is known to be a power-of-2
794 /// because a mask and compare of a single bit can be handled by inverting the
795 /// predicate, for example:
796 /// (X & 8) == 8 ---> (X & 8) != 0
797 virtual bool hasAndNotCompare(SDValue Y) const {
798 return false;
799 }
800
801 /// Return true if the target has a bitwise and-not operation:
802 /// X = ~A & B
803 /// This can be used to simplify select or other instructions.
804 virtual bool hasAndNot(SDValue X) const {
805 // If the target has the more complex version of this operation, assume that
806 // it has this operation too.
807 return hasAndNotCompare(X);
808 }
809
810 /// Return true if the target has a bit-test instruction:
811 /// (X & (1 << Y)) ==/!= 0
812 /// This knowledge can be used to prevent breaking the pattern,
813 /// or creating it if it could be recognized.
814 virtual bool hasBitTest(SDValue X, SDValue Y) const { return false; }
815
816 /// There are two ways to clear extreme bits (either low or high):
817 /// Mask: x & (-1 << y) (the instcombine canonical form)
818 /// Shifts: x >> y << y
819 /// Return true if the variant with 2 variable shifts is preferred.
820 /// Return false if there is no preference.
821 virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
822 // By default, let's assume that no one prefers shifts.
823 return false;
824 }
825
826 /// Return true if it is profitable to fold a pair of shifts into a mask.
827 /// This is usually true on most targets. But some targets, like Thumb1,
828 /// have immediate shift instructions, but no immediate "and" instruction;
829 /// this makes the fold unprofitable.
830 virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
831 CombineLevel Level) const {
832 return true;
833 }
834
835 /// Should we transform the IR-optimal check for whether the given truncation
836 /// down into KeptBits would be truncating or not:
837 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits)
838 /// Into its more traditional form:
839 /// ((%x << C) a>> C) dstcond %x
840 /// Return true if we should transform.
841 /// Return false if there is no preference.
842 virtual bool shouldTransformSignedTruncationCheck(EVT XVT,
843 unsigned KeptBits) const {
844 // By default, let's assume that no one prefers shifts.
845 return false;
846 }
847
848 /// Given the pattern
849 /// (X & (C l>>/<< Y)) ==/!= 0
850 /// return true if it should be transformed into:
851 /// ((X <</l>> Y) & C) ==/!= 0
852 /// WARNING: if 'X' is a constant, the fold may deadlock!
853 /// FIXME: we could avoid passing XC, but we can't use isConstOrConstSplat()
854 /// here because it can end up being not linked in.
855 virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
856 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
857 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
858 SelectionDAG &DAG) const {
859 if (hasBitTest(X, Y)) {
860 // One interesting pattern that we'd want to form is 'bit test':
861 // ((1 << Y) & C) ==/!= 0
862 // But we also need to be careful not to try to reverse that fold.
863
864 // Is this '1 << Y' ?
865 if (OldShiftOpcode == ISD::SHL && CC->isOne())
866 return false; // Keep the 'bit test' pattern.
867
868 // Will it be '1 << Y' after the transform ?
869 if (XC && NewShiftOpcode == ISD::SHL && XC->isOne())
870 return true; // Do form the 'bit test' pattern.
871 }
872
873 // If 'X' is a constant, and we transform, then we will immediately
874 // try to undo the fold, thus causing endless combine loop.
875 // So by default, let's assume everyone prefers the fold
876 // iff 'X' is not a constant.
877 return !XC;
878 }
879
880 // Return true if it's desirable to perform the following transform:
881 // (fmul C, (uitofp Pow2))
882 // -> (bitcast_to_FP (add (bitcast_to_INT C), Log2(Pow2) << mantissa))
883 // (fdiv C, (uitofp Pow2))
884 // -> (bitcast_to_FP (sub (bitcast_to_INT C), Log2(Pow2) << mantissa))
885 //
886 // This is only queried after we have verified the transform will be bitwise
887 // equal.
888 //
889 // SDNode *N : The FDiv/FMul node we want to transform.
890 // SDValue FPConst: The Float constant operand in `N`.
891 // SDValue IntPow2: The Integer power of 2 operand in `N`.
892 virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst,
893 SDValue IntPow2) const {
894 // Default to avoiding fdiv which is often very expensive.
895 return N->getOpcode() == ISD::FDIV;
896 }
897
898 // Given:
899 // (icmp eq/ne (and X, C0), (shift X, C1))
900 // or
901 // (icmp eq/ne X, (rotate X, CPow2))
902
903 // If C0 is a mask or shifted mask and the shift amt (C1) isolates the
904 // remaining bits (i.e. something like `(x64 & UINT32_MAX) == (x64 >> 32)`)
905 // Do we prefer the shift to be shift-right, shift-left, or rotate.
906 // Note: It's only valid to convert the rotate version to the shift version iff
907 // the shift-amt (`C1`) is a power of 2 (including 0).
908 // If ShiftOpc (current Opcode) is returned, do nothing.
909 virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(
910 EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
911 const APInt &ShiftOrRotateAmt,
912 const std::optional<APInt> &AndMask) const {
913 return ShiftOpc;
914 }
915
916 /// These two forms are equivalent:
917 /// sub %y, (xor %x, -1)
918 /// add (add %x, 1), %y
919 /// The variant with two add's is IR-canonical.
920 /// Some targets may prefer one to the other.
921 virtual bool preferIncOfAddToSubOfNot(EVT VT) const {
922 // By default, let's assume that everyone prefers the form with two add's.
923 return true;
924 }
925
926 // By default prefer folding (abs (sub nsw x, y)) -> abds(x, y). Some targets
927 // may want to avoid this to prevent loss of sub_nsw pattern.
928 virtual bool preferABDSToABSWithNSW(EVT VT) const {
929 return true;
930 }
931
932 // Return true if the target wants to transform Op(Splat(X)) -> Splat(Op(X))
933 virtual bool preferScalarizeSplat(SDNode *N) const { return true; }
934
935 // Return true if the target wants to transform:
936 // (TruncVT truncate(sext_in_reg(VT X, ExtVT))
937 // -> (TruncVT sext_in_reg(truncate(VT X), ExtVT))
938 // Some targets might prefer pre-sextinreg to improve truncation/saturation.
939 virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const {
940 return true;
941 }
942
943 /// Return true if the target wants to use the optimization that
944 /// turns ext(promotableInst1(...(promotableInstN(load)))) into
945 /// promotedInst1(...(promotedInstN(ext(load)))).
946 bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
947
948 /// Return true if the target can combine store(extractelement VectorTy,
949 /// Idx).
950 /// \p Cost[out] gives the cost of that transformation when this is true.
951 virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
952 unsigned &Cost) const {
953 return false;
954 }
955
956 /// Return true if the target shall perform extract vector element and store
957 /// given that the vector is known to be splat of constant.
958 /// \p Index[out] gives the index of the vector element to be extracted when
959 /// this is true.
960 virtual bool shallExtractConstSplatVectorElementToStore(
961 Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
962 return false;
963 }
964
965 /// Return true if inserting a scalar into a variable element of an undef
966 /// vector is more efficiently handled by splatting the scalar instead.
967 virtual bool shouldSplatInsEltVarIndex(EVT) const {
968 return false;
969 }
970
971 /// Return true if target always benefits from combining into FMA for a
972 /// given value type. This must typically return false on targets where FMA
973 /// takes more cycles to execute than FADD.
974 virtual bool enableAggressiveFMAFusion(EVT VT) const { return false; }
975
976 /// Return true if target always benefits from combining into FMA for a
977 /// given value type. This must typically return false on targets where FMA
978 /// takes more cycles to execute than FADD.
979 virtual bool enableAggressiveFMAFusion(LLT Ty) const { return false; }
980
981 /// Return the ValueType of the result of SETCC operations.
982 virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
983 EVT VT) const;
984
985 /// Return the ValueType for comparison libcalls. Comparison libcalls include
986 /// floating point comparison calls, and Ordered/Unordered check calls on
987 /// floating point numbers.
988 virtual
989 MVT::SimpleValueType getCmpLibcallReturnType() const;
990
991 /// For targets without i1 registers, this gives the nature of the high-bits
992 /// of boolean values held in types wider than i1.
993 ///
994 /// "Boolean values" are special true/false values produced by nodes like
995 /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
996 /// Not to be confused with general values promoted from i1. Some cpus
997 /// distinguish between vectors of boolean and scalars; the isVec parameter
998 /// selects between the two kinds. For example on X86 a scalar boolean should
999 /// be zero extended from i1, while the elements of a vector of booleans
1000 /// should be sign extended from i1.
1001 ///
1002 /// Some cpus also treat floating point types the same way as they treat
1003 /// vectors instead of the way they treat scalars.
1004 BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
1005 if (isVec)
1006 return BooleanVectorContents;
1007 return isFloat ? BooleanFloatContents : BooleanContents;
1008 }
1009
1010 BooleanContent getBooleanContents(EVT Type) const {
1011 return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
1012 }
1013
1014 /// Promote the given target boolean to a target boolean of the given type.
1015 /// A target boolean is an integer value, not necessarily of type i1, the bits
1016 /// of which conform to getBooleanContents.
1017 ///
1018 /// ValVT is the type of values that produced the boolean.
1019 SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool,
1020 EVT ValVT) const {
1021 SDLoc dl(Bool);
1022 EVT BoolVT =
1023 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), ValVT);
1024 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(ValVT));
1025 return DAG.getNode(ExtendCode, dl, BoolVT, Bool);
1026 }
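// For example, a combine holding an i1 condition Cond that was produced by
// an f32 comparison can widen it to the target's setcc result type with:
// \code
//   SDValue WideCond = TLI.promoteTargetBoolean(DAG, Cond, MVT::f32);
// \endcode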
1027
1028 /// Return target scheduling preference.
1029 Sched::Preference getSchedulingPreference() const {
1030 return SchedPreferenceInfo;
1031 }
1032
1033 /// Some scheduler, e.g. hybrid, can switch to different scheduling heuristics
1034 /// for different nodes. This function returns the preference (or none) for
1035 /// the given node.
1036 virtual Sched::Preference getSchedulingPreference(SDNode *) const {
1037 return Sched::None;
1038 }
1039
1040 /// Return the register class that should be used for the specified value
1041 /// type.
1042 virtual const TargetRegisterClass *getRegClassFor(MVT VT, bool isDivergent = false) const {
1043 (void)isDivergent;
1044 const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
1045 assert(RC && "This value type is not natively supported!");
1046 return RC;
1047 }
1048
1049 /// Allows target to decide about the register class of the
1050 /// specific value that is live outside the defining block.
1051 /// Returns true if the value needs uniform register class.
1052 virtual bool requiresUniformRegister(MachineFunction &MF,
1053 const Value *) const {
1054 return false;
1055 }
1056
1057 /// Return the 'representative' register class for the specified value
1058 /// type.
1059 ///
1060 /// The 'representative' register class is the largest legal super-reg
1061 /// register class for the register class of the value type. For example, on
1062 /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
1063 /// register class is GR64 on x86_64.
1064 virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
1065 const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
1066 return RC;
1067 }
1068
1069 /// Return the cost of the 'representative' register class for the specified
1070 /// value type.
1071 virtual uint8_t getRepRegClassCostFor(MVT VT) const {
1072 return RepRegClassCostForVT[VT.SimpleTy];
1073 }
1074
1075 /// Return the preferred strategy to legalize this SHIFT instruction, with
1076 /// \p ExpansionFactor being the recursion depth - how many expansions are needed.
1077 enum class ShiftLegalizationStrategy {
1078 ExpandToParts,
1079 ExpandThroughStack,
1080 LowerToLibcall,
1081 };
1082 virtual ShiftLegalizationStrategy
1083 preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N,
1084 unsigned ExpansionFactor) const {
1085 if (ExpansionFactor == 1)
1086 return ShiftLegalizationStrategy::ExpandToParts;
1087 return ShiftLegalizationStrategy::ExpandThroughStack;
1088 }
1089
1090 /// Return true if the target has native support for the specified value type.
1091 /// This means that it has a register that directly holds it without
1092 /// promotions or expansions.
1093 bool isTypeLegal(EVT VT) const {
1094 assert(!VT.isSimple() ||
1095 (unsigned)VT.getSimpleVT().SimpleTy < std::size(RegClassForVT));
1096 return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
1097 }
1098
1099 class ValueTypeActionImpl {
1100 /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
1101 /// that indicates how instruction selection should deal with the type.
1102 LegalizeTypeAction ValueTypeActions[MVT::VALUETYPE_SIZE];
1103
1104 public:
1105 ValueTypeActionImpl() {
1106 std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
1107 TypeLegal);
1108 }
1109
1110 LegalizeTypeAction getTypeAction(MVT VT) const {
1111 return ValueTypeActions[VT.SimpleTy];
1112 }
1113
1114 void setTypeAction(MVT VT, LegalizeTypeAction Action) {
1115 ValueTypeActions[VT.SimpleTy] = Action;
1116 }
1117 };
1118
1119 const ValueTypeActionImpl &getValueTypeActions() const {
1120 return ValueTypeActions;
1121 }
1122
1123 /// Return pair that represents the legalization kind (first) that needs to
1124 /// happen to EVT (second) in order to type-legalize it.
1125 ///
1126 /// First: how we should legalize values of this type, either it is already
1127 /// legal (return 'Legal') or we need to promote it to a larger type (return
1128 /// 'Promote'), or we need to expand it into multiple registers of smaller
1129 /// integer type (return 'Expand'). 'Custom' is not an option.
1130 ///
1131 /// Second: for types supported by the target, this is an identity function.
1132 /// For types that must be promoted to larger types, this returns the larger
1133 /// type to promote to. For integer types that are larger than the largest
1134 /// integer register, this contains one step in the expansion to get to the
1135 /// smaller register. For illegal floating point types, this returns the
1136 /// integer type to transform to.
1137 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
1138
1139 /// Return how we should legalize values of this type, either it is already
1140 /// legal (return 'Legal') or we need to promote it to a larger type (return
1141 /// 'Promote'), or we need to expand it into multiple registers of smaller
1142 /// integer type (return 'Expand'). 'Custom' is not an option.
1143 LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
1144 return getTypeConversion(Context, VT).first;
1145 }
1146 LegalizeTypeAction getTypeAction(MVT VT) const {
1147 return ValueTypeActions.getTypeAction(VT);
1148 }
1149
1150 /// For types supported by the target, this is an identity function. For
1151 /// types that must be promoted to larger types, this returns the larger type
1152 /// to promote to. For integer types that are larger than the largest integer
1153 /// register, this contains one step in the expansion to get to the smaller
1154 /// register. For illegal floating point types, this returns the integer type
1155 /// to transform to.
1156 virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
1157 return getTypeConversion(Context, VT).second;
1158 }
1159
1160 /// For types supported by the target, this is an identity function. For
1161 /// types that must be expanded (i.e. integer types that are larger than the
1162 /// largest integer register or illegal floating point types), this returns
1163 /// the largest legal type it will be expanded to.
1164 EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
1165 assert(!VT.isVector());
1166 while (true) {
1167 switch (getTypeAction(Context, VT)) {
1168 case TypeLegal:
1169 return VT;
1170 case TypeExpandInteger:
1171 VT = getTypeToTransformTo(Context, VT);
1172 break;
1173 default:
1174 llvm_unreachable("Type is not legal nor is it to be expanded!");
1175 }
1176 }
1177 }
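// For example, on a target whose widest legal integer is i32, an i256
// value is repeatedly halved (i256 -> i128 -> i64 -> i32):
// \code
//   EVT VT = TLI.getTypeToExpandTo(Ctx, EVT::getIntegerVT(Ctx, 256));
//   // VT == MVT::i32, the largest legal type in the expansion chain
// \endcode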
1178
1179 /// Vector types are broken down into some number of legal first class types.
1180 /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
1181 /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
1182 /// turns into 4 EVT::i32 values with both PPC and X86.
1183 ///
1184 /// This method returns the number of registers needed, and the VT for each
1185 /// register. It also returns the VT and quantity of the intermediate values
1186 /// before they are promoted/expanded.
1187 unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
1188 EVT &IntermediateVT,
1189 unsigned &NumIntermediates,
1190 MVT &RegisterVT) const;
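// For example, with only v4f32 legal (as with SSE1), v8f32 breaks down
// into two v4f32 intermediates:
// \code
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(
//       Ctx, EVT::getVectorVT(Ctx, MVT::f32, 8), IntermediateVT,
//       NumIntermediates, RegisterVT);
//   // NumRegs == 2, IntermediateVT == v4f32, RegisterVT == v4f32
// \endcode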
1191
1192 /// Certain targets such as MIPS require that some types such as vectors are
1193 /// always broken down into scalars in some contexts. This occurs even if the
1194 /// vector type is legal.
1195 virtual unsigned getVectorTypeBreakdownForCallingConv(
1196 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
1197 unsigned &NumIntermediates, MVT &RegisterVT) const {
1198 return getVectorTypeBreakdown(Context, VT, IntermediateVT, NumIntermediates,
1199 RegisterVT);
1200 }
1201
1202 struct IntrinsicInfo {
1203 unsigned opc = 0; // target opcode
1204 EVT memVT; // memory VT
1205
1206 // value representing memory location
1207 PointerUnion<const Value *, const PseudoSourceValue *> ptrVal;
1208
1209 // Fallback address space for use if ptrVal is nullptr. std::nullopt means
1210 // unknown address space.
1211 std::optional<unsigned> fallbackAddressSpace;
1212
1213 int offset = 0; // offset off of ptrVal
1214 uint64_t size = 0; // the size of the memory location
1215 // (taken from memVT if zero)
1216 MaybeAlign align = Align(1); // alignment
1217 MachineMemOperand::Flags flags = MachineMemOperand::MONone;
1217
1219 IntrinsicInfo() = default;
1220 };
1221
1222 /// Given an intrinsic, checks if on the target the intrinsic will need to map
1223 /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
1224 /// true and stores the intrinsic information into the IntrinsicInfo that was
1225 /// passed to the function.
1226 virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
1227 MachineFunction &,
1228 unsigned /*Intrinsic*/) const {
1229 return false;
1230 }
1231
1232 /// Returns true if the target can instruction select the specified FP
1233 /// immediate natively. If false, the legalizer will materialize the FP
1234 /// immediate as a load from a constant pool.
1235 virtual bool isFPImmLegal(const APFloat & /*Imm*/, EVT /*VT*/,
1236 bool ForCodeSize = false) const {
1237 return false;
1238 }
1239
1240 /// Targets can use this to indicate that they only support *some*
1241 /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
1242 /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
1243 /// legal.
1244 virtual bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const {
1245 return true;
1246 }
1247
1248 /// Returns true if the operation can trap for the value type.
1249 ///
1250 /// VT must be a legal type. By default, we optimistically assume most
1251 /// operations don't trap except for integer divide and remainder.
1252 virtual bool canOpTrap(unsigned Op, EVT VT) const;
1253
1254 /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
1255 /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
1256 /// constant pool entry.
1257 virtual bool isVectorClearMaskLegal(ArrayRef<int> /*Mask*/,
1258 EVT /*VT*/) const {
1259 return false;
1260 }
1261
1262 /// How to legalize this custom operation?
1263 virtual LegalizeAction getCustomOperationAction(SDNode &Op) const {
1264 return Legal;
1265 }
1266
1267 /// Return how this operation should be treated: either it is legal, needs to
1268 /// be promoted to a larger size, needs to be expanded to some other code
1269 /// sequence, or the target has a custom expander for it.
1270 LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
1271 // If a target-specific SDNode requires legalization, require the target
1272 // to provide custom legalization for it.
1273 if (Op >= std::size(OpActions[0]))
1274 return Custom;
1275 if (VT.isExtended())
1276 return Expand;
1277 return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
1278 }
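// For example, a target that marks an operation in its constructor with
// setOperationAction(ISD::SREM, MVT::i32, Expand) will get Expand back
// from getOperationAction(ISD::SREM, MVT::i32), and legalization then
// rewrites the node in terms of other operations (here SDIV/MUL/SUB).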
1279
1280 /// Custom method defined by each target to indicate if an operation which
1281 /// may require a scale is supported natively by the target.
1282 /// If not, the operation is illegal.
1283 virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT,
1284 unsigned Scale) const {
1285 return false;
1286 }
1287
1288 /// Some fixed point operations may be natively supported by the target but
1289 /// only for specific scales. This method allows for checking
1290 /// if the width is supported by the target for a given operation that may
1291 /// depend on scale.
1292 LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT,
1293 unsigned Scale) const {
1294 auto Action = getOperationAction(Op, VT);
1295 if (Action != Legal)
1296 return Action;
1297
1298 // This operation is supported in this type but may only work on specific
1299 // scales.
1300 bool Supported;
1301 switch (Op) {
1302 default:
1303 llvm_unreachable("Unexpected fixed point operation.");
1304 case ISD::SMULFIX:
1305 case ISD::SMULFIXSAT:
1306 case ISD::UMULFIX:
1307 case ISD::UMULFIXSAT:
1308 case ISD::SDIVFIX:
1309 case ISD::SDIVFIXSAT:
1310 case ISD::UDIVFIX:
1311 case ISD::UDIVFIXSAT:
1312 Supported = isSupportedFixedPointOperation(Op, VT, Scale);
1313 break;
1314 }
1315
1316 return Supported ? Action : Expand;
1317 }
1318
1319 // If Op is a strict floating-point operation, return the result
1320 // of getOperationAction for the equivalent non-strict operation.
1321 LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const {
1322 unsigned EqOpc;
1323 switch (Op) {
1324 default: llvm_unreachable("Unexpected FP pseudo-opcode");
1325#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1326 case ISD::STRICT_##DAGN: EqOpc = ISD::DAGN; break;
1327#define CMP_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
1328 case ISD::STRICT_##DAGN: EqOpc = ISD::SETCC; break;
1329#include "llvm/IR/ConstrainedOps.def"
1330 }
1331
1332 return getOperationAction(EqOpc, VT);
1333 }
1334
1335 /// Return true if the specified operation is legal on this target or can be
1336 /// made legal with custom lowering. This is used to help guide high-level
1337 /// lowering decisions. LegalOnly is an optional convenience for code paths
1338 /// traversed pre and post legalisation.
1339 bool isOperationLegalOrCustom(unsigned Op, EVT VT,
1340 bool LegalOnly = false) const {
1341 if (LegalOnly)
1342 return isOperationLegal(Op, VT);
1343
1344 return (VT == MVT::Other || isTypeLegal(VT)) &&
1345 (getOperationAction(Op, VT) == Legal ||
1346 getOperationAction(Op, VT) == Custom);
1347 }
1348
1349 /// Return true if the specified operation is legal on this target or can be
1350 /// made legal using promotion. This is used to help guide high-level lowering
1351 /// decisions. LegalOnly is an optional convenience for code paths traversed
1352 /// pre and post legalisation.
1353 bool isOperationLegalOrPromote(unsigned Op, EVT VT,
1354 bool LegalOnly = false) const {
1355 if (LegalOnly)
1356 return isOperationLegal(Op, VT);
1357
1358 return (VT == MVT::Other || isTypeLegal(VT)) &&
1359 (getOperationAction(Op, VT) == Legal ||
1360 getOperationAction(Op, VT) == Promote);
1361 }
1362
1363 /// Return true if the specified operation is legal on this target or can be
1364 /// made legal with custom lowering or using promotion. This is used to help
1365 /// guide high-level lowering decisions. LegalOnly is an optional convenience
1366 /// for code paths traversed pre and post legalisation.
1367 bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT,
1368 bool LegalOnly = false) const {
1369 if (LegalOnly)
1370 return isOperationLegal(Op, VT);
1371
1372 return (VT == MVT::Other || isTypeLegal(VT)) &&
1373 (getOperationAction(Op, VT) == Legal ||
1374 getOperationAction(Op, VT) == Custom ||
1375 getOperationAction(Op, VT) == Promote);
1376 }
1377
1378 /// Return true if the operation uses custom lowering, regardless of whether
1379 /// the type is legal or not.
1380 bool isOperationCustom(unsigned Op, EVT VT) const {
1381 return getOperationAction(Op, VT) == Custom;
1382 }
1383
1384 /// Return true if lowering to a jump table is allowed.
1385 virtual bool areJTsAllowed(const Function *Fn) const {
1386 if (Fn->getFnAttribute("no-jump-tables").getValueAsBool())
1387 return false;
1388
1389 return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
1390 isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
1391 }
1392
1393 /// Check whether the range [Low,High] fits in a machine word.
1394 bool rangeFitsInWord(const APInt &Low, const APInt &High,
1395 const DataLayout &DL) const {
1396 // FIXME: Using the pointer type doesn't seem ideal.
1397 uint64_t BW = DL.getIndexSizeInBits(0u);
1398 uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
1399 return Range <= BW;
1400 }
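// For example, with a 64-bit index width, a case range [5, 68] spans
// 68 - 5 + 1 = 64 values and fits in a machine word, while [0, 64] spans
// 65 values and does not.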
1401
1402 /// Return true if lowering to a jump table is suitable for a set of case
1403 /// clusters which may contain \p NumCases cases, \p Range range of values.
1404 virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
1405 uint64_t Range, ProfileSummaryInfo *PSI,
1406 BlockFrequencyInfo *BFI) const;
1407
1408 /// Returns preferred type for switch condition.
1409 virtual MVT getPreferredSwitchConditionType(LLVMContext &Context,
1410 EVT ConditionVT) const;
1411
1412 /// Return true if lowering to a bit test is suitable for a set of case
1413 /// clusters which contains \p NumDests unique destinations, \p Low and
1414 /// \p High as its lowest and highest case values, and expects \p NumCmps
1415 /// case value comparisons. Check if the number of destinations, comparison
1416 /// metric, and range are all suitable.
1417 bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
1418 const APInt &Low, const APInt &High,
1419 const DataLayout &DL) const {
1420 // FIXME: I don't think NumCmps is the correct metric: a single case and a
1421 // range of cases both require only one branch to lower. Just looking at the
1422 // number of clusters and destinations should be enough to decide whether to
1423 // build bit tests.
1424
1425 // To lower a range with bit tests, the range must fit the bitwidth of a
1426 // machine word.
1427 if (!rangeFitsInWord(Low, High, DL))
1428 return false;
1429
1430 // Decide whether it's profitable to lower this range with bit tests. Each
1431 // destination requires a bit test and branch, and there is an overall range
1432 // check branch. For a small number of clusters, separate comparisons might
1433 // be cheaper, and for many destinations, splitting the range might be
1434 // better.
1435 return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
1436 (NumDests == 3 && NumCmps >= 6);
1437 }
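// For example, a switch with 3 destinations and 6 case-value comparisons
// whose cases span [10, 40] passes both checks on a 64-bit machine: the
// 31-value range fits in a word and (NumDests == 3 && NumCmps >= 6) holds,
// so one range check plus three bit-test branches replace six
// compare-and-branch pairs.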
1438
1439 /// Return true if the specified operation is illegal on this target or
1440 /// unlikely to be made legal with custom lowering. This is used to help guide
1441 /// high-level lowering decisions.
1442 bool isOperationExpand(unsigned Op, EVT VT) const {
1443 return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
1444 }
1445
1446 /// Return true if the specified operation is legal on this target.
1447 bool isOperationLegal(unsigned Op, EVT VT) const {
1448 return (VT == MVT::Other || isTypeLegal(VT)) &&
1449 getOperationAction(Op, VT) == Legal;
1450 }
1451
1452 /// Return how this load with extension should be treated: either it is legal,
1453 /// needs to be promoted to a larger size, needs to be expanded to some other
1454 /// code sequence, or the target has a custom expander for it.
1455 LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
1456 EVT MemVT) const {
1457 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1458 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1459 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1460 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
1461 MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
1462 unsigned Shift = 4 * ExtType;
1463 return (LegalizeAction)((LoadExtActions[ValI][MemI] >> Shift) & 0xf);
1464 }
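// Each LoadExtActions[ValI][MemI] entry packs one 4-bit LegalizeAction per
// extension kind (ISD::NON_EXTLOAD, EXTLOAD, SEXTLOAD, ZEXTLOAD), hence
// the "4 * ExtType" shift and 0xf mask above. A typical query:
// \code
//   // Is a sign-extending load from i8 memory to an i32 value legal?
//   bool OK = TLI.isLoadExtLegal(ISD::SEXTLOAD, MVT::i32, MVT::i8);
// \endcode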
1465
1466 /// Return true if the specified load with extension is legal on this target.
1467 bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1468 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
1469 }
1470
1471 /// Return true if the specified load with extension is legal or custom
1472 /// on this target.
1473 bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1474 return getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
1475 getLoadExtAction(ExtType, ValVT, MemVT) == Custom;
1476 }
1477
1478 /// Same as getLoadExtAction, but for atomic loads.
1479 LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT,
1480 EVT MemVT) const {
1481 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1482 unsigned ValI = (unsigned)ValVT.getSimpleVT().SimpleTy;
1483 unsigned MemI = (unsigned)MemVT.getSimpleVT().SimpleTy;
1484 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::VALUETYPE_SIZE &&
1485 MemI < MVT::VALUETYPE_SIZE && "Table isn't big enough!");
1486 unsigned Shift = 4 * ExtType;
1487 LegalizeAction Action =
1488 (LegalizeAction)((AtomicLoadExtActions[ValI][MemI] >> Shift) & 0xf);
1489 assert((Action == Legal || Action == Expand) &&
1490 "Unsupported atomic load extension action.");
1491 return Action;
1492 }
1493
1494 /// Return true if the specified atomic load with extension is legal on
1495 /// this target.
1496 bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
1497 return getAtomicLoadExtAction(ExtType, ValVT, MemVT) == Legal;
1498 }
1499
1500 /// Return how this store with truncation should be treated: either it is
1501 /// legal, needs to be promoted to a larger size, needs to be expanded to some
1502 /// other code sequence, or the target has a custom expander for it.
1503 LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
1504 if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
1505 unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
1506 unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
1507 assert(ValI < MVT::VALUETYPE_SIZE && MemI < MVT::VALUETYPE_SIZE &&
1508 "Table isn't big enough!");
1509 return TruncStoreActions[ValI][MemI];
1510 }
1511
1512 /// Return true if the specified store with truncation is legal on this
1513 /// target.
1514 bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
1515 return isTypeLegal(ValVT) && getTruncStoreAction(ValVT, MemVT) == Legal;
1516 }
1517
1518 /// Return true if the specified store with truncation is legal or has a
1519 /// custom lowering on this target.
1520 bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const {
1521 return isTypeLegal(ValVT) &&
1522 (getTruncStoreAction(ValVT, MemVT) == Legal ||
1523 getTruncStoreAction(ValVT, MemVT) == Custom);
1524 }
1525
1526 virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT,
1527 bool LegalOnly) const {
1528 if (LegalOnly)
1529 return isTruncStoreLegal(ValVT, MemVT);
1530
1531 return isTruncStoreLegalOrCustom(ValVT, MemVT);
1532 }
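 // --- Illustrative usage sketch (not part of the header) ---
 // A target constructor opts out of a truncating store it cannot select, after
 // which the queries above steer the combiner away from it (the types are
 // examples only):
 //
 //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
 //   // isTruncStoreLegal(MVT::f64, MVT::f32) now returns false.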
1533
1534 /// Return how the indexed load should be treated: either it is legal, needs
1535 /// to be promoted to a larger size, needs to be expanded to some other code
1536 /// sequence, or the target has a custom expander for it.
1537 LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
1538 return getIndexedModeAction(IdxMode, VT, IMAB_Load);
1539 }
1540
1541 /// Return true if the specified indexed load is legal on this target.
1542 bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
1543 return VT.isSimple() &&
1544 (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1545 getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1546 }
1547
1548 /// Return how the indexed store should be treated: either it is legal, needs
1549 /// to be promoted to a larger size, needs to be expanded to some other code
1550 /// sequence, or the target has a custom expander for it.
1551 LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
1552 return getIndexedModeAction(IdxMode, VT, IMAB_Store);
1553 }
1554
1555 /// Return true if the specified indexed store is legal on this target.
1556 bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
1557 return VT.isSimple() &&
1558 (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1559 getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1560 }
1561
1562 /// Return how the indexed masked load should be treated: either it is legal, needs
1563 /// to be promoted to a larger size, needs to be expanded to some other code
1564 /// sequence, or the target has a custom expander for it.
1565 LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const {
1566 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad);
1567 }
1568
1569 /// Return true if the specified indexed masked load is legal on this target.
1570 bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const {
1571 return VT.isSimple() &&
1572 (getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
1573 getIndexedMaskedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
1574 }
1575
1576 /// Return how the indexed masked store should be treated: either it is legal, needs
1577 /// to be promoted to a larger size, needs to be expanded to some other code
1578 /// sequence, or the target has a custom expander for it.
1579 LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const {
1580 return getIndexedModeAction(IdxMode, VT, IMAB_MaskedStore);
1581 }
1582
1583 /// Return true if the specified indexed masked store is legal on this target.
1584 bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const {
1585 return VT.isSimple() &&
1586 (getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
1587 getIndexedMaskedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
1588 }
1589
1590 /// Returns true if the index type for a masked gather/scatter requires
1591 /// extending.
1592 virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const { return false; }
1593
1594 // Returns true if Extend can be folded into the index of masked gathers/scatters
1595 // on this target.
1596 virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const {
1597 return false;
1598 }
1599
1600 // Return true if the target supports a scatter/gather instruction with
1601 // indices which are scaled by the particular value. Note that all targets
1602 // must by definition support a scale of 1.
1603 virtual bool isLegalScaleForGatherScatter(uint64_t Scale,
1604 uint64_t ElemSize) const {
1605 // MGATHER/MSCATTER are only required to support scaling by one or by the
1606 // element size.
1607 if (Scale != ElemSize && Scale != 1)
1608 return false;
1609 return true;
1610 }
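 // --- Illustrative override sketch (hypothetical target) ---
 // A target with scaled-index gathers might accept any power-of-two scale no
 // larger than the element size:
 //
 //   bool isLegalScaleForGatherScatter(uint64_t Scale,
 //                                     uint64_t ElemSize) const override {
 //     return Scale == 1 || (isPowerOf2_64(Scale) && Scale <= ElemSize);
 //   }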
1611
1612 /// Return how the condition code should be treated: either it is legal, needs
1613 /// to be expanded to some other code sequence, or the target has a custom
1614 /// expander for it.
1615 LegalizeAction
1616 getCondCodeAction(ISD::CondCode CC, MVT VT) const {
1617 assert((unsigned)CC < std::size(CondCodeActions) &&
1618 ((unsigned)VT.SimpleTy >> 3) < std::size(CondCodeActions[0]) &&
1619 "Table isn't big enough!");
1620 // See setCondCodeAction for how this is encoded.
1621 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
1622 uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
1623 LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
1624 assert(Action != Promote && "Can't promote condition code!");
1625 return Action;
1626 }
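 // Worked example of the encoding above: for a value type with
 // SimpleTy == 13, the action lives in CondCodeActions[CC][13 >> 3], i.e. the
 // second 32-bit word, at bit offset 4 * (13 & 0x7) == 20; the 4-bit field
 // extracted there is the LegalizeAction.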
1627
1628 /// Return true if the specified condition code is legal for a comparison of
1629 /// the specified types on this target.
1630 bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
1631 return getCondCodeAction(CC, VT) == Legal;
1632 }
1633
1634 /// Return true if the specified condition code is legal or custom for a
1635 /// comparison of the specified types on this target.
1636 bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const {
1637 return getCondCodeAction(CC, VT) == Legal ||
1638 getCondCodeAction(CC, VT) == Custom;
1639 }
1640
1641 /// If the action for this operation is to promote, this method returns the
1642 /// ValueType to promote to.
1643 MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
1644 assert(getOperationAction(Op, VT) == Promote &&
1645 "This operation isn't promoted!");
1646
1647 // See if this has an explicit type specified.
1648 std::map<std::pair<unsigned, MVT::SimpleValueType>,
1649 MVT::SimpleValueType>::const_iterator PTTI =
1650 PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
1651 if (PTTI != PromoteToType.end()) return PTTI->second;
1652
1653 assert((VT.isInteger() || VT.isFloatingPoint()) &&
1654 "Cannot autopromote this type, add it with AddPromotedToType.");
1655
1656 uint64_t VTBits = VT.getScalarSizeInBits();
1657 MVT NVT = VT;
1658 do {
1659 NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
1660 assert(NVT.isInteger() == VT.isInteger() &&
1661 NVT.isFloatingPoint() == VT.isFloatingPoint() &&
1662 "Didn't find type to promote to!");
1663 } while (VTBits >= NVT.getScalarSizeInBits() || !isTypeLegal(NVT) ||
1664 getOperationAction(Op, NVT) == Promote);
1665 return NVT;
1666 }
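 // Worked example of the auto-promotion loop above: if i16 is marked Promote
 // for Op and no explicit AddPromotedToType entry exists, the loop walks i32,
 // i64, ... and returns the first type that is wider, legal, and not itself
 // promoted for Op (typically i32).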
1667
1668 virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty,
1669 bool AllowUnknown = false) const {
1670 return getValueType(DL, Ty, AllowUnknown);
1671 }
1672
1673 /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
1674 /// operations except for the pointer size. If AllowUnknown is true, this
1675 /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
1676 /// otherwise it will assert.
1677 virtual EVT getValueType(const DataLayout &DL, Type *Ty,
1678 bool AllowUnknown = false) const {
1679 // Lower scalar pointers to native pointer types.
1680 if (auto *PTy = dyn_cast<PointerType>(Ty))
1681 return getPointerTy(DL, PTy->getAddressSpace());
1682
1683 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1684 Type *EltTy = VTy->getElementType();
1685 // Lower vectors of pointers to native pointer types.
1686 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1687 EVT PointerTy(getPointerTy(DL, PTy->getAddressSpace()));
1688 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1689 }
1690 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1691 VTy->getElementCount());
1692 }
1693
1694 return EVT::getEVT(Ty, AllowUnknown);
1695 }
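 // Examples of the mapping above, assuming 64-bit pointers in address space 0:
 // a scalar `ptr` yields MVT::i64, `<4 x ptr>` yields v4i64, and a plain `i32`
 // falls through to EVT::getEVT and yields MVT::i32.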
1696
1697 virtual EVT getMemValueType(const DataLayout &DL, Type *Ty,
1698 bool AllowUnknown = false) const {
1699 // Lower scalar pointers to native pointer types.
1700 if (auto *PTy = dyn_cast<PointerType>(Ty))
1701 return getPointerMemTy(DL, PTy->getAddressSpace());
1702
1703 if (auto *VTy = dyn_cast<VectorType>(Ty)) {
1704 Type *EltTy = VTy->getElementType();
1705 if (auto *PTy = dyn_cast<PointerType>(EltTy)) {
1706 EVT PointerTy(getPointerMemTy(DL, PTy->getAddressSpace()));
1707 EltTy = PointerTy.getTypeForEVT(Ty->getContext());
1708 }
1709 return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(EltTy, false),
1710 VTy->getElementCount());
1711 }
1712
1713 return getValueType(DL, Ty, AllowUnknown);
1714 }
1715
1716
1717 /// Return the MVT corresponding to this LLVM type. See getValueType.
1718 MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
1719 bool AllowUnknown = false) const {
1720 return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
1721 }
1722
1723 /// Returns the desired alignment for ByVal or InAlloca aggregate function
1724 /// arguments in the caller parameter area.
1725 virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
1726
1727 /// Return the type of registers that this ValueType will eventually require.
1728 virtual MVT getRegisterType(MVT VT) const {
1729 assert((unsigned)VT.SimpleTy < std::size(RegisterTypeForVT));
1730 return RegisterTypeForVT[VT.SimpleTy];
1731 }
1732
1733 /// Return the type of registers that this ValueType will eventually require.
1734 MVT getRegisterType(LLVMContext &Context, EVT VT) const {
1735 if (VT.isSimple())
1736 return getRegisterType(VT.getSimpleVT());
1737 if (VT.isVector()) {
1738 EVT VT1;
1739 MVT RegisterVT;
1740 unsigned NumIntermediates;
1741 (void)getVectorTypeBreakdown(Context, VT, VT1,
1742 NumIntermediates, RegisterVT);
1743 return RegisterVT;
1744 }
1745 if (VT.isInteger()) {
1746 return getRegisterType(Context, getTypeToTransformTo(Context, VT));
1747 }
1748 llvm_unreachable("Unsupported extended type!");
1749 }
1750
1751 /// Return the number of registers that this ValueType will eventually
1752 /// require.
1753 ///
1754 /// This is one for any types promoted to live in larger registers, but may be
1755 /// more than one for types (like i64) that are split into pieces. For types
1756 /// like i140, which are first promoted then expanded, it is the number of
1757 /// registers needed to hold all the bits of the original type. For an i140
1758 /// on a 32 bit machine this means 5 registers.
1759 ///
1760 /// RegisterVT may be passed as a way to override the default settings, for
1761 /// instance with i128 inline assembly operands on SystemZ.
1762 virtual unsigned
1763 getNumRegisters(LLVMContext &Context, EVT VT,
1764 std::optional<MVT> RegisterVT = std::nullopt) const {
1765 if (VT.isSimple()) {
1766 assert((unsigned)VT.getSimpleVT().SimpleTy <
1767 std::size(NumRegistersForVT));
1768 return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
1769 }
1770 if (VT.isVector()) {
1771 EVT VT1;
1772 MVT VT2;
1773 unsigned NumIntermediates;
1774 return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
1775 }
1776 if (VT.isInteger()) {
1777 unsigned BitWidth = VT.getSizeInBits();
1778 unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
1779 return (BitWidth + RegWidth - 1) / RegWidth;
1780 }
1781 llvm_unreachable("Unsupported extended type!");
1782 }
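 // Worked example of the integer path above: for i140 on a 32-bit target,
 // BitWidth == 140 and RegWidth == 32, so (140 + 31) / 32 == 5 registers,
 // matching the i140 example in the comment on getNumRegisters.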
1783
1784 /// Certain combinations of ABIs, Targets and features require that types
1785 /// are legal for some operations and not for other operations.
1786 /// For MIPS all vector types must be passed through the integer register set.
1787 virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context,
1788 CallingConv::ID CC, EVT VT) const {
1789 return getRegisterType(Context, VT);
1790 }
1791
1792 /// Certain targets require unusual breakdowns of certain types. For MIPS,
1793 /// this occurs when a vector type is used, as vectors are passed through the
1794 /// integer register set.
1795 virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context,
1796 CallingConv::ID CC,
1797 EVT VT) const {
1798 return getNumRegisters(Context, VT);
1799 }
1800
1801 /// Certain targets have context sensitive alignment requirements, where one
1802 /// type has the alignment requirement of another type.
1803 virtual Align getABIAlignmentForCallingConv(Type *ArgTy,
1804 const DataLayout &DL) const {
1805 return DL.getABITypeAlign(ArgTy);
1806 }
1807
1808 /// If true, then instruction selection should seek to shrink the FP constant
1809 /// of the specified type to a smaller type in order to save space and / or
1810 /// reduce runtime.
1811 virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
1812
1813 /// Return true if it is profitable to reduce a load to a smaller type.
1814 /// Example: (i16 (trunc (i32 (load x))) -> i16 load x
1815 virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
1816 EVT NewVT) const {
1817 // By default, assume that it is cheaper to extract a subvector from a wide
1818 // vector load rather than creating multiple narrow vector loads.
1819 if (NewVT.isVector() && !Load->hasOneUse())
1820 return false;
1821
1822 return true;
1823 }
1824
1825 /// Return true (the default) if it is profitable to remove a sext_inreg(x)
1826 /// where the sext is redundant, and use x directly.
1827 virtual bool shouldRemoveRedundantExtend(SDValue Op) const { return true; }
1828
1829 /// Indicates if any padding is guaranteed to go at the most significant bits
1830 /// when storing the type to memory and the type size isn't equal to the store
1831 /// size.
1832 bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const {
1833 return VT.isScalarInteger() && !VT.isByteSized();
1834 }
1835
1836 /// When splitting a value of the specified type into parts, does the Lo
1837 /// or Hi part come first? This usually follows the endianness, except
1838 /// for ppcf128, where the Hi part always comes first.
1839 bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
1840 return DL.isBigEndian() || VT == MVT::ppcf128;
1841 }
1842
1843 /// If true, the target has custom DAG combine transformations that it can
1844 /// perform for the specified node.
1845 bool hasTargetDAGCombine(ISD::NodeType NT) const {
1846 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
1847 return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
1848 }
1849
1849
1850 unsigned getGatherAllAliasesMaxDepth() const {
1851 return GatherAllAliasesMaxDepth;
1852 }
1853
1854 /// Returns the size of the platform's va_list object.
1855 virtual unsigned getVaListSizeInBits(const DataLayout &DL) const {
1856 return getPointerTy(DL).getSizeInBits();
1857 }
1858
1859 /// Get maximum # of store operations permitted for llvm.memset
1860 ///
1861 /// This function returns the maximum number of store operations permitted
1862 /// to replace a call to llvm.memset. The value is set by the target at the
1863 /// performance threshold for such a replacement. If OptSize is true,
1864 /// return the limit for functions that have OptSize attribute.
1865 unsigned getMaxStoresPerMemset(bool OptSize) const {
1866 return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
1867 }
1868
1869 /// Get maximum # of store operations permitted for llvm.memcpy
1870 ///
1871 /// This function returns the maximum number of store operations permitted
1872 /// to replace a call to llvm.memcpy. The value is set by the target at the
1873 /// performance threshold for such a replacement. If OptSize is true,
1874 /// return the limit for functions that have OptSize attribute.
1875 unsigned getMaxStoresPerMemcpy(bool OptSize) const {
1876 return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
1877 }
1878
1879 /// \brief Get maximum # of store operations to be glued together
1880 ///
1881 /// This function returns the maximum number of store operations permitted
1882 /// to glue together during lowering of llvm.memcpy. The value is set by
1883 /// the target at the performance threshold for such a replacement.
1884 virtual unsigned getMaxGluedStoresPerMemcpy() const {
1885 return MaxGluedStoresPerMemcpy;
1886 }
1887
1888 /// Get maximum # of load operations permitted for memcmp
1889 ///
1890 /// This function returns the maximum number of load operations permitted
1891 /// to replace a call to memcmp. The value is set by the target at the
1892 /// performance threshold for such a replacement. If OptSize is true,
1893 /// return the limit for functions that have OptSize attribute.
1894 unsigned getMaxExpandSizeMemcmp(bool OptSize) const {
1895 return OptSize ? MaxLoadsPerMemcmpOptSize : MaxLoadsPerMemcmp;
1896 }
1897
1898 /// Get maximum # of store operations permitted for llvm.memmove
1899 ///
1900 /// This function returns the maximum number of store operations permitted
1901 /// to replace a call to llvm.memmove. The value is set by the target at the
1902 /// performance threshold for such a replacement. If OptSize is true,
1903 /// return the limit for functions that have OptSize attribute.
1904 unsigned getMaxStoresPerMemmove(bool OptSize) const {
1905 return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
1906 }
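 // --- Illustrative configuration sketch (not part of the header) ---
 // Targets seed the limits behind these getters in their constructors; the
 // names are the members referenced above, the values are made up:
 //
 //   MaxStoresPerMemset = 16; MaxStoresPerMemsetOptSize = 4;
 //   MaxStoresPerMemcpy = 8;  MaxStoresPerMemcpyOptSize = 4;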
1907
1908 /// Determine if the target supports unaligned memory accesses.
1909 ///
1910 /// This function returns true if the target allows unaligned memory accesses
1911 /// of the specified type in the given address space. If true, it also returns
1912 /// a relative speed of the unaligned memory access in the last argument by
1913 /// reference. The higher the speed number the faster the operation compared
1914 /// to a number returned by another such call. This is used, for example, in
1915 /// situations where an array copy/move/set is converted to a sequence of
1916 /// store operations. Its use helps to ensure that such replacements don't
1917 /// generate code that causes an alignment error (trap) on the target machine.
1918 virtual bool allowsMisalignedMemoryAccesses(
1919 EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1920 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1921 unsigned * /*Fast*/ = nullptr) const {
1922 return false;
1923 }
1924
1925 /// LLT handling variant.
1926 virtual bool allowsMisalignedMemoryAccesses(
1927 LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
1928 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1929 unsigned * /*Fast*/ = nullptr) const {
1930 return false;
1931 }
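 // --- Illustrative override sketch (hypothetical target) ---
 // Permit unaligned scalar accesses but report them as slower than aligned
 // ones through the relative-speed out-parameter:
 //
 //   bool allowsMisalignedMemoryAccesses(
 //       EVT VT, unsigned AddrSpace, Align Alignment,
 //       MachineMemOperand::Flags Flags, unsigned *Fast) const override {
 //     if (VT.isVector())
 //       return false;
 //     if (Fast)
 //       *Fast = 1; // allowed, but slower than an aligned access
 //     return true;
 //   }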
1932
1933 /// This function returns true if the memory access is aligned or if the
1934 /// target allows this specific unaligned memory access. If the access is
1935 /// allowed, the optional final parameter returns a relative speed of the
1936 /// access (as defined by the target).
1937 bool allowsMemoryAccessForAlignment(
1938 LLVMContext &Context, const DataLayout &DL, EVT VT,
1939 unsigned AddrSpace = 0, Align Alignment = Align(1),
1940 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1941 unsigned *Fast = nullptr) const;
1942
1943 /// Return true if the memory access of this type is aligned or if the target
1944 /// allows this specific unaligned access for the given MachineMemOperand.
1945 /// If the access is allowed, the optional final parameter returns a relative
1946 /// speed of the access (as defined by the target).
1947 bool allowsMemoryAccessForAlignment(LLVMContext &Context,
1948 const DataLayout &DL, EVT VT,
1949 const MachineMemOperand &MMO,
1950 unsigned *Fast = nullptr) const;
1951
1952 /// Return true if the target supports a memory access of this type for the
1953 /// given address space and alignment. If the access is allowed, the optional
1954 /// final parameter returns the relative speed of the access (as defined by
1955 /// the target).
1956 virtual bool
1957 allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1958 unsigned AddrSpace = 0, Align Alignment = Align(1),
1959 MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
1960 unsigned *Fast = nullptr) const;
1961
1962 /// Return true if the target supports a memory access of this type for the
1963 /// given MachineMemOperand. If the access is allowed, the optional
1964 /// final parameter returns the relative access speed (as defined by the
1965 /// target).
1966 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
1967 const MachineMemOperand &MMO,
1968 unsigned *Fast = nullptr) const;
1969
1970 /// LLT handling variant.
1971 bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, LLT Ty,
1972 const MachineMemOperand &MMO,
1973 unsigned *Fast = nullptr) const;
1974
1975 /// Returns the target specific optimal type for load and store operations as
1976 /// a result of memset, memcpy, and memmove lowering.
1977 /// It returns EVT::Other if the type should be determined using generic
1978 /// target-independent logic.
1979 virtual EVT
1980 getOptimalMemOpType(const MemOp &Op,
1981 const AttributeList & /*FuncAttributes*/) const {
1982 return MVT::Other;
1983 }
1984
1985 /// LLT returning variant.
1986 virtual LLT
1987 getOptimalMemOpLLT(const MemOp &Op,
1988 const AttributeList & /*FuncAttributes*/) const {
1989 return LLT();
1990 }
1991
1992 /// Returns true if it's safe to use load / store of the specified type to
1993 /// expand memcpy / memset inline.
1994 ///
1995 /// This is mostly true for all types except for some special cases. For
1996 /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
1997 /// fstpl which also does type conversion. Note the specified type doesn't
1998 /// have to be legal as the hook is used before type legalization.
1999 virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
2000
2001 /// Return lower limit for number of blocks in a jump table.
2002 virtual unsigned getMinimumJumpTableEntries() const;
2003
2004 /// Return lower limit of the density in a jump table.
2005 unsigned getMinimumJumpTableDensity(bool OptForSize) const;
2006
2007 /// Return upper limit for number of entries in a jump table.
2008 /// Zero if no limit.
2009 unsigned getMaximumJumpTableSize() const;
2010
2011 virtual bool isJumpTableRelative() const;
2012
2013 /// If a physical register, this specifies the register that
2014 /// llvm.savestack/llvm.restorestack should save and restore.
2016 return StackPointerRegisterToSaveRestore;
2017 }
2018
2019 /// If a physical register, this returns the register that receives the
2020 /// exception address on entry to an EH pad.
2021 virtual Register
2022 getExceptionPointerRegister(const Constant *PersonalityFn) const {
2023 return Register();
2024 }
2025
2026 /// If a physical register, this returns the register that receives the
2027 /// exception typeid on entry to a landing pad.
2028 virtual Register
2029 getExceptionSelectorRegister(const Constant *PersonalityFn) const {
2030 return Register();
2031 }
2032
2033 virtual bool needsFixedCatchObjects() const {
2034 report_fatal_error("Funclet EH is not implemented for this target");
2035 }
2036
2037 /// Return the minimum stack alignment of an argument.
2039 return MinStackArgumentAlignment;
2040 }
2041
2042 /// Return the minimum function alignment.
2043 Align getMinFunctionAlignment() const { return MinFunctionAlignment; }
2044
2045 /// Return the preferred function alignment.
2046 Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
2047
2048 /// Return the preferred loop alignment.
2049 virtual Align getPrefLoopAlignment(MachineLoop *ML = nullptr) const;
2050
2051 /// Return the maximum amount of bytes allowed to be emitted when padding for
2052 /// alignment
2053 virtual unsigned
2054 getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const;
2055
2056 /// Should loops be aligned even when the function is marked OptSize (but not
2057 /// MinSize).
2058 virtual bool alignLoopsWithOptSize() const { return false; }
2059
2060 /// If the target has a standard location for the stack protector guard,
2061 /// returns the address of that location. Otherwise, returns nullptr.
2062 /// DEPRECATED: please override useLoadStackGuardNode and customize
2063 /// LOAD_STACK_GUARD, or customize \@llvm.stackguard().
2064 virtual Value *getIRStackGuard(IRBuilderBase &IRB) const;
2065
2066 /// Inserts necessary declarations for SSP (stack protection) purpose.
2067 /// Should be used only when getIRStackGuard returns nullptr.
2068 virtual void insertSSPDeclarations(Module &M) const;
2069
2070 /// Return the variable that's previously inserted by insertSSPDeclarations,
2071 /// if any, otherwise return nullptr. Should be used only when
2072 /// getIRStackGuard returns nullptr.
2073 virtual Value *getSDagStackGuard(const Module &M) const;
2074
2075 /// If this function returns true, stack protection checks should XOR the
2076 /// frame pointer (or whichever pointer is used to address locals) into the
2077 /// stack guard value before checking it. getIRStackGuard must return nullptr
2078 /// if this returns true.
2079 virtual bool useStackGuardXorFP() const { return false; }
2080
2081 /// If the target has a standard stack protection check function that
2082 /// performs validation and error handling, returns the function. Otherwise,
2083 /// returns nullptr. Must be previously inserted by insertSSPDeclarations.
2084 /// Should be used only when getIRStackGuard returns nullptr.
2085 virtual Function *getSSPStackGuardCheck(const Module &M) const;
2086
2087protected:
2088 Value *getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
2089 bool UseTLS) const;
2090
2091public:
2092 /// Returns the target-specific address of the unsafe stack pointer.
2093 virtual Value *getSafeStackPointerLocation(IRBuilderBase &IRB) const;
2094
2095 /// Returns the name of the symbol used to emit stack probes or the empty
2096 /// string if not applicable.
2097 virtual bool hasStackProbeSymbol(const MachineFunction &MF) const { return false; }
2098
2099 virtual bool hasInlineStackProbe(const MachineFunction &MF) const { return false; }
2100
2101 virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const {
2102 return "";
2103 }
2104
2105 /// Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g. we
2106 /// are happy to sink it into basic blocks. A cast may be free, but not
2107 /// necessarily a no-op. e.g. a free truncate from a 64-bit to 32-bit pointer.
2108 virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const;
2109
2110 /// Return true if the pointer arguments to CI should be aligned by aligning
2111 /// the object whose address is being passed. If so then MinSize is set to the
2112 /// minimum size the object must be to be aligned and PrefAlign is set to the
2113 /// preferred alignment.
2114 virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
2115 Align & /*PrefAlign*/) const {
2116 return false;
2117 }
2118
2119 //===--------------------------------------------------------------------===//
2120 /// \name Helpers for TargetTransformInfo implementations
2121 /// @{
2122
2123 /// Get the ISD node that corresponds to the Instruction class opcode.
2124 int InstructionOpcodeToISD(unsigned Opcode) const;
2125
2126 /// @}
2127
2128 //===--------------------------------------------------------------------===//
2129 /// \name Helpers for atomic expansion.
2130 /// @{
2131
2132 /// Returns the maximum atomic operation size (in bits) supported by
2133 /// the backend. Atomic operations greater than this size (as well
2134 /// as ones that are not naturally aligned), will be expanded by
2135 /// AtomicExpandPass into an __atomic_* library call.
2136 unsigned getMaxAtomicSizeInBitsSupported() const {
2137 return MaxAtomicSizeInBitsSupported;
2138 }
2139
2140 /// Returns the size in bits of the maximum div/rem the backend supports.
2141 /// Larger operations will be expanded by ExpandLargeDivRem.
2142 unsigned getMaxDivRemBitWidthSupported() const {
2143 return MaxDivRemBitWidthSupported;
2144 }
2145
2146 /// Returns the size in bits of the largest fp conversion the backend
2147 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
2148 unsigned getMaxLargeFPConvertBitWidthSupported() const {
2149 return MaxLargeFPConvertBitWidthSupported;
2150 }
2151
2152 /// Returns the size of the smallest cmpxchg or ll/sc instruction
2153 /// the backend supports. Any smaller operations are widened in
2154 /// AtomicExpandPass.
2155 ///
2156 /// Note that *unlike* operations above the maximum size, atomic ops
2157 /// are still natively supported below the minimum; they just
2158 /// require a more complex expansion.
2159 unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
2160
2161 /// Whether the target supports unaligned atomic operations.
2162 bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
2163
2164 /// Whether AtomicExpandPass should automatically insert fences and reduce
2165 /// ordering for this atomic. This should be true for most architectures with
2166 /// weak memory ordering. Defaults to false.
2167 virtual bool shouldInsertFencesForAtomic(const Instruction *I) const {
2168 return false;
2169 }
2170
2171 /// Whether AtomicExpandPass should automatically insert a trailing fence
2172 /// without reducing the ordering for this atomic. Defaults to false.
2173 virtual bool
2174 shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const {
2175 return false;
2176 }
2177
2178 /// Perform a load-linked operation on Addr, returning a "Value *" with the
2179 /// corresponding pointee type. This may entail some non-trivial operations to
2180 /// truncate or reconstruct types that will be illegal in the backend. See
2181 /// ARMISelLowering for an example implementation.
2182 virtual Value *emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
2183 Value *Addr, AtomicOrdering Ord) const {
2184 llvm_unreachable("Load linked unimplemented on this target");
2185 }
2186
2187 /// Perform a store-conditional operation to Addr. Return the status of the
2188 /// store. This should be 0 if the store succeeded, non-zero otherwise.
2189 virtual Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val,
2190 Value *Addr, AtomicOrdering Ord) const {
2191 llvm_unreachable("Store conditional unimplemented on this target");
2192 }
2193
2194 /// Perform a masked atomicrmw using a target-specific intrinsic. This
2195 /// represents the core LL/SC loop which will be lowered at a late stage by
2196 /// the backend. The target-specific intrinsic returns the loaded value and
2197 /// is not responsible for masking and shifting the result.
2198 virtual Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder,
2199 AtomicRMWInst *AI,
2200 Value *AlignedAddr, Value *Incr,
2201 Value *Mask, Value *ShiftAmt,
2202 AtomicOrdering Ord) const {
2203 llvm_unreachable("Masked atomicrmw expansion unimplemented on this target");
2204 }
2205
2206 /// Perform an atomicrmw expansion in a target-specific way. This is
2207 /// expected to be called when masked atomicrmw and bit test atomicrmw don't
2208 /// work, and the target supports another way to lower atomicrmw.
2209 virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const {
2210 llvm_unreachable(
2211 "Generic atomicrmw expansion unimplemented on this target");
2212 }
2213
2214 /// Perform a cmpxchg expansion using a target-specific method.
2215 virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const {
2216 llvm_unreachable("Generic cmpxchg expansion unimplemented on this target");
2217 }
2218
2219 /// Perform a bit test atomicrmw using a target-specific intrinsic. This
2220 /// represents the combined bit test intrinsic which will be lowered at a late
2221 /// stage by the backend.
2222 virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2223 llvm_unreachable(
2224 "Bit test atomicrmw expansion unimplemented on this target");
2225 }
2226
2227 /// Perform an atomicrmw whose result is only used by comparison, using a
2228 /// target-specific intrinsic. This represents the combined atomic and compare
2229 /// intrinsic which will be lowered at a late stage by the backend.
2230 virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
2231 llvm_unreachable(
2232 "Compare arith atomicrmw expansion unimplemented on this target");
2233 }
2234
2235 /// Perform a masked cmpxchg using a target-specific intrinsic. This
2236 /// represents the core LL/SC loop which will be lowered at a late stage by
2237 /// the backend. The target-specific intrinsic returns the loaded value and
2238 /// is not responsible for masking and shifting the result.
2239 virtual Value *emitMaskedAtomicCmpXchgIntrinsic(
2240 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2241 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2242 llvm_unreachable("Masked cmpxchg expansion unimplemented on this target");
2243 }
2244
2245 //===--------------------------------------------------------------------===//
2246 /// \name KCFI check lowering.
2247 /// @{
2248
2249 virtual MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
2250 MachineBasicBlock::instr_iterator &MBBI,
2251 const TargetInstrInfo *TII) const {
2252 llvm_unreachable("KCFI is not supported on this target");
2253 }
2254
2255 /// @}
2256
2257 /// Inserts in the IR a target-specific intrinsic specifying a fence.
2258 /// It is called by AtomicExpandPass before expanding an
2259 /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad
2260 /// if shouldInsertFencesForAtomic returns true.
2261 ///
2262 /// Inst is the original atomic instruction, prior to other expansions that
2263 /// may be performed.
2264 ///
2265 /// This function should either return a nullptr, or a pointer to an IR-level
2266 /// Instruction*. Even complex fence sequences can be represented by a
2267 /// single Instruction* through an intrinsic to be lowered later.
2268 ///
2269 /// The default implementation emits an IR fence before any release (or
2270 /// stronger) operation that stores, and after any acquire (or stronger)
2271 /// operation. This is generally a correct implementation, but backends may
2272 /// override if they wish to use alternative schemes (e.g. the PowerPC
2273 /// standard ABI uses a fence before a seq_cst load instead of after a
2274 /// seq_cst store).
2275 /// @{
2276 virtual Instruction *emitLeadingFence(IRBuilderBase &Builder,
2277 Instruction *Inst,
2278 AtomicOrdering Ord) const;
2279
2280 virtual Instruction *emitTrailingFence(IRBuilderBase &Builder,
2281 Instruction *Inst,
2282 AtomicOrdering Ord) const;
2283 /// @}
2284
2285 // Emits code that executes when the comparison result in the ll/sc
2286 // expansion of a cmpxchg instruction is such that the store-conditional will
2287 // not execute. This makes it possible to balance out the load-linked with
2288 // a dedicated instruction, if desired.
2289 // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
2290 // be unnecessarily held, except if clrex, inserted by this hook, is executed.
2291 virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
2292
2293 /// Returns true if arguments should be sign-extended in lib calls.
2294 virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const {
2295 return IsSigned;
2296 }
2297
2298 /// Returns true if arguments should be extended in lib calls.
2299 virtual bool shouldExtendTypeInLibCall(EVT Type) const {
2300 return true;
2301 }
2302
2303 /// Returns how the given (atomic) load should be expanded by the
2304 /// IR-level AtomicExpand pass.
2305 virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
2306 return AtomicExpansionKind::None;
2307 }
2308
2309 /// Returns how the given (atomic) load should be cast by the IR-level
2310 /// AtomicExpand pass.
2311 virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const {
2312 if (LI->getType()->isFloatingPointTy())
2313 return AtomicExpansionKind::CastToInteger;
2314 return AtomicExpansionKind::None;
2315 }
2316
2317 /// Returns how the given (atomic) store should be expanded by the IR-level
2318 /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
2319 /// to use an atomicrmw xchg.
2320 virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
2321 return AtomicExpansionKind::None;
2322 }
2323
2324 /// Returns how the given (atomic) store should be cast by the IR-level
2325 /// AtomicExpand pass. For instance AtomicExpansionKind::CastToInteger
2326 /// will try to cast the operands to integer values.
2327 virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const {
2328 if (SI->getValueOperand()->getType()->isFloatingPointTy())
2329 return AtomicExpansionKind::CastToInteger;
2330 return AtomicExpansionKind::None;
2331 }
2332
2333 /// Returns how the given atomic cmpxchg should be expanded by the IR-level
2334 /// AtomicExpand pass.
2335 virtual AtomicExpansionKind
2336 shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
2337 return AtomicExpansionKind::None;
2338 }
2339
2340 /// Returns how the IR-level AtomicExpand pass should expand the given
2341 /// AtomicRMW, if at all. Default is to never expand.
2342 virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2343 return RMW->isFloatingPointOperation() ?
2344 AtomicExpansionKind::CastToInteger : AtomicExpansionKind::None;
2345 }
2346
2347 /// Returns how the given atomic atomicrmw should be cast by the IR-level
2348 /// AtomicExpand pass.
2349 virtual AtomicExpansionKind
2350 shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const {
2351 if (RMWI->getOperation() == AtomicRMWInst::Xchg &&
2352 (RMWI->getValOperand()->getType()->isFloatingPointTy() ||
2353 RMWI->getValOperand()->getType()->isPointerTy()))
2354 return AtomicExpansionKind::CastToInteger;
2355
2356 return AtomicExpansionKind::None;
2357 }
2358
2359 /// On some platforms, an AtomicRMW that never actually modifies the value
2360 /// (such as fetch_add of 0) can be turned into a fence followed by an
2361 /// atomic load. This may sound useless, but it makes it possible for the
2362 /// processor to keep the cacheline shared, dramatically improving
2363 /// performance. And such idempotent RMWs are useful for implementing some
2364 /// kinds of locks, see for example (justification + benchmarks):
2365 /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
2366 /// This method tries doing that transformation, returning the atomic load if
2367 /// it succeeds, and nullptr otherwise.
2368 /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
2369 /// another round of expansion.
2370 virtual LoadInst *
2371 lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
2372 return nullptr;
2373 }
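 // Example of the transformation described above (the exact orderings are
 // target-chosen; this is one plausible lowering):
 //   atomicrmw add ptr %p, i32 0 seq_cst
 // becomes
 //   fence seq_cst
 //   %v = load atomic i32, ptr %p monotonic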
2374
2375 /// Returns how the platform's atomic operations are extended (ZERO_EXTEND,
2376 /// SIGN_EXTEND, or ANY_EXTEND).
2377 virtual ISD::NodeType getExtendForAtomicOps() const {
2378 return ISD::ZERO_EXTEND;
2379 }
2380
2381 /// Returns how the platform's atomic compare and swap expects its comparison
2382 /// value to be extended (ZERO_EXTEND, SIGN_EXTEND, or ANY_EXTEND). This is
2383 /// separate from getExtendForAtomicOps, which is concerned with the
2384 /// sign-extension of the instruction's output, whereas here we are concerned
2385 /// with the sign-extension of the input. For targets with compare-and-swap
2386 /// instructions (or sub-word comparisons in their LL/SC loop expansions),
2387 /// the input can be ANY_EXTEND, but the output will still have a specific
2388 /// extension.
2389 virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const {
2390 return ISD::ANY_EXTEND;
2391 }
2392
2393 /// @}
2394
2395 /// Returns true if we should normalize
2396 /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
2397 /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
2398 /// that it saves us from materializing N0 and N1 in an integer register.
2399 /// Targets that are able to perform and/or on flags should return false here.
2400 virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
2401 EVT VT) const {
2402 // If a target has multiple condition registers, then it likely has logical
2403 // operations on those registers.
2404 if (hasMultipleConditionRegisters())
2405 return false;
2406 // Only do the transform if the value won't be split into multiple
2407 // registers.
2408 LegalizeTypeAction Action = getTypeAction(Context, VT);
2409 return Action != TypeExpandInteger && Action != TypeExpandFloat &&
2410 Action != TypeSplitVector;
2411 }
2412
2413 virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const { return true; }
2414
2415 /// Return true if a select of constants (select Cond, C1, C2) should be
2416 /// transformed into simple math ops with the condition value. For example:
2417 /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
2418 virtual bool convertSelectOfConstantsToMath(EVT VT) const {
2419 return false;
2420 }
2421
2422 /// Return true if it is profitable to transform an integer
2423 /// multiplication-by-constant into simpler operations like shifts and adds.
2424 /// This may be true if the target does not directly support the
2425 /// multiplication operation for the specified type or the sequence of simpler
2426 /// ops is faster than the multiply.
2427 virtual bool decomposeMulByConstant(LLVMContext &Context,
2428 EVT VT, SDValue C) const {
2429 return false;
2430 }
2431
2432 /// Return true if it may be profitable to transform
2433 /// (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2).
2434 /// This may not be true if c1 and c2 can be represented as immediates but
2435 /// c1*c2 cannot, for example.
2436 /// The target should check if c1, c2 and c1*c2 can be represented as
2437 /// immediates, or have to be materialized into registers. If it is not sure
2438 /// about some cases, a default true can be returned to let the DAGCombiner
2439 /// decide.
2440 /// AddNode is (add x, c1), and ConstNode is c2.
2441 virtual bool isMulAddWithConstProfitable(SDValue AddNode,
2442 SDValue ConstNode) const {
2443 return true;
2444 }
2445
2446 /// Return true if it is more correct/profitable to use strict FP_TO_INT
2447 /// conversion operations - canonicalizing the FP source value instead of
2448 /// converting all cases and then selecting based on value.
2449 /// This may be true if the target throws exceptions for out of bounds
2450 /// conversions or has fast FP CMOV.
2451 virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
2452 bool IsSigned) const {
2453 return false;
2454 }
2455
2456 /// Return true if it is beneficial to expand an @llvm.powi.* intrinsic.
2457 /// If not optimizing for size, expanding @llvm.powi.* intrinsics is always
2458 /// considered beneficial.
2459 /// If optimizing for size, expansion is only considered beneficial for up to
2460 /// 5 multiplies and a divide (if the exponent is negative).
2461 bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const {
2462 if (Exponent < 0)
2463 Exponent = -Exponent;
2464 uint64_t E = static_cast<uint64_t>(Exponent);
2465 return !OptForSize || (llvm::popcount(E) + Log2_64(E) < 7);
2466 }
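 // Worked examples of the heuristic above: for E == 16,
 // popcount(16) + Log2_64(16) == 1 + 4 == 5 < 7, so expansion is allowed even
 // under OptForSize; for E == 31, 5 + 4 == 9 >= 7, so the libcall is kept when
 // optimizing for size.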
2467
2468 //===--------------------------------------------------------------------===//
2469 // TargetLowering Configuration Methods - These methods should be invoked by
2470 // the derived class constructor to configure this object for the target.
2471 //
2472protected:
2473 /// Specify how the target extends the result of integer and floating point
2474 /// boolean values from i1 to a wider type. See getBooleanContents.
2475 void setBooleanContents(BooleanContent Ty) {
2476 BooleanContents = Ty;
2477 BooleanFloatContents = Ty;
2478 }
2479
2480 /// Specify how the target extends the result of integer and floating point
2481 /// boolean values from i1 to a wider type. See getBooleanContents.
2482 void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
2483 BooleanContents = IntTy;
2484 BooleanFloatContents = FloatTy;
2485 }
2486
2487 /// Specify how the target extends the result of a vector boolean value from a
2488 /// vector of i1 to a wider type. See getBooleanContents.
2489 void setBooleanVectorContents(BooleanContent Ty) {
2490 BooleanVectorContents = Ty;
2491 }
2492
2493 /// Specify the target scheduling preference.
2494 void setSchedulingPreference(Sched::Preference Pref) {
2495 SchedPreferenceInfo = Pref;
2496 }
2497
2498 /// Indicate the minimum number of blocks to generate jump tables.
2499 void setMinimumJumpTableEntries(unsigned Val);
2500
2501 /// Indicate the maximum number of entries in jump tables.
2502 /// Set to zero to generate unlimited jump tables.
2503 void setMaximumJumpTableSize(unsigned);
2504
2505 /// If set to a physical register, this specifies the register that
2506 /// llvm.savestack/llvm.restorestack should save and restore.
2507 void setStackPointerRegisterToSaveRestore(Register R) {
2508 StackPointerRegisterToSaveRestore = R;
2509 }
2510
2511 /// Tells the code generator that the target has multiple (allocatable)
2512 /// condition registers that can be used to store the results of comparisons
2513 /// for use by selects and conditional branches. With multiple condition
2514 /// registers, the code generator will not aggressively sink comparisons into
2515 /// the blocks of their users.
2516 void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
2517 HasMultipleConditionRegisters = hasManyRegs;
2518 }
2519
2520 /// Tells the code generator that the target has BitExtract instructions.
2521 /// The code generator will aggressively sink "shift"s into the blocks of
2522 /// their users if the users will generate "and" instructions which can be
2523 /// combined with "shift" to BitExtract instructions.
2524 void setHasExtractBitsInsn(bool hasExtractInsn = true) {
2525 HasExtractBitsInsn = hasExtractInsn;
2526 }
2527
2528 /// Tells the code generator not to expand logic operations on comparison
2529 /// predicates into separate sequences that increase the amount of flow
2530 /// control.
2531 void setJumpIsExpensive(bool isExpensive = true);
2532
2533 /// Tells the code generator which bitwidths to bypass.
2534 void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
2535 BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
2536 }
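 // Usage sketch: X86, for example, bypasses slow 64-bit divides with 32-bit
 // ones on some subtargets via addBypassSlowDiv(64, 32) in its TargetLowering
 // constructor.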
2537
2538 /// Add the specified register class as an available regclass for the
2539 /// specified value type. This indicates the selector can handle values of
2540 /// that class natively.
2541 void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
2542 assert((unsigned)VT.SimpleTy < std::size(RegClassForVT));
2543 RegClassForVT[VT.SimpleTy] = RC;
2544 }
2545
2546 /// Return the largest legal super-reg register class of the register class
2547 /// for the specified type and its associated "cost".
2548 virtual std::pair<const TargetRegisterClass *, uint8_t>
2549 findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
2550
2551 /// Once all of the register classes are added, this allows us to compute
2552 /// derived properties we expose.
2553 virtual void computeRegisterProperties(const TargetRegisterInfo *TRI);
2554
2555 /// Indicate that the specified operation does not work with the specified
2556 /// type and indicate what to do about it. Note that VT may refer to either
2557 /// the type of a result or that of an operand of Op.
2558 void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action) {
2559 assert(Op < std::size(OpActions[0]) && "Table isn't big enough!");
2560 OpActions[(unsigned)VT.SimpleTy][Op] = Action;
2561 }
2562 void setOperationAction(ArrayRef<unsigned> Ops, MVT VT,
2563 LegalizeAction Action) {
2564 for (auto Op : Ops)
2565 setOperationAction(Op, VT, Action);
2566 }
2567 void setOperationAction(ArrayRef<unsigned> Ops, ArrayRef<MVT> VTs,
2568 LegalizeAction Action) {
2569 for (auto VT : VTs)
2570 setOperationAction(Ops, VT, Action);
2571 }
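 // --- Illustrative usage sketch (not part of the header) ---
 // A typical target constructor configures actions with these helpers; the
 // opcodes and types are examples only:
 //
 //   setOperationAction(ISD::SELECT, MVT::f32, Custom);
 //   setOperationAction({ISD::SDIV, ISD::UDIV}, MVT::i64, Expand);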
2572
2573 /// Indicate that the specified load with extension does not work with the
2574 /// specified type and indicate what to do about it.
2575 void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2576 LegalizeAction Action) {
2577 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2578 MemVT.isValid() && "Table isn't big enough!");
2579 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2580 unsigned Shift = 4 * ExtType;
2581 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &= ~((uint16_t)0xF << Shift);
2582 LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |= (uint16_t)Action << Shift;
2583 }
2584 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2585 LegalizeAction Action) {
2586 for (auto ExtType : ExtTypes)
2587 setLoadExtAction(ExtType, ValVT, MemVT, Action);
2588 }
2589 void setLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2590 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2591 for (auto MemVT : MemVTs)
2592 setLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2593 }
2594
2595 /// Let target indicate that an extending atomic load of the specified type
2596 /// is legal.
2597 void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
2598 LegalizeAction Action) {
2599 assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
2600 MemVT.isValid() && "Table isn't big enough!");
2601 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2602 unsigned Shift = 4 * ExtType;
2603 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] &=
2604 ~((uint16_t)0xF << Shift);
2605 AtomicLoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy] |=
2606 ((uint16_t)Action << Shift);
2607 }
2608 void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT, MVT MemVT,
2609 LegalizeAction Action) {
2610 for (auto ExtType : ExtTypes)
2611 setAtomicLoadExtAction(ExtType, ValVT, MemVT, Action);
2612 }
2613 void setAtomicLoadExtAction(ArrayRef<unsigned> ExtTypes, MVT ValVT,
2614 ArrayRef<MVT> MemVTs, LegalizeAction Action) {
2615 for (auto MemVT : MemVTs)
2616 setAtomicLoadExtAction(ExtTypes, ValVT, MemVT, Action);
2617 }
2618
2619 /// Indicate that the specified truncating store does not work with the
2620 /// specified type and indicate what to do about it.
2621 void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action) {
2622 assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
2623 TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
2624 }
2625
2626 /// Indicate that the specified indexed load does or does not work with the
2627 /// specified type and indicate what to do about it.
2628 ///
2629 /// NOTE: All indexed mode loads are initialized to Expand in
2630 /// TargetLowering.cpp
2631 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, MVT VT,
2632 LegalizeAction Action) {
2633 for (auto IdxMode : IdxModes)
2634 setIndexedModeAction(IdxMode, VT, IMAB_Load, Action);
2635 }
2636
2637 void setIndexedLoadAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2638 LegalizeAction Action) {
2639 for (auto VT : VTs)
2640 setIndexedLoadAction(IdxModes, VT, Action);
2641 }
2642
2643 /// Indicate that the specified indexed store does or does not work with the
2644 /// specified type and indicate what to do about it.
2645 ///
2646 /// NOTE: All indexed mode stores are initialized to Expand in
2647 /// TargetLowering.cpp
2648 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, MVT VT,
2649 LegalizeAction Action) {
2650 for (auto IdxMode : IdxModes)
2651 setIndexedModeAction(IdxMode, VT, IMAB_Store, Action);
2652 }
2653
2654 void setIndexedStoreAction(ArrayRef<unsigned> IdxModes, ArrayRef<MVT> VTs,
2655 LegalizeAction Action) {
2656 for (auto VT : VTs)
2657 setIndexedStoreAction(IdxModes, VT, Action);
2658 }
2659
2660 /// Indicate that the specified indexed masked load does or does not work with
2661 /// the specified type and indicate what to do about it.
2662 ///
2663 /// NOTE: All indexed mode masked loads are initialized to Expand in
2664 /// TargetLowering.cpp
2665 void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT,
2666 LegalizeAction Action) {
2667 setIndexedModeAction(IdxMode, VT, IMAB_MaskedLoad, Action);
2668 }
2669
2670 /// Indicate that the specified indexed masked store does or does not work
2671 /// with the specified type and indicate what to do about it.
2672 ///
2673 /// NOTE: All indexed mode masked stores are initialized to Expand in
2674 /// TargetLowering.cpp
2675 void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT,
2676 LegalizeAction Action) {
2677 setIndexedModeAction(IdxMode, VT, IMAB_MaskedStore, Action);
2678 }
2679
2680 /// Indicate that the specified condition code is or isn't supported on the
2681 /// target and indicate what to do about it.
2682 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, MVT VT,
2683 LegalizeAction Action) {
2684 for (auto CC : CCs) {
2685 assert(VT.isValid() && (unsigned)CC < std::size(CondCodeActions) &&
2686 "Table isn't big enough!");
2687 assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
2688 /// The lower 3 bits of the SimpleTy index into Nth 4bit set from the
2689 /// 32-bit value and the upper 29 bits index into the second dimension of
2690 /// the array to select what 32-bit value to use.
2691 uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
2692 CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
2693 CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
2694 }
2695 }
2696 void setCondCodeAction(ArrayRef<ISD::CondCode> CCs, ArrayRef<MVT> VTs,
2697 LegalizeAction Action) {
2698 for (auto VT : VTs)
2699 setCondCodeAction(CCs, VT, Action);
2700 }
2701
2702 /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
2703 /// to trying a larger integer/fp until it can find one that works. If that
2704 /// default is insufficient, this method can be used by the target to override
2705 /// the default.
2706 void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2707 PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
2708 }
2709
2710 /// Convenience method to set an operation to Promote and specify the type
2711 /// in a single call.
2712 void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
2713 setOperationAction(Opc, OrigVT, Promote);
2714 AddPromotedToType(Opc, OrigVT, DestVT);
2715 }
2716 void setOperationPromotedToType(ArrayRef<unsigned> Ops, MVT OrigVT,
2717 MVT DestVT) {
2718 for (auto Op : Ops) {
2719 setOperationAction(Op, OrigVT, Promote);
2720 AddPromotedToType(Op, OrigVT, DestVT);
2721 }
2722 }
2723
2724 /// Targets should invoke this method for each target independent node that
2725 /// they want to provide a custom DAG combiner for by implementing the
2726 /// PerformDAGCombine virtual method.
2727 void setTargetDAGCombine(ArrayRef<ISD::NodeType> NTs) {
2728 for (auto NT : NTs) {
2729 assert(unsigned(NT >> 3) < std::size(TargetDAGCombineArray));
2730 TargetDAGCombineArray[NT >> 3] |= 1 << (NT & 7);
2731 }
2732 }
2733
2734 /// Set the target's minimum function alignment.
2735 void setMinFunctionAlignment(Align Alignment) {
2736 MinFunctionAlignment = Alignment;
2737 }
2738
2739 /// Set the target's preferred function alignment. This should be set if
2740 /// there is a performance benefit to higher-than-minimum alignment.
2741 void setPrefFunctionAlignment(Align Alignment) {
2742 PrefFunctionAlignment = Alignment;
2743 }
2744
2745 /// Set the target's preferred loop alignment. Default alignment is one, it
2746 /// means the target does not care about loop alignment. The target may also
2747 /// override getPrefLoopAlignment to provide per-loop values.
2748 void setPrefLoopAlignment(Align Alignment) { PrefLoopAlignment = Alignment; }
2749 void setMaxBytesForAlignment(unsigned MaxBytes) {
2750 MaxBytesForAlignment = MaxBytes;
2751 }
2752
2753 /// Set the minimum stack alignment of an argument.
2754 void setMinStackArgumentAlignment(Align Alignment) {
2755 MinStackArgumentAlignment = Alignment;
2756 }
2757
2758 /// Set the maximum atomic operation size supported by the
2759 /// backend. Atomic operations greater than this size (as well as
2760 /// ones that are not naturally aligned), will be expanded by
2761 /// AtomicExpandPass into an __atomic_* library call.
2762 void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits) {
2763 MaxAtomicSizeInBitsSupported = SizeInBits;
2764 }
2765
2766 /// Set the size in bits of the maximum div/rem the backend supports.
2767 /// Larger operations will be expanded by ExpandLargeDivRem.
2768 void setMaxDivRemBitWidthSupported(unsigned SizeInBits) {
2769 MaxDivRemBitWidthSupported = SizeInBits;
2770 }
2771
2772 /// Set the size in bits of the maximum fp convert the backend supports.
2773 /// Larger operations will be expanded by ExpandLargeFPConvert.
2774 void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits) {
2775 MaxLargeFPConvertBitWidthSupported = SizeInBits;
2776 }
2777
2778 /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
2779 void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
2780 MinCmpXchgSizeInBits = SizeInBits;
2781 }
2782
2783 /// Sets whether unaligned atomic operations are supported.
2784 void setSupportsUnalignedAtomics(bool UnalignedSupported) {
2785 SupportsUnalignedAtomics = UnalignedSupported;
2786 }
2787
2788public:
2789 //===--------------------------------------------------------------------===//
2790 // Addressing mode description hooks (used by LSR etc).
2791 //
2792
2793 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
2794 /// instructions reading the address. This allows as much computation as
2795 /// possible to be done in the address mode for that operand. This hook lets
2796 /// targets also pass back when this should be done on intrinsics which
2797 /// load/store.
2798 virtual bool getAddrModeArguments(IntrinsicInst * /*I*/,
2799 SmallVectorImpl<Value*> &/*Ops*/,
2800 Type *&/*AccessTy*/) const {
2801 return false;
2802 }
2803
2804 /// This represents an addressing mode of:
2805 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale
2806 /// If BaseGV is null, there is no BaseGV.
2807 /// If BaseOffs is zero, there is no base offset.
2808 /// If HasBaseReg is false, there is no base register.
2809 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
2810 /// no scale.
2811 /// If ScalableOffset is zero, there is no scalable offset.
2812 struct AddrMode {
2813 GlobalValue *BaseGV = nullptr;
2814 int64_t BaseOffs = 0;
2815 bool HasBaseReg = false;
2816 int64_t Scale = 0;
2817 int64_t ScalableOffset = 0;
2818 AddrMode() = default;
2819 };
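 // Example decomposition into the struct above (illustrative): an x86-style
 // address of the form base + 4*index + 16 is queried as an AddrMode with
 // BaseGV == nullptr, BaseOffs == 16, HasBaseReg == true, Scale == 4 and
 // ScalableOffset == 0.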
2820
2821 /// Return true if the addressing mode represented by AM is legal for this
2822 /// target, for a load/store of the specified type.
2823 ///
2824 /// The type may be VoidTy, in which case only return true if the addressing
2825 /// mode is legal for a load/store of any legal type. TODO: Handle
2826 /// pre/postinc as well.
2827 ///
2828 /// If the address space cannot be determined, it will be -1.
2829 ///
2830 /// TODO: Remove default argument
2831 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
2832 Type *Ty, unsigned AddrSpace,
2833 Instruction *I = nullptr) const;
2834
2835 /// Returns true if the targets addressing mode can target thread local
2836 /// storage (TLS).
2837 virtual bool addressingModeSupportsTLS(const GlobalValue &) const {
2838 return false;
2839 }
2840
2841 /// Return the preferred common base offset.
2842 virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset,
2843 int64_t MaxOffset) const {
2844 return 0;
2845 }
2846
2847 /// Return true if the specified immediate is a legal icmp immediate, that is
2848 /// the target has icmp instructions which can compare a register against the
2849 /// immediate without having to materialize the immediate into a register.
2850 virtual bool isLegalICmpImmediate(int64_t) const {
2851 return true;
2852 }
2853
2854 /// Return true if the specified immediate is a legal add immediate, that is the
2855 /// target has add instructions which can add a register with the immediate
2856 /// without having to materialize the immediate into a register.
2857 virtual bool isLegalAddImmediate(int64_t) const {
2858 return true;
2859 }
2860
2861 /// Return true if adding the specified scalable immediate is legal, that is
2862 /// the target has add instructions which can add a register with the
2863 /// immediate (multiplied by vscale) without having to materialize the
2864 /// immediate into a register.
2865 virtual bool isLegalAddScalableImmediate(int64_t) const { return false; }
2866
2867 /// Return true if the specified immediate is legal for the value input of a
2868 /// store instruction.
2869 virtual bool isLegalStoreImmediate(int64_t Value) const {
2870 // Default implementation assumes that at least 0 works since it is likely
2871 // that a zero register exists or a zero immediate is allowed.
2872 return Value == 0;
2873 }
2874
2875 /// Given a shuffle vector SVI representing a vector splat, return a new
2876 /// scalar type of size equal to SVI's scalar type if the new type is more
2877 /// profitable. Returns nullptr otherwise. For example under MVE float splats
2878 /// are converted to integer to prevent the need to move from SPR to GPR
2879 /// registers.
2880 virtual Type *shouldConvertSplatType(ShuffleVectorInst *SVI) const {
2881 return nullptr;
2882 }
2883
2884 /// Given a set in interconnected phis of type 'From' that are loaded/stored
2885 /// or bitcast to type 'To', return true if the set should be converted to
2886 /// 'To'.
2887 virtual bool shouldConvertPhiType(Type *From, Type *To) const {
2888 return (From->isIntegerTy() || From->isFloatingPointTy()) &&
2889 (To->isIntegerTy() || To->isFloatingPointTy());
2890 }
2891
2892 /// Returns true if the opcode is a commutative binary operation.
2893 virtual bool isCommutativeBinOp(unsigned Opcode) const {
2894 // FIXME: This should get its info from the td file.
2895 switch (Opcode) {
2896 case ISD::ADD:
2897 case ISD::SMIN:
2898 case ISD::SMAX:
2899 case ISD::UMIN:
2900 case ISD::UMAX:
2901 case ISD::MUL:
2902 case ISD::MULHU:
2903 case ISD::MULHS:
2904 case ISD::SMUL_LOHI:
2905 case ISD::UMUL_LOHI:
2906 case ISD::FADD:
2907 case ISD::FMUL:
2908 case ISD::AND:
2909 case ISD::OR:
2910 case ISD::XOR:
2911 case ISD::SADDO:
2912 case ISD::UADDO:
2913 case ISD::ADDC:
2914 case ISD::ADDE:
2915 case ISD::SADDSAT:
2916 case ISD::UADDSAT:
2917 case ISD::FMINNUM:
2918 case ISD::FMAXNUM:
2919 case ISD::FMINNUM_IEEE:
2920 case ISD::FMAXNUM_IEEE:
2921 case ISD::FMINIMUM:
2922 case ISD::FMAXIMUM:
2923 case ISD::FMINIMUMNUM:
2924 case ISD::FMAXIMUMNUM:
2925 case ISD::AVGFLOORS:
2926 case ISD::AVGFLOORU:
2927 case ISD::AVGCEILS:
2928 case ISD::AVGCEILU:
2929 case ISD::ABDS:
2930 case ISD::ABDU:
2931 return true;
2932 default: return false;
2933 }
2934 }
2935
2936 /// Return true if the node is a math/logic binary operator.
2937 virtual bool isBinOp(unsigned Opcode) const {
2938 // A commutative binop must be a binop.
2939 if (isCommutativeBinOp(Opcode))
2940 return true;
2941 // These are non-commutative binops.
2942 switch (Opcode) {
2943 case ISD::SUB:
2944 case ISD::SHL:
2945 case ISD::SRL:
2946 case ISD::SRA:
2947 case ISD::ROTL:
2948 case ISD::ROTR:
2949 case ISD::SDIV:
2950 case ISD::UDIV:
2951 case ISD::SREM:
2952 case ISD::UREM:
2953 case ISD::SSUBSAT:
2954 case ISD::USUBSAT:
2955 case ISD::FSUB:
2956 case ISD::FDIV:
2957 case ISD::FREM:
2958 return true;
2959 default:
2960 return false;
2961 }
2962 }
2963
2964 /// Return true if it's free to truncate a value of type FromTy to type
2965 /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
2966 /// by referencing its sub-register AX.
2967 /// Targets must return false when FromTy <= ToTy.
2968 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
2969 return false;
2970 }
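 // Illustrative sketch (hypothetical override): on a target where i32 -> i16
 // truncation is just a sub-register read, this could be
 //   bool isTruncateFree(Type *FromTy, Type *ToTy) const override {
 //     return FromTy->isIntegerTy(32) && ToTy->isIntegerTy(16);
 //   }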
2971
2972 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
2973 /// whether a call is in tail position. Typically this means that both results
2974 /// would be assigned to the same register or stack slot, but it could mean
2975 /// the target performs adequate checks of its own before proceeding with the
2976 /// tail call. Targets must return false when FromTy <= ToTy.
2977 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
2978 return false;
2979 }
2980
2981 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const { return false; }
2982 virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
2983 return isTruncateFree(getApproximateEVTForLLT(FromTy, Ctx),
2984 getApproximateEVTForLLT(ToTy, Ctx));
2985 }
2986
2987 /// Return true if truncating the specific node Val to type VT2 is free.
2988 virtual bool isTruncateFree(SDValue Val, EVT VT2) const {
2989 // Fallback to type matching.
2990 return isTruncateFree(Val.getValueType(), VT2);
2991 }
2992
2993 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
2994
2995 /// Return true if the extension represented by \p I is free.
2996 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
2997 /// this method can use the context provided by \p I to decide
2998 /// whether or not \p I is free.
2999 /// This method extends the behavior of the is[Z|FP]ExtFree family.
3000 /// In other words, if is[Z|FP]ExtFree returns true, then this method
3001 /// returns true as well. The converse is not true.
3002 /// The target can perform the adequate checks by overriding isExtFreeImpl.
3003 /// \pre \p I must be a sign, zero, or fp extension.
3004 bool isExtFree(const Instruction *I) const {
3005 switch (I->getOpcode()) {
3006 case Instruction::FPExt:
3007 if (isFPExtFree(EVT::getEVT(I->getType()),
3008 EVT::getEVT(I->getOperand(0)->getType())))
3009 return true;
3010 break;
3011 case Instruction::ZExt:
3012 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
3013 return true;
3014 break;
3015 case Instruction::SExt:
3016 break;
3017 default:
3018 llvm_unreachable("Instruction is not an extension");
3019 }
3020 return isExtFreeImpl(I);
3021 }
3022
3023 /// Return true if \p Load and \p Ext can form an ExtLoad.
3024 /// For example, in AArch64
3025 /// %L = load i8, i8* %ptr
3026 /// %E = zext i8 %L to i32
3027 /// can be lowered into one load instruction
3028 /// ldrb w0, [x0]
3029 bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
3030 const DataLayout &DL) const {
3031 EVT VT = getValueType(DL, Ext->getType());
3032 EVT LoadVT = getValueType(DL, Load->getType());
3033
3034 // If the load has other users and the truncate is not free, the ext
3035 // probably isn't free.
3036 if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
3037 !isTruncateFree(Ext->getType(), Load->getType()))
3038 return false;
3039
3040 // Check whether the target supports casts folded into loads.
3041 unsigned LType;
3042 if (isa<ZExtInst>(Ext))
3043 LType = ISD::ZEXTLOAD;
3044 else {
3045 assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
3046 LType = ISD::SEXTLOAD;
3047 }
3048
3049 return isLoadExtLegal(LType, VT, LoadVT);
3050 }
3051
3052 /// Return true if any actual instruction that defines a value of type FromTy
3053 /// implicitly zero-extends the value to ToTy in the result register.
3054 ///
3055 /// The function should return true when it is likely that the truncate can
3056 /// be freely folded with an instruction defining a value of FromTy. If
3057 /// the defining instruction is unknown (because you're looking at a
3058 /// function argument, PHI, etc.) then the target may require an
3059 /// explicit truncate, which is not necessarily free, but this function
3060 /// does not deal with those cases.
3061 /// Targets must return false when FromTy >= ToTy.
3062 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
3063 return false;
3064 }
3065
3066 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const { return false; }
3067 virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const {
3068 return isZExtFree(getApproximateEVTForLLT(FromTy, Ctx),
3069 getApproximateEVTForLLT(ToTy, Ctx));
3070 }
3071
3072 /// Return true if zero-extending the specific node Val to type VT2 is free
3073 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
3074 /// because it's folded such as X86 zero-extending loads).
3075 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
3076 return isZExtFree(Val.getValueType(), VT2);
3077 }
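 // Illustrative sketch (hypothetical override): a target whose byte loads
 // implicitly clear the high bits, like ARM's ldrb, might report
 //   bool isZExtFree(SDValue Val, EVT VT2) const override {
 //     return Val.getOpcode() == ISD::LOAD &&
 //            Val.getValueType() == MVT::i8 && VT2 == MVT::i32;
 //   }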
3078
3079 /// Return true if sign-extension from FromTy to ToTy is cheaper than
3080 /// zero-extension.
3081 virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const {
3082 return false;
3083 }
3084
3085 /// Return true if this constant should be sign extended when promoting to
3086 /// a larger type.
3087 virtual bool signExtendConstant(const ConstantInt *C) const { return false; }
3088
3089 /// Try to optimize extending or truncating conversion instructions (like
3090 /// zext, trunc, fptoui, uitofp) for the target.
3091 virtual bool
3092 optimizeExtendOrTruncateConversion(Instruction *I, Loop *L,
3093 const TargetTransformInfo &TTI) const {
3094 return false;
3095 }
3096
3097 /// Return true if the target supplies and combines to a paired load
3098 /// two loaded values of type LoadedType next to each other in memory.
3099 /// RequiredAlignment gives the minimal alignment constraints that must be met
3100 /// to be able to select this paired load.
3101 ///
3102 /// This information is *not* used to generate actual paired loads, but it is
3103 /// used to generate a sequence of loads that is easier to combine into a
3104 /// paired load.
3105 /// For instance, something like this:
3106 /// a = load i64* addr
3107 /// b = trunc i64 a to i32
3108 /// c = lshr i64 a, 32
3109 /// d = trunc i64 c to i32
3110 /// will be optimized into:
3111 /// b = load i32* addr1
3112 /// d = load i32* addr2
3113 /// Where addr1 = addr2 +/- sizeof(i32).
3114 ///
3115 /// In other words, unless the target performs post-isel load combining,
3116 /// this information should not be provided because it will generate more
3117 /// loads.
3118 virtual bool hasPairedLoad(EVT /*LoadedType*/,
3119 Align & /*RequiredAlignment*/) const {
3120 return false;
3121 }
3122
3123 /// Return true if the target has a vector blend instruction.
3124 virtual bool hasVectorBlend() const { return false; }
3125
3126 /// Get the maximum supported factor for interleaved memory accesses.
3127 /// Default to be the minimum interleave factor: 2.
3128 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
3129
3130 /// Lower an interleaved load to target specific intrinsics. Return
3131 /// true on success.
3132 ///
3133 /// \p LI is the vector load instruction.
3134 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
3135 /// \p Indices is the corresponding indices for each shufflevector.
3136 /// \p Factor is the interleave factor.
3137 virtual bool lowerInterleavedLoad(LoadInst *LI,
3138 ArrayRef<ShuffleVectorInst *> Shuffles,
3139 ArrayRef<unsigned> Indices,
3140 unsigned Factor) const {
3141 return false;
3142 }
3143
3144 /// Lower an interleaved store to target specific intrinsics. Return
3145 /// true on success.
3146 ///
3147 /// \p SI is the vector store instruction.
3148 /// \p SVI is the shufflevector to RE-interleave the stored vector.
3149 /// \p Factor is the interleave factor.
3150 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
3151 unsigned Factor) const {
3152 return false;
3153 }
3154
3155 /// Lower a deinterleave intrinsic to a target specific load intrinsic.
3156 /// Return true on success. Currently only supports
3157 /// llvm.vector.deinterleave2
3158 ///
3159 /// \p DI is the deinterleave intrinsic.
3160 /// \p LI is the accompanying load instruction
3161 /// \p DeadInsts is a reference to a vector that keeps track of dead
3162 /// instructions during transformations.
3163 virtual bool lowerDeinterleaveIntrinsicToLoad(
3164 IntrinsicInst *DI, LoadInst *LI,
3165 SmallVectorImpl<Instruction *> &DeadInsts) const {
3166 return false;
3167 }
3168
3169 /// Lower an interleave intrinsic to a target specific store intrinsic.
3170 /// Return true on success. Currently only supports
3171 /// llvm.vector.interleave2
3172 ///
3173 /// \p II is the interleave intrinsic.
3174 /// \p SI is the accompanying store instruction
3175 /// \p DeadInsts is a reference to a vector that keeps track of dead
3176 /// instructions during transformations.
3177 virtual bool lowerInterleaveIntrinsicToStore(
3178 IntrinsicInst *II, StoreInst *SI,
3179 SmallVectorImpl<Instruction *> &DeadInsts) const {
3180 return false;
3181 }
3182
3183 /// Return true if an fpext operation is free (for instance, because
3184 /// single-precision floating-point numbers are implicitly extended to
3185 /// double-precision).
3186 virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const {
3187 assert(SrcVT.isFloatingPoint() && DestVT.isFloatingPoint() &&
3188 "invalid fpext types");
3189 return false;
3190 }
3191
3192 /// Return true if an fpext operation input to an \p Opcode operation is free
3193 /// (for instance, because half-precision floating-point numbers are
3194 /// implicitly extended to single precision) for an FMA instruction.
3195 virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
3196 LLT DestTy, LLT SrcTy) const {
3197 return false;
3198 }
3199
3200 /// Return true if an fpext operation input to an \p Opcode operation is free
3201 /// (for instance, because half-precision floating-point numbers are
3202 /// implicitly extended to single precision) for an FMA instruction.
3203 virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
3204 EVT DestVT, EVT SrcVT) const {
3205 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
3206 "invalid fpext types");
3207 return isFPExtFree(DestVT, SrcVT);
3208 }
3209
3210 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
3211 /// extend node) is profitable.
3212 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
3213
3214 /// Return true if an fneg operation is free to the point where it is never
3215 /// worthwhile to replace it with a bitwise operation.
3216 virtual bool isFNegFree(EVT VT) const {
3217 assert(VT.isFloatingPoint());
3218 return false;
3219 }
3220
3221 /// Return true if an fabs operation is free to the point where it is never
3222 /// worthwhile to replace it with a bitwise operation.
3223 virtual bool isFAbsFree(EVT VT) const {
3224 assert(VT.isFloatingPoint());
3225 return false;
3226 }
3227
3228 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3229 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3230 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3231 ///
3232 /// NOTE: This may be called before legalization on types for which FMAs are
3233 /// not legal, but should return true if those types will eventually legalize
3234 /// to types that support FMAs. After legalization, it will only be called on
3235 /// types that support FMAs (via Legal or Custom actions)
3236 ///
3237 /// Targets that care about soft float support should return false when soft
3238 /// float code is being generated (i.e. use-soft-float).
3239 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3240 EVT) const {
3241 return false;
3242 }
3243
3244 /// Return true if an FMA operation is faster than a pair of fmul and fadd
3245 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
3246 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
3247 ///
3248 /// NOTE: This may be called before legalization on types for which FMAs are
3249 /// not legal, but should return true if those types will eventually legalize
3250 /// to types that support FMAs. After legalization, it will only be called on
3251 /// types that support FMAs (via Legal or Custom actions)
3252 virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
3253 LLT) const {
3254 return false;
3255 }
3256
3257 /// IR version
3258 virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const {
3259 return false;
3260 }
3261
3262 /// Returns true if \p MI can be combined with another instruction to
3263 /// form TargetOpcode::G_FMAD. \p MI may be a TargetOpcode::G_FADD,
3264 /// TargetOpcode::G_FSUB, or a TargetOpcode::G_FMUL which will be
3265 /// distributed into an fadd/fsub.
3266 virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const {
3267 assert((MI.getOpcode() == TargetOpcode::G_FADD ||
3268 MI.getOpcode() == TargetOpcode::G_FSUB ||
3269 MI.getOpcode() == TargetOpcode::G_FMUL) &&
3270 "unexpected node in FMAD forming combine");
3271 switch (Ty.getScalarSizeInBits()) {
3272 case 16:
3273 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f16);
3274 case 32:
3275 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f32);
3276 case 64:
3277 return isOperationLegal(TargetOpcode::G_FMAD, MVT::f64);
3278 default:
3279 break;
3280 }
3281
3282 return false;
3283 }
3284
3285 /// Returns true if \p N can be combined with another node to form an
3286 /// ISD::FMAD. \p N may be an ISD::FADD, ISD::FSUB, or an ISD::FMUL which
3287 /// will be distributed into an fadd/fsub.
3288 virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const {
3289 assert((N->getOpcode() == ISD::FADD || N->getOpcode() == ISD::FSUB ||
3290 N->getOpcode() == ISD::FMUL) &&
3291 "unexpected node in FMAD forming combine");
3292 return isOperationLegal(ISD::FMAD, N->getValueType(0));
3293 }
3294
3295 // Return true when the decision to generate FMAs (or FMS, FMLA, etc.) rather
3296 // than FMUL and ADD is delegated to the machine combiner.
3297 virtual bool generateFMAsInMachineCombiner(EVT VT,
3298 CodeGenOptLevel OptLevel) const {
3299 return false;
3300 }
3301
3302 /// Return true if it's profitable to narrow operations of type SrcVT to
3303 /// DestVT. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
3304 /// i32 to i16.
3305 virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const {
3306 return false;
3307 }
3308
3309 /// Return true if pulling a binary operation into a select with an identity
3310 /// constant is profitable. This is the inverse of an IR transform.
3311 /// Example: X + (Cond ? Y : 0) --> Cond ? (X + Y) : X
3312 virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
3313 EVT VT) const {
3314 return false;
3315 }
3316
3317 /// Return true if it is beneficial to convert a load of a constant to
3318 /// just the constant itself.
3319 /// On some targets it might be more efficient to use a combination of
3320 /// arithmetic instructions to materialize the constant instead of loading it
3321 /// from a constant pool.
3322 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
3323 Type *Ty) const {
3324 return false;
3325 }
3326
3327 /// Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type
3328 /// from this source type with this index. This is needed because
3329 /// EXTRACT_SUBVECTOR usually has custom lowering that depends on the index of
3330 /// the first element, and only the target knows which lowering is cheap.
3331 virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3332 unsigned Index) const {
3333 return false;
3334 }
3335
3336 /// Try to convert an extract element of a vector binary operation into an
3337 /// extract element followed by a scalar operation.
3338 virtual bool shouldScalarizeBinop(SDValue VecOp) const {
3339 return false;
3340 }
3341
3342 /// Return true if extraction of a scalar element from the given vector type
3343 /// at the given index is cheap. For example, if scalar operations occur on
3344 /// the same register file as vector operations, then an extract element may
3345 /// be a sub-register rename rather than an actual instruction.
3346 virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const {
3347 return false;
3348 }
3349
3350 /// Try to convert math with an overflow comparison into the corresponding DAG
3351 /// node operation. Targets may want to override this independently of whether
3352 /// the operation is legal/custom for the given type because it may obscure
3353 /// matching of other patterns.
3354 virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT,
3355 bool MathUsed) const {
3356 // TODO: The default logic is inherited from code in CodeGenPrepare.
3357 // The opcode should not make a difference by default?
3358 if (Opcode != ISD::UADDO)
3359 return false;
3360
3361 // Allow the transform as long as we have an integer type that is not
3362 // obviously illegal and unsupported and if the math result is used
3363 // besides the overflow check. On some targets (e.g. SPARC), it is
3364 /// not profitable to form an overflow op if the math result has no
3365 // concrete users.
3366 if (VT.isVector())
3367 return false;
3368 return MathUsed && (VT.isSimple() || !isOperationExpand(Opcode, VT));
3369 }
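 // For reference, the IR pattern CodeGenPrepare recognizes for ISD::UADDO
 // looks like:
 //   %sum = add i32 %a, %b
 //   %ovf = icmp ult i32 %sum, %a   ; unsigned wrap check
 // When this hook returns true, both values come from one uaddo node.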
3370
3371 // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
3372 // even if the vector itself has multiple uses.
3373 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
3374 return false;
3375 }
3376
3377 // Return true if CodeGenPrepare should consider splitting the large offset of
3378 // a GEP to make the GEP fit into the addressing mode so that it can be sunk
3379 // into the same blocks as its users.
3380 virtual bool shouldConsiderGEPOffsetSplit() const { return false; }
3381
3382 /// Return true if creating a shift of the type by the given
3383 /// amount is not profitable.
3384 virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const {
3385 return false;
3386 }
3387
3388 // Should we fold (select_cc seteq (and x, y), 0, 0, A) -> (and (sra (shl x))
3389 // A) where y has a single bit set?
3390 virtual bool shouldFoldSelectWithSingleBitTest(EVT VT,
3391 const APInt &AndMask) const {
3392 unsigned ShCt = AndMask.getBitWidth() - 1;
3393 return !shouldAvoidTransformToShift(VT, ShCt);
3394 }
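 // Worked example: for VT = i32 and AndMask = 0x8 (only bit 3 set), ShCt is
 // 31; the fold shifts bit 3 up to the sign bit (shl by 28) and replicates it
 // across the register (sra by 31), yielding all-ones or zero to mask A, so
 // no setcc or select is needed.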
3395
3396 /// Does this target require the clearing of high-order bits in a register
3397 /// passed to the fp16 to fp conversion library function.
3398 virtual bool shouldKeepZExtForFP16Conv() const { return false; }
3399
3400 /// Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT
3401 /// when matching min(max(fptoi)) saturation patterns?
3402 virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const {
3403 return isOperationLegalOrCustom(Op, VT);
3404 }
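 // For reference, a saturating f32 -> i8 pattern this hook guards looks like:
 //   %t = fptosi float %x to i32
 //   %lo = call i32 @llvm.smax.i32(i32 %t, i32 -128)
 //   %hi = call i32 @llvm.smin.i32(i32 %lo, i32 127)
 // which can be folded to a single ISD::FP_TO_SINT_SAT when that operation
 // is legal or custom for the target.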
3405
3406 /// Should we expand [US]CMP nodes using two selects and two compares, or by
3407 /// doing arithmetic on boolean types
3408 virtual bool shouldExpandCmpUsingSelects(EVT VT) const { return false; }
3409
3410 /// Does this target support complex deinterleaving
3411 virtual bool isComplexDeinterleavingSupported() const { return false; }
3412
3413 /// Does this target support complex deinterleaving with the given operation
3414 /// and type
3415 virtual bool isComplexDeinterleavingOperationSupported(
3416 ComplexDeinterleavingOperation Operation, Type *Ty) const {
3417 return false;
3418 }
3419
3420 /// Create the IR node for the given complex deinterleaving operation.
3421 /// If one cannot be created using all the given inputs, nullptr should be
3422 /// returned.
3423 virtual Value *createComplexDeinterleavingIR(
3424 IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
3425 ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
3426 Value *Accumulator = nullptr) const {
3427 return nullptr;
3428 }
3429
3430 /// Rename the default libcall routine name for the specified libcall.
3431 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
3432 Libcalls.setLibcallName(Call, Name);
3433 }
3434
3435 void setLibcallName(ArrayRef<RTLIB::Libcall> Calls, const char *Name) {
3436 Libcalls.setLibcallName(Calls, Name);
3437 }
3438
3439 /// Get the libcall routine name for the specified libcall.
3440 const char *getLibcallName(RTLIB::Libcall Call) const {
3441 return Libcalls.getLibcallName(Call);
3442 }
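 // Usage sketch: a port that ships its own runtime can rename a routine in
 // its TargetLowering constructor, e.g.
 //   setLibcallName(RTLIB::ADD_F32, "__mytarget_addsf3");
 // where "__mytarget_addsf3" is a hypothetical routine name.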
3443
3444 /// Override the default CondCode to be used to test the result of the
3445 /// comparison libcall against zero.
3446 /// FIXME: This can't be merged with 'RuntimeLibcallsInfo' because of the ISD.
3447 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
3448 CmpLibcallCCs[Call] = CC;
3449 }
3450
3451
3452 /// Get the CondCode that's to be used to test the result of the comparison
3453 /// libcall against zero.
3454 /// FIXME: This can't be merged with 'RuntimeLibcallsInfo' because of the ISD.
3455 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
3456 return CmpLibcallCCs[Call];
3457 }
3458
3459
3460 /// Set the CallingConv that should be used for the specified libcall.
3461 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
3462 Libcalls.setLibcallCallingConv(Call, CC);
3463 }
3464
3465 /// Get the CallingConv that should be used for the specified libcall.
3466 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
3467 return Libcalls.getLibcallCallingConv(Call);
3468 }
3469
3470 /// Execute target specific actions to finalize target lowering.
3471 /// This is used to set extra flags in MachineFrameInformation and to freeze
3472 /// the set of reserved registers.
3473 /// The default implementation just freezes the set of reserved registers.
3474 virtual void finalizeLowering(MachineFunction &MF) const;
3475
3476 //===----------------------------------------------------------------------===//
3477 // GlobalISel Hooks
3478 //===----------------------------------------------------------------------===//
3479 /// Check whether or not \p MI needs to be moved close to its uses.
3480 virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const;
3481
3482
3483private:
3484 const TargetMachine &TM;
3485
3486 /// Tells the code generator that the target has multiple (allocatable)
3487 /// condition registers that can be used to store the results of comparisons
3488 /// for use by selects and conditional branches. With multiple condition
3489 /// registers, the code generator will not aggressively sink comparisons into
3490 /// the blocks of their users.
3491 bool HasMultipleConditionRegisters;
3492
3493 /// Tells the code generator that the target has BitExtract instructions.
3494 /// The code generator will aggressively sink "shift"s into the blocks of
3495 /// their users if the users will generate "and" instructions which can be
3496 /// combined with "shift" to BitExtract instructions.
3497 bool HasExtractBitsInsn;
3498
3499 /// Tells the code generator to bypass slow divide or remainder
3500 /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
3501 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
3502 /// div/rem when the operands are positive and less than 256.
3503 DenseMap<unsigned int, unsigned int> BypassSlowDivWidths;
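 // Usage sketch: a target with slow 64-bit division can request a 32-bit
 // bypass from its constructor via the protected setter, e.g.
 //   addBypassSlowDiv(64, 32);  // use 32-bit div/rem when the operands fit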
3504
3505 /// Tells the code generator that it shouldn't generate extra flow control
3506 /// instructions and should attempt to combine flow control instructions via
3507 /// predication.
3508 bool JumpIsExpensive;
3509
3510 /// Information about the contents of the high-bits in boolean values held in
3511 /// a type wider than i1. See getBooleanContents.
3512 BooleanContent BooleanContents;
3513
3514 /// Information about the contents of the high-bits in boolean values held in
3515 /// a type wider than i1. See getBooleanContents.
3516 BooleanContent BooleanFloatContents;
3517
3518 /// Information about the contents of the high-bits in boolean vector values
3519 /// when the element type is wider than i1. See getBooleanContents.
3520 BooleanContent BooleanVectorContents;
3521
3522 /// The target scheduling preference: shortest possible total cycles or lowest
3523 /// register usage.
3524 Sched::Preference SchedPreferenceInfo;
3525
3526 /// The minimum alignment that any argument on the stack needs to have.
3527 Align MinStackArgumentAlignment;
3528
3529 /// The minimum function alignment (used when optimizing for size, and to
3530 /// prevent explicitly provided alignment from leading to incorrect code).
3531 Align MinFunctionAlignment;
3532
3533 /// The preferred function alignment (used when alignment unspecified and
3534 /// optimizing for speed).
3535 Align PrefFunctionAlignment;
3536
3537 /// The preferred loop alignment.
3538 Align PrefLoopAlignment;
3539 /// The maximum amount of bytes permitted to be emitted for alignment.
3540 unsigned MaxBytesForAlignment;
3541
3542 /// Size in bits of the maximum atomics size the backend supports.
3543 /// Accesses larger than this will be expanded by AtomicExpandPass.
3544 unsigned MaxAtomicSizeInBitsSupported;
3545
3546 /// Size in bits of the maximum div/rem size the backend supports.
3547 /// Larger operations will be expanded by ExpandLargeDivRem.
3548 unsigned MaxDivRemBitWidthSupported;
3549
3550 /// Size in bits of the largest fp convert size the backend
3551 /// supports. Larger operations will be expanded by ExpandLargeFPConvert.
3552 unsigned MaxLargeFPConvertBitWidthSupported;
3553
3554 /// Size in bits of the minimum cmpxchg or ll/sc operation the
3555 /// backend supports.
3556 unsigned MinCmpXchgSizeInBits;
3557
3558 /// This indicates if the target supports unaligned atomic operations.
3559 bool SupportsUnalignedAtomics;
3560
3561 /// If set to a physical register, this specifies the register that
3562 /// llvm.stacksave/llvm.stackrestore should save and restore.
3563 Register StackPointerRegisterToSaveRestore;
3564
3565 /// This indicates the default register class to use for each ValueType the
3566 /// target supports natively.
3567 const TargetRegisterClass *RegClassForVT[MVT::VALUETYPE_SIZE];
3568 uint16_t NumRegistersForVT[MVT::VALUETYPE_SIZE];
3569 MVT RegisterTypeForVT[MVT::VALUETYPE_SIZE];
3570
3571 /// This indicates the "representative" register class to use for each
3572 /// ValueType the target supports natively. This information is used by the
3573 /// scheduler to track register pressure. By default, the representative
3574 /// register class is the largest legal super-reg register class of the
3575 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
3576 /// representative class would be GR32.
3577 const TargetRegisterClass *RepRegClassForVT[MVT::VALUETYPE_SIZE] = {0};
3578
3579 /// This indicates the "cost" of the "representative" register class for each
3580 /// ValueType. The cost is used by the scheduler to approximate register
3581 /// pressure.
3582 uint8_t RepRegClassCostForVT[MVT::VALUETYPE_SIZE];
3583
3584 /// For any value types we are promoting or expanding, this contains the value
3585 /// type that we are changing to. For Expanded types, this contains one step
3586 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
3587 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
3588 /// the same type (e.g. i32 -> i32).
3589 MVT TransformToType[MVT::VALUETYPE_SIZE];
3590
3591 /// For each operation and each value type, keep a LegalizeAction that
3592 /// indicates how instruction selection should deal with the operation. Most
3593 /// operations are Legal (aka, supported natively by the target), but
3594 /// operations that are not should be described. Note that operations on
3595 /// non-legal value types are not described here.
3596 LegalizeAction OpActions[MVT::VALUETYPE_SIZE][ISD::BUILTIN_OP_END];
3597
3598 /// For each load extension type and each value type, keep a LegalizeAction
3599 /// that indicates how instruction selection should deal with a load of a
3600 /// specific value type and extension type. Uses 4-bits to store the action
3601 /// for each of the 4 load ext types.
3602 uint16_t LoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3603
3604 /// Similar to LoadExtActions, but for atomic loads. Only Legal or Expand
3605 /// (default) values are supported.
3606 uint16_t AtomicLoadExtActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3607
3608 /// For each value type pair keep a LegalizeAction that indicates whether a
3609 /// truncating store of a specific value type and truncating type is legal.
3610 LegalizeAction TruncStoreActions[MVT::VALUETYPE_SIZE][MVT::VALUETYPE_SIZE];
3611
3612 /// For each indexed mode and each value type, keep a quad of LegalizeAction
3613 /// that indicates how instruction selection should deal with the load /
3614 /// store / maskedload / maskedstore.
3615 ///
3616 /// The first dimension is the value_type for the reference. The second
3617 /// dimension represents the various modes for load store.
3618 uint16_t IndexedModeActions[MVT::VALUETYPE_SIZE][ISD::LAST_INDEXED_MODE];
3619
3620 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
3621 /// indicates how instruction selection should deal with the condition code.
3622 ///
3623 /// Because each CC action takes up 4 bits, we need to have the array size be
3624 /// large enough to fit all of the value types. This can be done by rounding
3625 /// up the MVT::VALUETYPE_SIZE value to the next multiple of 8.
3626 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::VALUETYPE_SIZE + 7) / 8];
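 // Sizing sketch: each action needs 4 bits, so one uint32_t column holds the
 // actions for 8 value types; (MVT::VALUETYPE_SIZE + 7) / 8 rounds the type
 // count up to a whole number of such 8-type columns.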
3627
3628 ValueTypeActionImpl ValueTypeActions;
3629
3630private:
3631 /// Targets can specify ISD nodes that they would like PerformDAGCombine
3632 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
3633 /// array.
3634 unsigned char
3635 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
3636
3637 /// For operations that must be promoted to a specific type, this holds the
3638 /// destination type. This map should be sparse, so don't hold it as an
3639 /// array.
3640 ///
3641 /// Targets add entries to this map with AddPromotedToType(..), clients access
3642 /// this with getTypeToPromoteTo(..).
3643 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
3644 PromoteToType;
3645
3646 /// The list of libcalls that the target will use.
3647 RTLIB::RuntimeLibcallsInfo Libcalls;
3648
3649 /// The ISD::CondCode that should be used to test the result of each of the
3650 /// comparison libcall against zero.
3651 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
3652
3653 /// The bits of IndexedModeActions used to store the legalization actions.
3654 /// We store the data as | ML | MS | L | S | each taking 4 bits.
3655 enum IndexedModeActionsBits {
3656 IMAB_Store = 0,
3657 IMAB_Load = 4,
3658 IMAB_MaskedStore = 8,
3659 IMAB_MaskedLoad = 12
3660 };
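 // Layout sketch: each 16-bit IndexedModeActions entry packs four 4-bit
 // actions; e.g. the plain-store action occupies bits 0-3 (IMAB_Store) and
 // the masked-load action bits 12-15 (IMAB_MaskedLoad), matching the
 // | ML | MS | L | S | diagram above.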
3661
3662 void setIndexedModeAction(unsigned IdxMode, MVT VT, unsigned Shift,
3663 LegalizeAction Action) {
3664 assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
3665 (unsigned)Action < 0xf && "Table isn't big enough!");
3666 unsigned Ty = (unsigned)VT.SimpleTy;
3667 IndexedModeActions[Ty][IdxMode] &= ~(0xf << Shift);
3668 IndexedModeActions[Ty][IdxMode] |= ((uint16_t)Action) << Shift;
3669 }
3670
3671 LegalizeAction getIndexedModeAction(unsigned IdxMode, MVT VT,
3672 unsigned Shift) const {
3673 assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
3674 "Table isn't big enough!");
3675 unsigned Ty = (unsigned)VT.SimpleTy;
3676 return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] >> Shift) & 0xf);
3677 }
3678
3679protected:
3680 /// Return true if the extension represented by \p I is free.
3681 /// \pre \p I is a sign, zero, or fp extension and
3682 /// is[Z|FP]ExtFree of the related types is not true.
3683 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
3684
3685 /// Depth that GatherAllAliases should continue looking for chain
3686 /// dependencies when trying to find a more preferable chain. As an
3687 /// approximation, this should be more than the number of consecutive stores
3688 /// expected to be merged.
3689 unsigned GatherAllAliasesMaxDepth;
3690
3691 /// \brief Specify maximum number of store instructions per memset call.
3692 ///
3693 /// When lowering \@llvm.memset this field specifies the maximum number of
3694 /// store operations that may be substituted for the call to memset. Targets
3695 /// must set this value based on the cost threshold for that target. Targets
3696 /// should assume that the memset will be done using as many of the largest
3697 /// store operations first, followed by smaller ones, if necessary, per
3698 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
3699 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
3700 /// store. This only applies to setting a constant array of a constant size.
3701 unsigned MaxStoresPerMemset;
3702 /// Likewise for functions with the OptSize attribute.
3703 unsigned MaxStoresPerMemsetOptSize;
3704
3705 /// \brief Specify maximum number of store instructions per memcpy call.
3706 ///
3707 /// When lowering \@llvm.memcpy this field specifies the maximum number of
3708 /// store operations that may be substituted for a call to memcpy. Targets
3709 /// must set this value based on the cost threshold for that target. Targets
3710 /// should assume that the memcpy will be done using as many of the largest
3711 /// store operations first, followed by smaller ones, if necessary, per
3712 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
3713 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
3714 /// and one 1-byte store. This only applies to copying a constant array of
3715 /// constant size.
3716 unsigned MaxStoresPerMemcpy;
3717 /// Likewise for functions with the OptSize attribute.
3718 unsigned MaxStoresPerMemcpyOptSize;
3719 /// \brief Specify max number of store instructions to glue in inlined memcpy.
3720 ///
3721 /// When memcpy is inlined based on MaxStoresPerMemcpy, specify maximum number
3722 /// of store instructions to keep together. This helps in pairing and
3723 /// vectorization later on.
3724 unsigned MaxGluedStoresPerMemcpy = 0;
3725
3726 /// \brief Specify maximum number of load instructions per memcmp call.
3727 ///
3728 /// When lowering \@llvm.memcmp this field specifies the maximum number of
3729 /// pairs of load operations that may be substituted for a call to memcmp.
3730 /// Targets must set this value based on the cost threshold for that target.
3731 /// Targets should assume that the memcmp will be done using as many of the
3732 /// largest load operations first, followed by smaller ones, if necessary, per
3733 /// alignment restrictions. For example, loading 7 bytes on a 32-bit machine
3734 /// with 32-bit alignment would result in one 4-byte load, one 2-byte load,
3735 /// and one 1-byte load. This only applies to comparing a constant array of
3736 /// constant size.
3737 unsigned MaxLoadsPerMemcmp;
3738 /// Likewise for functions with the OptSize attribute.
3739 unsigned MaxLoadsPerMemcmpOptSize;
3740
3741 /// \brief Specify maximum number of store instructions per memmove call.
3742 ///
3743 /// When lowering \@llvm.memmove this field specifies the maximum number of
3744 /// store instructions that may be substituted for a call to memmove. Targets
3745 /// must set this value based on the cost threshold for that target. Targets
3746 /// should assume that the memmove will be done using as many of the largest
3747 /// store operations first, followed by smaller ones, if necessary, per
3748 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
3749 /// with 8-bit alignment would result in nine 1-byte stores. This only
3750 /// applies to copying a constant array of constant size.
3751 unsigned MaxStoresPerMemmove;
3752 /// Likewise for functions with the OptSize attribute.
3753 unsigned MaxStoresPerMemmoveOptSize;
3754
3755 /// Tells the code generator that select is more expensive than a branch if
3756 /// the branch is usually predicted right.
3757 bool PredictableSelectIsExpensive;
3758
3759 /// \see enableExtLdPromotion.
3760 bool EnableExtLdPromotion;
3761
3762 /// Return true if the value types that can be represented by the specified
3763 /// register class are all legal.
3764 bool isLegalRC(const TargetRegisterInfo &TRI,
3765 const TargetRegisterClass &RC) const;
3766
3767 /// Replace/modify any TargetFrameIndex operands with a target-dependent
3768 /// sequence of memory operands that is recognized by PrologEpilogInserter.
3769 MachineBasicBlock *emitPatchPoint(MachineInstr &MI,
3770 MachineBasicBlock *MBB) const;
3771
3773};
3774
3775/// This class defines information used to lower LLVM code to legal SelectionDAG
3776/// operators that the target instruction selector can accept natively.
3777///
3778/// This class also defines callbacks that targets must implement to lower
3779/// target-specific constructs to SelectionDAG operators.
3780class TargetLowering : public TargetLoweringBase {
3781public:
3782 struct DAGCombinerInfo;
3783 struct MakeLibCallOptions;
3784
3785 TargetLowering(const TargetLowering &) = delete;
3786 TargetLowering &operator=(const TargetLowering &) = delete;
3787
3788 explicit TargetLowering(const TargetMachine &TM);
3789
3790 bool isPositionIndependent() const;
3791
3792 virtual bool isSDNodeSourceOfDivergence(const SDNode *N,
3793 FunctionLoweringInfo *FLI,
3794 UniformityInfo *UA) const {
3795 return false;
3796 }
3797
3798 // Lets the target control the following reassociation of operands: (op (op x,
3799 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3800 // default, consider profitable any case where N0 has a single use. This
3801 // behavior reflects the condition replaced by this target hook call in the
3802 // DAGCombiner. Any particular target can implement its own heuristic to
3803 // restrict common combiner.
3804 virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0,
3805 SDValue N1) const {
3806 return N0.hasOneUse();
3807 }
3808
3809 // Lets the target control the following reassociation of operands: (op (op x,
3810 // c1), y) -> (op (op x, y), c1) where N0 is (op x, c1) and N1 is y. By
3811 // default, consider profitable any case where N0 has a single use. This
3812 // behavior reflects the condition replaced by this target hook call in the
3813 // combiner. Any particular target can implement its own heuristic to
3814 // restrict common combiner.
3815 virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
3816 Register N1) const {
3817 return MRI.hasOneNonDBGUse(N0);
3818 }
3819
3820 virtual bool isSDNodeAlwaysUniform(const SDNode * N) const {
3821 return false;
3822 }
3823
3824 /// Returns true by value, base pointer and offset pointer and addressing mode
3825 /// by reference if the node's address can be legally represented as
3826 /// pre-indexed load / store address.
3827 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
3828 SDValue &/*Offset*/,
3829 ISD::MemIndexedMode &/*AM*/,
3830 SelectionDAG &/*DAG*/) const {
3831 return false;
3832 }
3833
3834 /// Returns true by value, base pointer and offset pointer and addressing mode
3835 /// by reference if this node can be combined with a load / store to form a
3836 /// post-indexed load / store.
3837 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
3838 SDValue &/*Base*/,
3839 SDValue &/*Offset*/,
3840 ISD::MemIndexedMode &/*AM*/,
3841 SelectionDAG &/*DAG*/) const {
3842 return false;
3843 }
3844
3845 /// Returns true if the specified base+offset is a legal indexed addressing
3846 /// mode for this target. \p MI is the load or store instruction that is being
3847 /// considered for transformation.
3848 virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset,
3849 bool IsPre, MachineRegisterInfo &MRI) const {
3850 return false;
3851 }
3852
3853 /// Return the entry encoding for a jump table in the current function. The
3854 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
3855 virtual unsigned getJumpTableEncoding() const;
3856
3857 virtual MVT getJumpTableRegTy(const DataLayout &DL) const {
3858 return getPointerTy(DL);
3859 }
3860
3861 virtual const MCExpr *
3862 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
3863 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
3864 MCContext &/*Ctx*/) const {
3865 llvm_unreachable("Need to implement this hook if target has custom JTIs");
3866 }
3867
3868 /// Returns relocation base for the given PIC jumptable.
3869 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
3870 SelectionDAG &DAG) const;
3871
3872 /// This returns the relocation base for the given PIC jumptable, the same as
3873 /// getPICJumpTableRelocBase, but as an MCExpr.
3874 virtual const MCExpr *
3875 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3876 unsigned JTI, MCContext &Ctx) const;
3877
3878 /// Return true if folding a constant offset with the given GlobalAddress is
3879 /// legal. It is frequently not legal in PIC relocation models.
3880 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
3881
3882 /// On x86, return true if the operand with index OpNo is a CALL or JUMP
3883 /// instruction, which can use either a memory constraint or an address
3884 /// constraint. -fasm-blocks "__asm call foo" lowers to
3885 /// call void asm sideeffect inteldialect "call ${0:P}", "*m..."
3886 ///
3887 /// This function is used by a hack to choose the address constraint,
3888 /// lowering to a direct call.
3889 virtual bool
3890 isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs,
3891 unsigned OpNo) const {
3892 return false;
3893 }
3894
3895 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
3896 SDValue &Chain) const;
3897
3898 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3899 SDValue &NewRHS, ISD::CondCode &CCCode,
3900 const SDLoc &DL, const SDValue OldLHS,
3901 const SDValue OldRHS) const;
3902
3903 void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS,
3904 SDValue &NewRHS, ISD::CondCode &CCCode,
3905 const SDLoc &DL, const SDValue OldLHS,
3906 const SDValue OldRHS, SDValue &Chain,
3907 bool IsSignaling = false) const;
3908
3909 virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL,
3910 SDValue Chain, MachineMemOperand *MMO,
3911 SDValue &NewLoad, SDValue Ptr,
3912 SDValue PassThru, SDValue Mask) const {
3913 llvm_unreachable("Not Implemented");
3914 }
3915
3916 virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL,
3917 SDValue Chain, MachineMemOperand *MMO,
3918 SDValue Ptr, SDValue Val,
3919 SDValue Mask) const {
3920 llvm_unreachable("Not Implemented");
3921 }
3922
3923 /// Returns a pair of (return value, chain).
3924 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
3925 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
3926 EVT RetVT, ArrayRef<SDValue> Ops,
3927 MakeLibCallOptions CallOptions,
3928 const SDLoc &dl,
3929 SDValue Chain = SDValue()) const;
3930
3931 /// Check whether parameters to a call that are passed in callee saved
3932 /// registers are the same as from the calling function. This needs to be
3933 /// checked for tail call eligibility.
3934 bool parametersInCSRMatch(const MachineRegisterInfo &MRI,
3935 const uint32_t *CallerPreservedMask,
3936 const SmallVectorImpl<CCValAssign> &ArgLocs,
3937 const SmallVectorImpl<SDValue> &OutVals) const;
3938
3939 //===--------------------------------------------------------------------===//
3940 // TargetLowering Optimization Methods
3941 //
3942
3943 /// A convenience struct that encapsulates a DAG, and two SDValues for
3944 /// returning information from TargetLowering to its clients that want to
3945 /// combine.
3946 struct TargetLoweringOpt {
3947 SelectionDAG &DAG;
3948 bool LegalTys;
3949 bool LegalOps;
3950 SDValue Old;
3951 SDValue New;
3952
3953 explicit TargetLoweringOpt(SelectionDAG &InDAG,
3954 bool LT, bool LO) :
3955 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
3956
3957 bool LegalTypes() const { return LegalTys; }
3958 bool LegalOperations() const { return LegalOps; }
3959
3960 bool CombineTo(SDValue O, SDValue N) {
3961 Old = O;
3962 New = N;
3963 return true;
3964 }
3965 };
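 // Usage sketch inside a target combine (names illustrative):
 //   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
 //                         !DCI.isBeforeLegalizeOps());
 //   if (ShrinkDemandedConstant(Op, DemandedBits, TLO))
 //     DCI.CommitTargetLoweringOpt(TLO);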
3966
3967 /// Determines the optimal series of memory ops to replace the memset / memcpy.
3968 /// Return true if the number of memory ops is below the threshold (Limit).
3969 /// Note that this is always the case when Limit is ~0.
3970 /// It returns the types of the sequence of memory ops to perform
3971 /// memset / memcpy by reference.
3972 virtual bool
3973 findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
3974 const MemOp &Op, unsigned DstAS, unsigned SrcAS,
3975 const AttributeList &FuncAttributes) const;
3976
3977 /// Check to see if the specified operand of the specified instruction is a
3978 /// constant integer. If so, check to see if there are any bits set in the
3979 /// constant that are not demanded. If so, shrink the constant and return
3980 /// true.
3981 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3982 const APInt &DemandedElts,
3983 TargetLoweringOpt &TLO) const;
3984
3985 /// Helper wrapper around ShrinkDemandedConstant, demanding all elements.
3986 bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
3987 TargetLoweringOpt &TLO) const;
3988
3989 // Target hook to do target-specific const optimization, which is called by
3990 // ShrinkDemandedConstant. This function should return true if the target
3991 // doesn't want ShrinkDemandedConstant to further optimize the constant.
3992 virtual bool targetShrinkDemandedConstant(SDValue Op,
3993 const APInt &DemandedBits,
3994 const APInt &DemandedElts,
3995 TargetLoweringOpt &TLO) const {
3996 return false;
3997 }
3998
3999 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
4000 /// This uses isTruncateFree/isZExtFree and ANY_EXTEND for the widening cast,
4001 /// but it could be generalized for targets with other types of implicit
4002 /// widening casts.
4003 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
4004 const APInt &DemandedBits,
4005 TargetLoweringOpt &TLO) const;
4006
4007 /// Look at Op. At this point, we know that only the DemandedBits bits of the
4008 /// result of Op are ever used downstream. If we can use this information to
4009 /// simplify Op, create a new simplified DAG node and return true, returning
4010 /// the original and new nodes in Old and New. Otherwise, analyze the
4011 /// expression and return a mask of KnownOne and KnownZero bits for the
4012 /// expression (used to simplify the caller). The KnownZero/One bits may only
4013 /// be accurate for those bits in the Demanded masks.
4014 /// \p AssumeSingleUse When this parameter is true, this function will
4015 /// attempt to simplify \p Op even if there are multiple uses.
4016 /// Callers are responsible for correctly updating the DAG based on the
4017 /// results of this function, because simply replacing TLO.Old
4018 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4019 /// has multiple uses.
4020 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4021 const APInt &DemandedElts, KnownBits &Known,
4022 TargetLoweringOpt &TLO, unsigned Depth = 0,
4023 bool AssumeSingleUse = false) const;
4024
4025 /// Helper wrapper around SimplifyDemandedBits, demanding all elements.
4026 /// Adds Op back to the worklist upon success.
4027 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4028 KnownBits &Known, TargetLoweringOpt &TLO,
4029 unsigned Depth = 0,
4030 bool AssumeSingleUse = false) const;
4031
4032 /// Helper wrapper around SimplifyDemandedBits.
4033 /// Adds Op back to the worklist upon success.
4034 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4035 DAGCombinerInfo &DCI) const;
4036
4037 /// Helper wrapper around SimplifyDemandedBits.
4038 /// Adds Op back to the worklist upon success.
4039 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
4040 const APInt &DemandedElts,
4041 DAGCombinerInfo &DCI) const;
4042
4043 /// More limited version of SimplifyDemandedBits that can be used to "look
4044 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4045 /// bitwise ops etc.
4046 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4047 const APInt &DemandedElts,
4048 SelectionDAG &DAG,
4049 unsigned Depth = 0) const;
4050
4051 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4052 /// elements.
4053 SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits,
4054 SelectionDAG &DAG,
4055 unsigned Depth = 0) const;
4056
4057 /// Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all
4058 /// bits from only some vector elements.
4059 SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op,
4060 const APInt &DemandedElts,
4061 SelectionDAG &DAG,
4062 unsigned Depth = 0) const;
4063
4064 /// Look at Vector Op. At this point, we know that only the DemandedElts
4065 /// elements of the result of Op are ever used downstream. If we can use
4066 /// this information to simplify Op, create a new simplified DAG node and
4067 /// return true, storing the original and new nodes in TLO.
4068 /// Otherwise, analyze the expression and return a mask of KnownUndef and
4069 /// KnownZero elements for the expression (used to simplify the caller).
4070 /// The KnownUndef/Zero elements may only be accurate for those bits
4071 /// in the DemandedMask.
4072 /// \p AssumeSingleUse When this parameter is true, this function will
4073 /// attempt to simplify \p Op even if there are multiple uses.
4074 /// Callers are responsible for correctly updating the DAG based on the
4075 /// results of this function, because simply replacing TLO.Old
4076 /// with TLO.New will be incorrect when this parameter is true and TLO.Old
4077 /// has multiple uses.
4078 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask,
4079 APInt &KnownUndef, APInt &KnownZero,
4080 TargetLoweringOpt &TLO, unsigned Depth = 0,
4081 bool AssumeSingleUse = false) const;
4082
4083 /// Helper wrapper around SimplifyDemandedVectorElts.
4084 /// Adds Op back to the worklist upon success.
4085 bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedElts,
4086 DAGCombinerInfo &DCI) const;
4087
4088 /// Return true if the target supports simplifying demanded vector elements by
4089 /// converting them to undefs.
4090 virtual bool
4091 shouldSimplifyDemandedVectorElts(SDValue Op,
4092 const TargetLoweringOpt &TLO) const {
4093 return true;
4094 }
4095
4096 /// Determine which of the bits specified in Mask are known to be either zero
4097 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4098 /// argument allows us to only collect the known bits that are shared by the
4099 /// requested vector elements.
4100 virtual void computeKnownBitsForTargetNode(const SDValue Op,
4101 KnownBits &Known,
4102 const APInt &DemandedElts,
4103 const SelectionDAG &DAG,
4104 unsigned Depth = 0) const;
4105
4106 /// Determine which of the bits specified in Mask are known to be either zero
4107 /// or one and return them in the KnownZero/KnownOne bitsets. The DemandedElts
4108 /// argument allows us to only collect the known bits that are shared by the
4109 /// requested vector elements. This is for GISel.
4110 virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis,
4111 Register R, KnownBits &Known,
4112 const APInt &DemandedElts,
4113 const MachineRegisterInfo &MRI,
4114 unsigned Depth = 0) const;
4115
4116 /// Determine the known alignment for the pointer value \p R. This can
4117 /// typically be inferred from the number of low known 0 bits. However, for a
4118 /// pointer with a non-integral address space, the alignment value may be
4119 /// independent from the known low bits.
4120 virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis,
4121 Register R,
4122 const MachineRegisterInfo &MRI,
4123 unsigned Depth = 0) const;
4124
4125 /// Determine which of the bits of FrameIndex \p FIOp are known to be 0.
4126 /// Default implementation computes low bits based on alignment
4127 /// information. This should preserve known bits passed into it.
4128 virtual void computeKnownBitsForFrameIndex(int FIOp,
4129 KnownBits &Known,
4130 const MachineFunction &MF) const;
4131
4132 /// This method can be implemented by targets that want to expose additional
4133 /// information about sign bits to the DAG Combiner. The DemandedElts
4134 /// argument allows us to only collect the minimum sign bits that are shared
4135 /// by the requested vector elements.
4136 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
4137 const APInt &DemandedElts,
4138 const SelectionDAG &DAG,
4139 unsigned Depth = 0) const;
4140
4141 /// This method can be implemented by targets that want to expose additional
4142 /// information about sign bits to GlobalISel combiners. The DemandedElts
4143 /// argument allows us to only collect the minimum sign bits that are shared
4144 /// by the requested vector elements.
4145 virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
4146 Register R,
4147 const APInt &DemandedElts,
4148 const MachineRegisterInfo &MRI,
4149 unsigned Depth = 0) const;
4150
4151 /// Attempt to simplify any target nodes based on the demanded vector
4152 /// elements, returning true on success. Otherwise, analyze the expression and
4153 /// return a mask of KnownUndef and KnownZero elements for the expression
4154 /// (used to simplify the caller). The KnownUndef/Zero elements may only be
4155 /// accurate for those bits in the DemandedMask.
4156 virtual bool SimplifyDemandedVectorEltsForTargetNode(
4157 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef,
4158 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth = 0) const;
4159
4160 /// Attempt to simplify any target nodes based on the demanded bits/elts,
4161 /// returning true on success. Otherwise, analyze the
4162 /// expression and return a mask of KnownOne and KnownZero bits for the
4163 /// expression (used to simplify the caller). The KnownZero/One bits may only
4164 /// be accurate for those bits in the Demanded masks.
4165 virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op,
4166 const APInt &DemandedBits,
4167 const APInt &DemandedElts,
4168 KnownBits &Known,
4169 TargetLoweringOpt &TLO,
4170 unsigned Depth = 0) const;
4171
4172 /// More limited version of SimplifyDemandedBits that can be used to "look
4173 /// through" ops that don't contribute to the DemandedBits/DemandedElts -
4174 /// bitwise ops etc.
4175 virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(
4176 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
4177 SelectionDAG &DAG, unsigned Depth) const;
4178
4179 /// Return true if this function can prove that \p Op is never poison
4180 /// and, if \p PoisonOnly is false, does not have undef bits. The DemandedElts
4181 /// argument limits the check to the requested vector elements.
4182 virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(
4183 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
4184 bool PoisonOnly, unsigned Depth) const;
4185
4186 /// Return true if Op can create undef or poison from non-undef & non-poison
4187 /// operands. The DemandedElts argument limits the check to the requested
4188 /// vector elements.
4189 virtual bool
4190 canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts,
4191 const SelectionDAG &DAG, bool PoisonOnly,
4192 bool ConsiderFlags, unsigned Depth) const;
4193
4194 /// Tries to build a legal vector shuffle using the provided parameters
4195 /// or equivalent variations. The Mask argument may be modified as the
4196 /// function tries different variations.
4197 /// Returns an empty SDValue if the operation fails.
4198 SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0,
4199 SDValue N1, MutableArrayRef<int> Mask,
4200 SelectionDAG &DAG) const;
4201
4202 /// This method returns the constant pool value that will be loaded by LD.
4203 /// NOTE: You must check for implicit extensions of the constant by LD.
4204 virtual const Constant *getTargetConstantFromLoad(LoadSDNode *LD) const;
4205
4206 /// If \p SNaN is false, \returns true if \p Op is known to never be any
4207 /// NaN. If \p SNaN is true, returns true if \p Op is known to never be a signaling
4208 /// NaN.
4209 virtual bool isKnownNeverNaNForTargetNode(SDValue Op,
4210 const SelectionDAG &DAG,
4211 bool SNaN = false,
4212 unsigned Depth = 0) const;
4213
4214 /// Return true if vector \p Op has the same value across all \p DemandedElts,
4215 /// indicating any elements which may be undef in the output \p UndefElts.
4216 virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts,
4217 APInt &UndefElts,
4218 const SelectionDAG &DAG,
4219 unsigned Depth = 0) const;
4220
4221 /// Returns true if the given \p Op is considered a canonical constant for the
4222 /// target, which should not be transformed back into a BUILD_VECTOR.
4224 return Op.getOpcode() == ISD::SPLAT_VECTOR ||
4225 Op.getOpcode() == ISD::SPLAT_VECTOR_PARTS;
4226 }
4227
4228 struct DAGCombinerInfo {
4229 void *DC; // The DAG Combiner object.
4230 CombineLevel Level;
4231 bool CalledByLegalizer;
4232
4233 public:
4234 SelectionDAG &DAG;
4235
4236 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
4237 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
4238
4239 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
4240 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
4241 bool isAfterLegalizeDAG() const { return Level >= AfterLegalizeDAG; }
4242 CombineLevel getDAGCombineLevel() { return Level; }
4243 bool isCalledByLegalizer() const { return CalledByLegalizer; }
4244
4245 void AddToWorklist(SDNode *N);
4246 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
4247 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
4248 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
4249
4250 bool recursivelyDeleteUnusedNodes(SDNode *N);
4251
4252 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
4253 };
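 // Usage sketch in a target's PerformDAGCombine (hypothetical combine):
 //   if (DCI.isBeforeLegalize())
 //     return SDValue();       // wait until types are legal
 //   DCI.AddToWorklist(N);     // revisit N after this round
 //   return DCI.CombineTo(N, NewValue);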
4254
4255 /// Return true if \p N is a constant or constant vector equal to the true value
4256 /// from getBooleanContents().
4257 bool isConstTrueVal(SDValue N) const;
4258
4259 /// Return true if \p N is a constant or constant vector equal to the false value
4260 /// from getBooleanContents().
4261 bool isConstFalseVal(SDValue N) const;
4262
4263 /// Return if \p N is a True value when extended to \p VT.
4264 bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const;
4265
4266 /// Try to simplify a setcc built with the specified operands and cc. If it is
4267 /// unable to simplify it, return a null SDValue.
4268 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
4269 bool foldBooleans, DAGCombinerInfo &DCI,
4270 const SDLoc &dl) const;
4271
4272 // For targets which wrap address, unwrap for analysis.
4273 virtual SDValue unwrapAddress(SDValue N) const { return N; }
4274
4275 /// Returns true (and the GlobalValue and the offset) if the node is a
4276 /// GlobalAddress + offset.
4277 virtual bool
4278 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
4279
4280 /// This method will be invoked for all target nodes and for any
4281 /// target-independent nodes that the target has registered to invoke it
4282 /// for.
4283 ///
4284 /// The semantics are as follows:
4285 /// Return Value:
4286 /// SDValue.Val == 0 - No change was made
4287 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
4288 /// otherwise - N should be replaced by the returned Operand.
4289 ///
4290 /// In addition, methods provided by DAGCombinerInfo may be used to perform
4291 /// more complex transformations.
4292 ///
4293 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
4294
4295 /// Return true if it is profitable to move this shift by a constant amount
4296 /// through its operand, adjusting any immediate operands as necessary to
4297 /// preserve semantics. This transformation may not be desirable if it
4298 /// disrupts a particularly auspicious target-specific tree (e.g. bitfield
4299 /// extraction in AArch64). By default, it returns true.
4300 ///
4301 /// @param N the shift node
4302 /// @param Level the current DAGCombine legalization level.
4303 virtual bool isDesirableToCommuteWithShift(const SDNode *N,
4304 CombineLevel Level) const {
4305 SDValue ShiftLHS = N->getOperand(0);
4306 if (!ShiftLHS->hasOneUse())
4307 return false;
4308 if (ShiftLHS.getOpcode() == ISD::SIGN_EXTEND &&
4309 !ShiftLHS.getOperand(0)->hasOneUse())
4310 return false;
4311 return true;
4312 }
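 // For reference, the guarded transform has the shape
 //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
 // which can expose further folds but may break a bitfield-extract match
 // (e.g. on AArch64), hence the hook.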
4313
4314 /// GlobalISel - return true if it is profitable to move this shift by a
4315 /// constant amount through its operand, adjusting any immediate operands as
4316 /// necessary to preserve semantics. This transformation may not be desirable
4317 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4318 /// bitfield extraction in AArch64). By default, it returns true.
4319 ///
4320 /// @param MI the shift instruction
4321 /// @param IsAfterLegal true if running after legalization.
4322 virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI,
4323 bool IsAfterLegal) const {
4324 return true;
4325 }
4326
4327 /// GlobalISel - return true if it's profitable to perform the combine:
4328 /// shl ([sza]ext x), y => zext (shl x, y)
4329 virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const {
4330 return true;
4331 }
4332
4333 // Return AndOrSETCCFoldKind::{AddAnd, ABS} if it's desirable to try and
4334 // optimize LogicOp(SETCC0, SETCC1). An example (what is implemented as of
4335 // writing this) is:
4336 // With C as a power of 2 and C != 0 and C != INT_MIN:
4337 // AddAnd:
4338 // (icmp eq A, C) | (icmp eq A, -C)
4339 // -> (icmp eq and(add(A, C), ~(C + C)), 0)
4340 // (icmp ne A, C) & (icmp ne A, -C)
4341 // -> (icmp ne and(add(A, C), ~(C + C)), 0)
4342 // ABS:
4343 // (icmp eq A, C) | (icmp eq A, -C)
4344 // -> (icmp eq Abs(A), C)
4345 // (icmp ne A, C) & (icmp ne A, -C)
4346 // -> (icmp ne Abs(A), C)
4347 //
4348 // @param LogicOp the logic op
4349 // @param SETCC0 the first of the SETCC nodes
4350 // @param SETCC1 the second of the SETCC nodes
4351 virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(
4352 const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
4353 return AndOrSETCCFoldKind::None;
4354 }
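// Worked instance of the AddAnd fold above (illustrative, with C = 8):
//   (icmp eq A, 8) | (icmp eq A, -8)
//     -> (icmp eq and(add(A, 8), ~16), 0)
// since A + 8 is 16 when A == 8 and 0 when A == -8, and those are the only
// two values whose bits are all cleared by the mask ~(C + C) = ~16.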
4355
4356 /// Return true if it is profitable to combine an XOR of a logical shift
4357 /// to create a logical shift of NOT. This transformation may not be desirable
4358 /// if it disrupts a particularly auspicious target-specific tree (e.g.
4359 /// BIC on ARM/AArch64). By default, it returns true.
4360 virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const {
4361 return true;
4362 }
4363
4364 /// Return true if the target has native support for the specified value type
4365 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
4366 /// i16 is legal, but undesirable since i16 instruction encodings are longer
4367 /// and some i16 instructions are slow.
4368 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
4369 // By default, assume all legal types are desirable.
4370 return isTypeLegal(VT);
4371 }
4372
4373 /// Return true if it is profitable for dag combiner to transform a floating
4374 /// point op of specified opcode to an equivalent op of an integer
4375 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
4376 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
4377 EVT /*VT*/) const {
4378 return false;
4379 }
4380
4381 /// This method queries the target whether it is beneficial for dag combiner
4382 /// to promote the specified node. If true, it should return the desired
4383 /// promotion type by reference.
4384 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
4385 return false;
4386 }
4387
4388 /// Return true if the target supports swifterror attribute. It optimizes
4389 /// loads and stores to reads and writes of a specific register.
4390 virtual bool supportSwiftError() const {
4391 return false;
4392 }
4393
4394 /// Return true if the target supports that a subset of CSRs for the given
4395 /// machine function is handled explicitly via copies.
4396 virtual bool supportSplitCSR(MachineFunction *MF) const {
4397 return false;
4398 }
4399
4400 /// Return true if the target supports kcfi operand bundles.
4401 virtual bool supportKCFIBundles() const { return false; }
4402
4403 /// Return true if the target supports ptrauth operand bundles.
4404 virtual bool supportPtrAuthBundles() const { return false; }
4405
4406 /// Perform necessary initialization to handle a subset of CSRs explicitly
4407 /// via copies. This function is called at the beginning of instruction
4408 /// selection.
4409 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
4410 llvm_unreachable("Not Implemented");
4411 }
4412
4413 /// Insert explicit copies in entry and exit blocks. We copy a subset of
4414 /// CSRs to virtual registers in the entry block, and copy them back to
4415 /// physical registers in the exit blocks. This function is called at the end
4416 /// of instruction selection.
4417 virtual void insertCopiesSplitCSR(
4418 MachineBasicBlock *Entry,
4419 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
4420 llvm_unreachable("Not Implemented");
4421 }
4422
4423 /// Return the newly negated expression if the cost is not expensive, and
4424 /// set \p Cost to indicate whether the negation is cheaper or neutral to
4425 /// perform.
4426 virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
4427 bool LegalOps, bool OptForSize,
4428 NegatibleCost &Cost,
4429 unsigned Depth = 0) const;
4430
4431 SDValue getCheaperOrNeutralNegatedExpression(
4432 SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize,
4433 const NegatibleCost CostThreshold = NegatibleCost::Neutral,
4434 unsigned Depth = 0) const {
4435 NegatibleCost Cost = NegatibleCost::Expensive;
4436 SDValue Neg =
4437 getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4438 if (!Neg)
4439 return SDValue();
4440
4441 if (Cost <= CostThreshold)
4442 return Neg;
4443
4444 // Remove the newly created node to avoid the side effect on the DAG.
4445 if (Neg->use_empty())
4446 DAG.RemoveDeadNode(Neg.getNode());
4447 return SDValue();
4448 }
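// Usage sketch (illustrative, not from this header): a combine that rewrites
//   fneg (fma X, Y, Z) -> fma (fneg X), Y, (fneg Z)
// should only fire when both negations are at worst cost-neutral:
//
//   SDValue NegX = TLI.getCheaperOrNeutralNegatedExpression(X, DAG, LegalOps,
//                                                           OptForSize);
//   SDValue NegZ = TLI.getCheaperOrNeutralNegatedExpression(Z, DAG, LegalOps,
//                                                           OptForSize);
//   if (NegX && NegZ)
//     return DAG.getNode(ISD::FMA, DL, VT, NegX, Y, NegZ);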
4449
4450 /// This is the helper function to return the newly negated expression only
4451 /// when the cost is cheaper.
4452 SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG,
4453 bool LegalOps, bool OptForSize,
4454 unsigned Depth = 0) const {
4455 return getCheaperOrNeutralNegatedExpression(Op, DAG, LegalOps, OptForSize,
4456 NegatibleCost::Cheaper, Depth);
4457 }
4458
4459 /// This is the helper function to return the newly negated expression if
4460 /// the cost is not expensive.
4461 SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps,
4462 bool OptForSize, unsigned Depth = 0) const {
4463 NegatibleCost Cost = NegatibleCost::Expensive;
4464 return getNegatedExpression(Op, DAG, LegalOps, OptForSize, Cost, Depth);
4465 }
4466
4467 //===--------------------------------------------------------------------===//
4468 // Lowering methods - These methods must be implemented by targets so that
4469 // the SelectionDAGBuilder code knows how to lower these.
4470 //
4471
4472 /// Target-specific splitting of values into parts that fit a register
4473 /// storing a legal type.
4474 virtual bool splitValueIntoRegisterParts(
4475 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4476 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
4477 return false;
4478 }
4479
4480 /// Allows the target to handle physreg-carried dependency
4481 /// in a target-specific way. Used from the ScheduleDAGSDNodes to decide whether
4482 /// to add the edge to the dependency graph.
4483 /// Def - input: Selection DAG node defining the physical register
4484 /// User - input: Selection DAG node using physical register
4485 /// Op - input: Number of User operand
4486 /// PhysReg - inout: set to the physical register if the edge is
4487 /// necessary, unchanged otherwise
4488 /// Cost - inout: physical register copy cost.
4489 /// Returns 'true' if the edge is necessary, 'false' otherwise
4490 virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
4491 const TargetRegisterInfo *TRI,
4492 const TargetInstrInfo *TII,
4493 unsigned &PhysReg, int &Cost) const {
4494 return false;
4495 }
4496
4497 /// Target-specific combining of register parts into their original value.
4498 virtual SDValue
4499 joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
4500 const SDValue *Parts, unsigned NumParts,
4501 MVT PartVT, EVT ValueVT,
4502 std::optional<CallingConv::ID> CC) const {
4503 return SDValue();
4504 }
4505
4506 /// This hook must be implemented to lower the incoming (formal) arguments,
4507 /// described by the Ins array, into the specified DAG. The implementation
4508 /// should fill in the InVals array with legal-type argument values, and
4509 /// return the resulting token chain value.
4510 virtual SDValue LowerFormalArguments(
4511 SDValue /*Chain*/, CallingConv::ID /*CallConv*/, bool /*isVarArg*/,
4512 const SmallVectorImpl<ISD::InputArg> & /*Ins*/, const SDLoc & /*dl*/,
4513 SelectionDAG & /*DAG*/, SmallVectorImpl<SDValue> & /*InVals*/) const {
4514 llvm_unreachable("Not Implemented");
4515 }
4516
4517 /// This structure contains the information necessary for lowering
4518 /// pointer-authenticating indirect calls. It is equivalent to the "ptrauth"
4519 /// operand bundle found on the call instruction, if any.
4520 struct PtrAuthInfo {
4521 uint64_t Key;
4522 SDValue Discriminator;
4523 };
4524
4525 /// This structure contains all information that is necessary for lowering
4526 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
4527 /// needs to lower a call, and targets will see this struct in their LowerCall
4528 /// implementation.
4529 struct CallLoweringInfo {
4530 SDValue Chain;
4531 Type *RetTy = nullptr;
4532 bool RetSExt : 1;
4533 bool RetZExt : 1;
4534 bool IsVarArg : 1;
4535 bool IsInReg : 1;
4536 bool DoesNotReturn : 1;
4537 bool IsReturnValueUsed : 1;
4538 bool IsConvergent : 1;
4539 bool IsPatchPoint : 1;
4540 bool IsPreallocated : 1;
4541 bool NoMerge : 1;
4542
4543 // IsTailCall should be modified by implementations of
4544 // TargetLowering::LowerCall that perform tail call conversions.
4545 bool IsTailCall = false;
4546
4547 // Is Call lowering done post SelectionDAG type legalization.
4548 bool IsPostTypeLegalization = false;
4549
4550 unsigned NumFixedArgs = -1;
4551 CallingConv::ID CallConv = CallingConv::C;
4552 SDValue Callee;
4553 ArgListTy Args;
4554 SelectionDAG &DAG;
4555 SDLoc DL;
4556 const CallBase *CB = nullptr;
4557 SmallVector<ISD::OutputArg, 32> Outs;
4558 SmallVector<SDValue, 32> OutVals;
4559 SmallVector<ISD::InputArg, 32> Ins;
4560 SmallVector<SDValue, 4> InVals;
4561 const ConstantInt *CFIType = nullptr;
4562 SDValue ConvergenceControlToken;
4563
4564 std::optional<PtrAuthInfo> PAI;
4565
4566 CallLoweringInfo(SelectionDAG &DAG)
4567 : RetSExt(false), RetZExt(false), IsVarArg(false), IsInReg(false),
4568 DoesNotReturn(false), IsReturnValueUsed(true), IsConvergent(false),
4569 IsPatchPoint(false), IsPreallocated(false), NoMerge(false),
4570 DAG(DAG) {}
4571
4572 CallLoweringInfo &setDebugLoc(const SDLoc &dl) {
4573 DL = dl;
4574 return *this;
4575 }
4576
4577 CallLoweringInfo &setChain(SDValue InChain) {
4578 Chain = InChain;
4579 return *this;
4580 }
4581
4582 // setCallee with target/module-specific attributes
4583 CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
4584 SDValue Target, ArgListTy &&ArgsList) {
4585 RetTy = ResultType;
4586 Callee = Target;
4587 CallConv = CC;
4588 NumFixedArgs = ArgsList.size();
4589 Args = std::move(ArgsList);
4590
4591 DAG.getTargetLoweringInfo().markLibCallAttributes(
4592 &(DAG.getMachineFunction()), CC, Args);
4593 return *this;
4594 }
4595
4596 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
4597 SDValue Target, ArgListTy &&ArgsList,
4598 AttributeSet ResultAttrs = {}) {
4599 RetTy = ResultType;
4600 IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
4601 RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
4602 RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
4603 NoMerge = ResultAttrs.hasAttribute(Attribute::NoMerge);
4604
4605 Callee = Target;
4606 CallConv = CC;
4607 NumFixedArgs = ArgsList.size();
4608 Args = std::move(ArgsList);
4609 return *this;
4610 }
4611
4612 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
4613 SDValue Target, ArgListTy &&ArgsList,
4614 const CallBase &Call) {
4615 RetTy = ResultType;
4616
4617 IsInReg = Call.hasRetAttr(Attribute::InReg);
4618 DoesNotReturn =
4619 Call.doesNotReturn() ||
4620 (!isa<InvokeInst>(Call) && isa<UnreachableInst>(Call.getNextNode()));
4621 IsVarArg = FTy->isVarArg();
4622 IsReturnValueUsed = !Call.use_empty();
4623 RetSExt = Call.hasRetAttr(Attribute::SExt);
4624 RetZExt = Call.hasRetAttr(Attribute::ZExt);
4625 NoMerge = Call.hasFnAttr(Attribute::NoMerge);
4626
4627 Callee = Target;
4628
4629 CallConv = Call.getCallingConv();
4630 NumFixedArgs = FTy->getNumParams();
4631 Args = std::move(ArgsList);
4632
4633 CB = &Call;
4634
4635 return *this;
4636 }
4637
4638 CallLoweringInfo &setInRegister(bool Value = true) {
4639 IsInReg = Value;
4640 return *this;
4641 }
4642
4643 CallLoweringInfo &setNoReturn(bool Value = true) {
4644 DoesNotReturn = Value;
4645 return *this;
4646 }
4647
4648 CallLoweringInfo &setVarArg(bool Value = true) {
4649 IsVarArg = Value;
4650 return *this;
4651 }
4652
4653 CallLoweringInfo &setTailCall(bool Value = true) {
4654 IsTailCall = Value;
4655 return *this;
4656 }
4657
4658 CallLoweringInfo &setDiscardResult(bool Value = true) {
4659 IsReturnValueUsed = !Value;
4660 return *this;
4661 }
4662
4663 CallLoweringInfo &setConvergent(bool Value = true) {
4664 IsConvergent = Value;
4665 return *this;
4666 }
4667
4668 CallLoweringInfo &setSExtResult(bool Value = true) {
4669 RetSExt = Value;
4670 return *this;
4671 }
4672
4673 CallLoweringInfo &setZExtResult(bool Value = true) {
4674 RetZExt = Value;
4675 return *this;
4676 }
4677
4678 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
4679 IsPatchPoint = Value;
4680 return *this;
4681 }
4682
4683 CallLoweringInfo &setIsPreallocated(bool Value = true) {
4684 IsPreallocated = Value;
4685 return *this;
4686 }
4687
4688 CallLoweringInfo &setPtrAuth(PtrAuthInfo Value) {
4689 PAI = Value;
4690 return *this;
4691 }
4692
4693 CallLoweringInfo &setIsPostTypeLegalization(bool Value = true) {
4694 IsPostTypeLegalization = Value;
4695 return *this;
4696 }
4697
4698 CallLoweringInfo &setCFIType(const ConstantInt *Type) {
4699 CFIType = Type;
4700 return *this;
4701 }
4702
4703 CallLoweringInfo &setConvergenceControlToken(SDValue Token) {
4704 ConvergenceControlToken = Token;
4705 return *this;
4706 }
4707
4708 ArgListTy &getArgs() {
4709 return Args;
4710 }
4711 };
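// Usage sketch (illustrative): roughly how SelectionDAG code emits a
// runtime-library call through this structure. The symbol name, types, and
// surrounding values (RetTy, PtrVT, Chain, dl) are placeholders, not values
// defined by this header:
//
//   TargetLowering::ArgListTy Args; // one ArgListEntry per call operand
//   TargetLowering::CallLoweringInfo CLI(DAG);
//   CLI.setDebugLoc(dl)
//       .setChain(Chain)
//       .setLibCallee(CallingConv::C, RetTy,
//                     DAG.getExternalSymbol("__example_fn", PtrVT),
//                     std::move(Args));
//   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
//   // CallResult.first is the return value, CallResult.second the out chain.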
4712
4713 /// This structure is used to pass arguments to the makeLibCall function.
4714 struct MakeLibCallOptions {
4715 // By passing the type list before softening to makeLibCall, the target hook
4716 // shouldExtendTypeInLibCall can get the original type before softening.
4717 ArrayRef<EVT> OpsVTBeforeSoften;
4718 EVT RetVTBeforeSoften;
4719 bool IsSigned : 1;
4720 bool DoesNotReturn : 1;
4721 bool IsReturnValueUsed : 1;
4722 bool IsPostTypeLegalization : 1;
4723 bool IsSoften : 1;
4724
4725 MakeLibCallOptions()
4726 : IsSigned(false), DoesNotReturn(false), IsReturnValueUsed(true),
4727 IsPostTypeLegalization(false), IsSoften(false) {}
4728
4729 MakeLibCallOptions &setIsSigned(bool Value = true) {
4730 IsSigned = Value;
4731 return *this;
4732 }
4733
4734 MakeLibCallOptions &setNoReturn(bool Value = true) {
4735 DoesNotReturn = Value;
4736 return *this;
4737 }
4738
4739 MakeLibCallOptions &setDiscardResult(bool Value = true) {
4740 IsReturnValueUsed = !Value;
4741 return *this;
4742 }
4743
4744 MakeLibCallOptions &setIsPostTypeLegalization(bool Value = true) {
4745 IsPostTypeLegalization = Value;
4746 return *this;
4747 }
4748
4749 MakeLibCallOptions &setTypeListBeforeSoften(ArrayRef<EVT> OpsVT, EVT RetVT,
4750 bool Value = true) {
4751 OpsVTBeforeSoften = OpsVT;
4752 RetVTBeforeSoften = RetVT;
4753 IsSoften = Value;
4754 return *this;
4755 }
4756 };
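// Usage sketch (illustrative): softening an f32 operation into a libcall
// while recording the pre-soften types so shouldExtendTypeInLibCall can
// still inspect them. "LC", "Ops", "dl", and "Chain" are assumed in scope:
//
//   TargetLowering::MakeLibCallOptions CallOptions;
//   EVT OpsVT[1] = {MVT::f32};
//   CallOptions.setIsSigned(true).setTypeListBeforeSoften(OpsVT, MVT::f32);
//   std::pair<SDValue, SDValue> Res =
//       TLI.makeLibCall(DAG, LC, MVT::i32, Ops, CallOptions, dl, Chain);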
4757
4758 /// This function lowers an abstract call to a function into an actual call.
4759 /// This returns a pair of operands. The first element is the return value
4760 /// for the function (if RetTy is not VoidTy). The second element is the
4761 /// outgoing token chain. It calls LowerCall to do the actual lowering.
4762 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
4763
4764 /// This hook must be implemented to lower calls into the specified
4765 /// DAG. The outgoing arguments to the call are described by the Outs array,
4766 /// and the values to be returned by the call are described by the Ins
4767 /// array. The implementation should fill in the InVals array with legal-type
4768 /// return values from the call, and return the resulting token chain value.
4769 virtual SDValue
4770 LowerCall(CallLoweringInfo & /*CLI*/,
4771 SmallVectorImpl<SDValue> &/*InVals*/) const {
4772 llvm_unreachable("Not Implemented");
4773 }
4774
4775 /// Target-specific cleanup for formal ByVal parameters.
4776 virtual void HandleByVal(CCState *, unsigned &, Align) const {}
4777
4778 /// This hook should be implemented to check whether the return values
4779 /// described by the Outs array can fit into the return registers. If false
4780 /// is returned, an sret-demotion is performed.
4781 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
4782 MachineFunction &/*MF*/, bool /*isVarArg*/,
4783 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
4784 LLVMContext &/*Context*/) const
4785 {
4786 // Return true by default to get preexisting behavior.
4787 return true;
4788 }
4789
4790 /// This hook must be implemented to lower outgoing return values, described
4791 /// by the Outs array, into the specified DAG. The implementation should
4792 /// return the resulting token chain value.
4793 virtual SDValue LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
4794 bool /*isVarArg*/,
4795 const SmallVectorImpl<ISD::OutputArg> & /*Outs*/,
4796 const SmallVectorImpl<SDValue> & /*OutVals*/,
4797 const SDLoc & /*dl*/,
4798 SelectionDAG & /*DAG*/) const {
4799 llvm_unreachable("Not Implemented");
4800 }
4801
4802 /// Return true if the result of the specified node is used by a return node
4803 /// only. It also computes and returns the input chain for the tail call.
4804 ///
4805 /// This is used to determine whether it is possible to codegen a libcall as
4806 /// a tail call at legalization time.
4807 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
4808 return false;
4809 }
4810
4811 /// Return true if the target may be able to emit the call instruction as a tail
4812 /// call. This is used by optimization passes to determine if it's profitable
4813 /// to duplicate return instructions to enable tailcall optimization.
4814 virtual bool mayBeEmittedAsTailCall(const CallInst *) const {
4815 return false;
4816 }
4817
4818 /// Return the register ID of the name passed in. Used by named register
4819 /// global variables extension. There is no target-independent behaviour
4820 /// so the default action is to bail.
4821 virtual Register getRegisterByName(const char* RegName, LLT Ty,
4822 const MachineFunction &MF) const {
4823 report_fatal_error("Named registers not implemented for this target");
4824 }
4825
4826 /// Return the type that should be used to zero or sign extend a
4827 /// zeroext/signext integer return value. FIXME: Some C calling conventions
4828 /// require the return type to be promoted, but this is not true all the time,
4829 /// e.g. i1/i8/i16 on x86/x86_64. It is also not necessary for non-C calling
4830 /// conventions. The frontend should handle this and include all of the
4831 /// necessary information.
4832 virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
4833 ISD::NodeType /*ExtendKind*/) const {
4834 EVT MinVT = getRegisterType(MVT::i32);
4835 return VT.bitsLT(MinVT) ? MinVT : VT;
4836 }
4837
4838 /// For some targets, an LLVM struct type must be broken down into multiple
4839 /// simple types, but the calling convention specifies that the entire struct
4840 /// must be passed in a block of consecutive registers.
4841 virtual bool
4842 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
4843 bool isVarArg,
4844 const DataLayout &DL) const {
4845 return false;
4846 }
4847
4848 /// For most targets, an LLVM type must be broken down into multiple
4849 /// smaller types. Usually the halves are ordered according to the endianness
4850 /// but for some platforms that would break. So this method defaults to
4851 /// matching the endianness, but it can be overridden.
4852 virtual bool
4853 shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const {
4854 return DL.isLittleEndian();
4855 }
4856
4857 /// Returns a 0 terminated array of registers that can be safely used as
4858 /// scratch registers.
4859 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
4860 return nullptr;
4861 }
4862
4863 /// Returns a 0 terminated array of rounding control registers that can be
4864 /// attached into strict FP call.
4865 virtual ArrayRef<MCPhysReg> getRoundingControlRegisters() const {
4866 return ArrayRef<MCPhysReg>();
4867 }
4868
4869 /// This callback is used to prepare for a volatile or atomic load.
4870 /// It takes a chain node as input and returns the chain for the load itself.
4871 ///
4872 /// Having a callback like this is necessary for targets like SystemZ,
4873 /// which allows a CPU to reuse the result of a previous load indefinitely,
4874 /// even if a cache-coherent store is performed by another CPU. The default
4875 /// implementation does nothing.
4876 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL,
4877 SelectionDAG &DAG) const {
4878 return Chain;
4879 }
4880
4881 /// This callback is invoked by the type legalizer to legalize nodes with an
4882 /// illegal operand type but legal result types. It replaces the
4883 /// LowerOperation callback in the type Legalizer. The reason we can not do
4884 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
4885 /// use this callback.
4886 ///
4887 /// TODO: Consider merging with ReplaceNodeResults.
4888 ///
4889 /// The target places new result values for the node in Results (their number
4890 /// and types must exactly match those of the original return values of
4891 /// the node), or leaves Results empty, which indicates that the node is not
4892 /// to be custom lowered after all.
4893 /// The default implementation calls LowerOperation.
4894 virtual void LowerOperationWrapper(SDNode *N,
4895 SmallVectorImpl<SDValue> &Results,
4896 SelectionDAG &DAG) const;
4897
4898 /// This callback is invoked for operations that are unsupported by the
4899 /// target, which are registered to use 'custom' lowering, and whose defined
4900 /// values are all legal. If the target has no operations that require custom
4901 /// lowering, it need not implement this. The default implementation of this
4902 /// aborts.
4903 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
4904
4905 /// This callback is invoked when a node result type is illegal for the
4906 /// target, and the operation was registered to use 'custom' lowering for that
4907 /// result type. The target places new result values for the node in Results
4908 /// (their number and types must exactly match those of the original return
4909 /// values of the node), or leaves Results empty, which indicates that the
4910 /// node is not to be custom lowered after all.
4911 ///
4912 /// If the target has no operations that require custom lowering, it need not
4913 /// implement this. The default implementation aborts.
4914 virtual void ReplaceNodeResults(SDNode * /*N*/,
4915 SmallVectorImpl<SDValue> &/*Results*/,
4916 SelectionDAG &/*DAG*/) const {
4917 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
4918 }
4919
4920 /// This method returns the name of a target specific DAG node.
4921 virtual const char *getTargetNodeName(unsigned Opcode) const;
4922
4923 /// This method returns a target specific FastISel object, or null if the
4924 /// target does not support "fast" ISel.
4925 virtual FastISel *createFastISel(FunctionLoweringInfo &,
4926 const TargetLibraryInfo *) const {
4927 return nullptr;
4928 }
4929
4930 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
4931 SelectionDAG &DAG) const;
4932
4933#ifndef NDEBUG
4934 /// Check the given SDNode. Aborts if it is invalid.
4935 virtual void verifyTargetSDNode(const SDNode *N) const {};
4936#endif
4937
4938 //===--------------------------------------------------------------------===//
4939 // Inline Asm Support hooks
4940 //
4941
4942 /// This hook allows the target to expand an inline asm call to be explicit
4943 /// llvm code if it wants to. This is useful for turning simple inline asms
4944 /// into LLVM intrinsics, which gives the compiler more information about the
4945 /// behavior of the code.
4946 virtual bool ExpandInlineAsm(CallInst *) const {
4947 return false;
4948 }
4949
4950 enum ConstraintType {
4951 C_Register, // Constraint represents specific register(s).
4952 C_RegisterClass, // Constraint represents any of register(s) in class.
4953 C_Memory, // Memory constraint.
4954 C_Address, // Address constraint.
4955 C_Immediate, // Requires an immediate.
4956 C_Other, // Something else.
4957 C_Unknown // Unsupported constraint.
4958 };
4959
4960 enum ConstraintWeight {
4961 // Generic weights.
4962 CW_Invalid = -1, // No match.
4963 CW_Okay = 0, // Acceptable.
4964 CW_Good = 1, // Good weight.
4965 CW_Better = 2, // Better weight.
4966 CW_Best = 3, // Best weight.
4967
4968 // Well-known weights.
4969 CW_SpecificReg = CW_Okay, // Specific register operands.
4970 CW_Register = CW_Good, // Register operands.
4971 CW_Memory = CW_Better, // Memory operands.
4972 CW_Constant = CW_Best, // Constant operand.
4973 CW_Default = CW_Okay // Default or don't know type.
4974 };
4975
4976 /// This contains information for each constraint that we are lowering.
4977 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
4978 /// This contains the actual string for the code, like "m". TargetLowering
4979 /// picks the 'best' code from ConstraintInfo::Codes that most closely
4980 /// matches the operand.
4981 std::string ConstraintCode;
4982
4983 /// Information about the constraint code, e.g. Register, RegisterClass,
4984 /// Memory, Other, Unknown.
4985 TargetLowering::ConstraintType ConstraintType = TargetLowering::C_Unknown;
4986
4987 /// If this is the result output operand or a clobber, this is null,
4988 /// otherwise it is the incoming operand to the CallInst. This gets
4989 /// modified as the asm is processed.
4990 Value *CallOperandVal = nullptr;
4991
4992 /// The ValueType for the operand value.
4993 MVT ConstraintVT = MVT::Other;
4994
4995 /// Copy constructor for copying from a ConstraintInfo.
4996 AsmOperandInfo(InlineAsm::ConstraintInfo Info)
4997 : InlineAsm::ConstraintInfo(std::move(Info)) {}
4998
4999 /// Return true if this is an input operand that is a matching constraint
5000 /// like "4".
5001 bool isMatchingInputConstraint() const;
5002
5003 /// If this is an input matching constraint, this method returns the output
5004 /// operand it matches.
5005 unsigned getMatchedOperand() const;
5006 };
5007
5008 using AsmOperandInfoVector = std::vector<AsmOperandInfo>;
5009
5010 /// Split up the constraint string from the inline assembly value into the
5011 /// specific constraints and their prefixes, and also tie in the associated
5012 /// operand values. If this returns an empty vector, and if the constraint
5013 /// string itself isn't empty, there was an error parsing.
5014 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
5015 const TargetRegisterInfo *TRI,
5016 const CallBase &Call) const;
5017
5018 /// Examine constraint type and operand type and determine a weight value.
5019 /// The operand object must already have been set up with the operand type.
5020 virtual ConstraintWeight getMultipleConstraintMatchWeight(
5021 AsmOperandInfo &info, int maIndex) const;
5022
5023 /// Examine constraint string and operand type and determine a weight value.
5024 /// The operand object must already have been set up with the operand type.
5025 virtual ConstraintWeight getSingleConstraintMatchWeight(
5026 AsmOperandInfo &info, const char *constraint) const;
5027
5028 /// Determines the constraint code and constraint type to use for the specific
5029 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
5030 /// If the actual operand being passed in is available, it can be passed in as
5031 /// Op, otherwise an empty SDValue can be passed.
5032 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
5033 SDValue Op,
5034 SelectionDAG *DAG = nullptr) const;
5035
5036 /// Given a constraint, return the type of constraint it is for this target.
5037 virtual ConstraintType getConstraintType(StringRef Constraint) const;
5038
5039 using ConstraintPair = std::pair<StringRef, TargetLowering::ConstraintType>;
5040 using ConstraintGroup = SmallVector<ConstraintPair>;
5041 /// Given an OpInfo with a list of constraint codes as strings, return a
5042 /// sorted Vector of pairs of constraint codes and their types in priority of
5043 /// what we'd prefer to lower them as. This may contain immediates that
5044 /// cannot be lowered, but it is meant to be a machine agnostic order of
5045 /// preferences.
5046 virtual ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const;
5047
5048 /// Given a physical register constraint (e.g. {edx}), return the register
5049 /// number and the register class for the register.
5050 ///
5051 /// Given a register class constraint, like 'r', if this corresponds directly
5052 /// to an LLVM register class, return a register of 0 and the register class
5053 /// pointer.
5054 ///
5055 /// This should only be used for C_Register constraints. On error, this
5056 /// returns a register number of 0 and a null register class pointer.
5057 virtual std::pair<unsigned, const TargetRegisterClass *>
5058 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
5059 StringRef Constraint, MVT VT) const;
5060
5061 virtual InlineAsm::ConstraintCode
5062 getInlineAsmMemConstraint(StringRef ConstraintCode) const {
5063 if (ConstraintCode == "m")
5064 return InlineAsm::ConstraintCode::m;
5065 if (ConstraintCode == "o")
5066 return InlineAsm::ConstraintCode::o;
5067 if (ConstraintCode == "X")
5068 return InlineAsm::ConstraintCode::X;
5069 if (ConstraintCode == "p")
5070 return InlineAsm::ConstraintCode::p;
5071 return InlineAsm::ConstraintCode::Unknown;
5072 }
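// Override sketch (illustrative): a target with an extra machine-specific
// memory constraint string, here "Q" as SystemZ uses, would extend the
// mapping and defer everything else to the base implementation:
//
//   InlineAsm::ConstraintCode MyTargetLowering::getInlineAsmMemConstraint(
//       StringRef ConstraintCode) const {
//     if (ConstraintCode == "Q")
//       return InlineAsm::ConstraintCode::Q;
//     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
//   }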
5073
5074 /// Try to replace an X constraint, which matches anything, with another that
5075 /// has more specific requirements based on the type of the corresponding
5076 /// operand. This returns null if there is no replacement to make.
5077 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
5078
5079 /// Lower the specified operand into the Ops vector. If it is invalid, don't
5080 /// add anything to Ops.
5081 virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint,
5082 std::vector<SDValue> &Ops,
5083 SelectionDAG &DAG) const;
5084
5085 // Lower custom output constraints. If invalid, return SDValue().
5086 virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue,
5087 const SDLoc &DL,
5088 const AsmOperandInfo &OpInfo,
5089 SelectionDAG &DAG) const;
5090
5091 // Targets may override this function to collect operands from the CallInst
5092 // and for example, lower them into the SelectionDAG operands.
5093 virtual void CollectTargetIntrinsicOperands(const CallInst &I,
5095 SelectionDAG &DAG) const;
5096
5097 //===--------------------------------------------------------------------===//
5098 // Div utility functions
5099 //
5100
5101 SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
5102 bool IsAfterLegalTypes,
5103 SmallVectorImpl<SDNode *> &Created) const;
5104 SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
5105 bool IsAfterLegalTypes,
5106 SmallVectorImpl<SDNode *> &Created) const;
5107 // Build sdiv by power-of-2 with conditional move instructions
5108 SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor,
5109 SelectionDAG &DAG,
5110 SmallVectorImpl<SDNode *> &Created) const;
5111
5112 /// Targets may override this function to provide custom SDIV lowering for
5113 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
5114 /// assumes SDIV is expensive and replaces it with a series of other integer
5115 /// operations.
5116 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
5117 SelectionDAG &DAG,
5118 SmallVectorImpl<SDNode *> &Created) const;
5119
5120 /// Targets may override this function to provide custom SREM lowering for
5121 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
5122 /// assumes SREM is expensive and replaces it with a series of other integer
5123 /// operations.
5124 virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor,
5125 SelectionDAG &DAG,
5126 SmallVectorImpl<SDNode *> &Created) const;
5127
5128 /// Indicate whether this target prefers to combine FDIVs with the same
5129 /// divisor. If the transform should never be done, return zero. If the
5130 /// transform should be done, return the minimum number of divisor uses
5131 /// that must exist.
5132 virtual unsigned combineRepeatedFPDivisors() const {
5133 return 0;
5134 }
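// The transform guarded by this hook (illustrative): once at least the
// returned number of FDIVs share a divisor, the DAG combiner rewrites
//   a/c, b/c, ...                    (N divisions)
// into
//   t = 1.0/c;  a*t, b*t, ...        (1 division + N multiplies)
// which pays off when division is much slower than multiplication.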
5135
5136 /// Hooks for building estimates in place of slower divisions and square
5137 /// roots.
5138
5139 /// Return either a square root or its reciprocal estimate value for the input
5140 /// operand.
5141 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
5142 /// 'Enabled' as set by a potential default override attribute.
5143 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
5144 /// refinement iterations required to generate a sufficient (though not
5145 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
5146 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
5147 /// algorithm implementation that uses either one or two constants.
5148 /// The boolean Reciprocal is used to select whether the estimate is for the
5149 /// square root of the input operand or the reciprocal of its square root.
5150 /// A target may choose to implement its own refinement within this function.
5151 /// If that's true, then return '0' as the number of RefinementSteps to avoid
5152 /// any further refinement of the estimate.
5153 /// An empty SDValue return means no estimate sequence can be created.
5154 virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
5155 int Enabled, int &RefinementSteps,
5156 bool &UseOneConstNR, bool Reciprocal) const {
5157 return SDValue();
5158 }
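// Refinement math (illustrative): given a hardware estimate e0 ~= 1/sqrt(A),
// each Newton-Raphson step computes
//   e(n+1) = e(n) * (1.5 - 0.5 * A * e(n) * e(n))
// roughly doubling the number of correct bits; sqrt(A) itself is then
// recovered as A * e(final) when Reciprocal is false.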
5159
5160 /// Try to convert the fminnum/fmaxnum to a compare/select sequence. This is
5161 /// required for correctness since InstCombine might have canonicalized a
5162 /// fcmp+select sequence to a FMINNUM/FMAXNUM intrinsic. If we were to fall
5163 /// through to the default expansion/soften to libcall, we might introduce a
5164 /// link-time dependency on libm into a file that originally did not have one.
5165 SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const;
5166
5167 /// Return a reciprocal estimate value for the input operand.
5168 /// \p Enabled is a ReciprocalEstimate enum with value either 'Unspecified' or
5169 /// 'Enabled' as set by a potential default override attribute.
5170 /// If \p RefinementSteps is 'Unspecified', the number of Newton-Raphson
5171 /// refinement iterations required to generate a sufficient (though not
5172 /// necessarily IEEE-754 compliant) estimate is returned in that parameter.
5173 /// A target may choose to implement its own refinement within this function.
5174 /// If that's true, then return '0' as the number of RefinementSteps to avoid
5175 /// any further refinement of the estimate.
5176 /// An empty SDValue return means no estimate sequence can be created.
5177 virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
5178 int Enabled, int &RefinementSteps) const {
5179 return SDValue();
5180 }
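// Refinement math (illustrative): given a hardware estimate e0 ~= 1/A, each
// Newton-Raphson step computes
//   e(n+1) = e(n) * (2.0 - A * e(n))
// again roughly doubling the number of correct bits per iteration.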
5181
5182 /// Return a target-dependent comparison result if the input operand is
5183 /// suitable for use with a square root estimate calculation. For example, the
5184 /// comparison may check if the operand is NAN, INF, zero, normal, etc. The
5185 /// result should be used as the condition operand for a select or branch.
5186 virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG,
5187 const DenormalMode &Mode) const;
5188
5189 /// Return a target-dependent result if the input operand is not suitable for
5190 /// use with a square root estimate calculation.
5191 virtual SDValue getSqrtResultForDenormInput(SDValue Operand,
5192 SelectionDAG &DAG) const {
5193 return DAG.getConstantFP(0.0, SDLoc(Operand), Operand.getValueType());
5194 }
5195
5196 //===--------------------------------------------------------------------===//
5197 // Legalization utility functions
5198 //
5199
5200 /// Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes,
5201 /// respectively, each computing an n/2-bit part of the result.
5202 /// \param Result A vector that will be filled with the parts of the result
5203 /// in little-endian order.
5204 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
5205 /// if you want to control how low bits are extracted from the LHS.
5206 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
5207 /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
5208 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
5209 /// \returns true if the node has been expanded, false if it has not
5210 bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS,
5211 SDValue RHS, SmallVectorImpl<SDValue> &Result, EVT HiLoVT,
5212 SelectionDAG &DAG, MulExpansionKind Kind,
5213 SDValue LL = SDValue(), SDValue LH = SDValue(),
5214 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
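// Expansion math (illustrative): with h = n/2, LHS = LH*2^h + LL and
// RHS = RH*2^h + RL, the product decomposes as
//   LHS*RHS = LL*RL + (LL*RH + LH*RL)*2^h + LH*RH*2^(2h)
// so the result parts are assembled from n/2-bit multiplies plus shifted
// additions with carry propagation between the parts.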
5215
5216 /// Expand a MUL into two nodes. One that computes the high bits of
5217 /// the result and one that computes the low bits.
5218 /// \param HiLoVT The value type to use for the Lo and Hi nodes.
5219 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
5220 /// if you want to control how low bits are extracted from the LHS.
5221 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
5222 /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
5223 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
5224 /// \returns true if the node has been expanded, false if it has not
5225 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
5226 SelectionDAG &DAG, MulExpansionKind Kind,
5227 SDValue LL = SDValue(), SDValue LH = SDValue(),
5228 SDValue RL = SDValue(), SDValue RH = SDValue()) const;
5229
5230 /// Attempt to expand an n-bit div/rem/divrem by constant using an n/2-bit
5231 /// urem by constant and other arithmetic ops. The n/2-bit urem by constant
5232 /// will be expanded by DAGCombiner. This is not possible for all constant
5233 /// divisors.
5234 /// \param N Node to expand
5235 /// \param Result A vector that will be filled with the lo and high parts of
5236 /// the results. For *DIVREM, this will be the quotient parts followed
5237 /// by the remainder parts.
5238 /// \param HiLoVT The value type to use for the Lo and Hi parts. Should be
5239 /// half of VT.
5240 /// \param LL Low bits of the LHS of the operation. You can use this
5241 /// parameter if you want to control how low bits are extracted from
5242 /// the LHS.
5243 /// \param LH High bits of the LHS of the operation. See LL for meaning.
5244 /// \returns true if the node has been expanded, false if it has not.
5245 bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl<SDValue> &Result,
5246 EVT HiLoVT, SelectionDAG &DAG,
5247 SDValue LL = SDValue(),
5248 SDValue LH = SDValue()) const;
5249
5250 /// Expand funnel shift.
5251 /// \param N Node to expand
5252 /// \returns The expansion if successful, SDValue() otherwise
5253 SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const;
5254
5255 /// Expand rotations.
5256 /// \param N Node to expand
5257 /// \param AllowVectorOps expand vector rotate, this should only be performed
5258 /// if the legalization is happening outside of LegalizeVectorOps
5259 /// \returns The expansion if successful, SDValue() otherwise
5260 SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const;
5261
5262 /// Expand shift-by-parts.
5263 /// \param N Node to expand
5264 /// \param Lo lower-output-part after conversion
5265 /// \param Hi upper-output-part after conversion
5266 void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi,
5267 SelectionDAG &DAG) const;
5268
5269 /// Expand float(f32) to SINT(i64) conversion
5270 /// \param N Node to expand
5271 /// \param Result output after conversion
5272 /// \returns True, if the expansion was successful, false otherwise
5273 bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
5274
5275 /// Expand float to UINT conversion
5276 /// \param N Node to expand
5277 /// \param Result output after conversion
5278 /// \param Chain output chain after conversion
5279 /// \returns True, if the expansion was successful, false otherwise
5280 bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain,
5281 SelectionDAG &DAG) const;
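// Expansion sketch (illustrative, f64 -> i64 when only FP_TO_SINT is legal):
// small values convert directly, large values are rebased below 2^63 first:
//   FP_TO_UINT(x) ~= x < 2^63 ? FP_TO_SINT(x)
//                             : FP_TO_SINT(x - 2^63) ^ 0x8000000000000000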
5282
5283 /// Expand UINT(i64) to double(f64) conversion
5284 /// \param N Node to expand
5285 /// \param Result output after conversion
5286 /// \param Chain output chain after conversion
5287 /// \returns True, if the expansion was successful, false otherwise
5288 bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain,
5289 SelectionDAG &DAG) const;
5290
5291 /// Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
5292 SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const;
5293
5294 /// Expand fminimum/fmaximum into multiple comparisons with selects.
5295 SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const;
5296
5297 /// Expand fminimumnum/fmaximumnum into multiple comparisons with selects.
5298 SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const;
5299
5300 /// Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
5301 /// \param N Node to expand
5302 /// \returns The expansion result
5303 SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const;
5304
5305 /// Truncate Op to ResultVT. If the result is exact, leave it alone. If it is
5306 /// not exact, force the result to be odd.
5307 /// \param ResultVT The type of result.
5308 /// \param Op The value to round.
5309 /// \returns The expansion result
5310 SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL,
5311 SelectionDAG &DAG) const;
5312
5313 /// Expand round(fp) to fp conversion
5314 /// \param N Node to expand
5315 /// \returns The expansion result
5316 SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const;
5317
5318 /// Expand check for floating point class.
5319 /// \param ResultVT The type of intrinsic call result.
5320 /// \param Op The tested value.
5321 /// \param Test The test to perform.
5322 /// \param Flags The optimization flags.
5323 /// \returns The expansion result or SDValue() if it fails.
5324 SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test,
5325 SDNodeFlags Flags, const SDLoc &DL,
5326 SelectionDAG &DAG) const;
5327
5328 /// Expand CTPOP nodes. Expands vector/scalar CTPOP nodes,
5329 /// vector nodes can only succeed if all operations are legal/custom.
5330 /// \param N Node to expand
5331 /// \returns The expansion result or SDValue() if it fails.
5332 SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;
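// Expansion sketch (illustrative, the classic bit-twiddling form for i32):
//   v = v - ((v >> 1) & 0x55555555);                // pairwise bit sums
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333); // 2-bit group sums
//   v = (v + (v >> 4)) & 0x0F0F0F0F;                // nibble sums per byte
//   return (v * 0x01010101) >> 24;                  // accumulate bytes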
5333
5334 /// Expand VP_CTPOP nodes.
5335 /// \returns The expansion result or SDValue() if it fails.
5336 SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;
5337
5338 /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes,
5339 /// vector nodes can only succeed if all operations are legal/custom.
5340 /// \param N Node to expand
5341 /// \returns The expansion result or SDValue() if it fails.
5342 SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const;
5343
5344 /// Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
5345 /// \param N Node to expand
5346 /// \returns The expansion result or SDValue() if it fails.
5347 SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const;
5348
5349 /// Expand CTTZ via Table Lookup.
5350 /// \param N Node to expand
5351 /// \returns The expansion result or SDValue() if it fails.
5352 SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT,
5353 SDValue Op, unsigned NumBitsPerElt) const;
5354
5355 /// Expand CTTZ/CTTZ_ZERO_UNDEF nodes. Expands vector/scalar CTTZ nodes,
5356 /// vector nodes can only succeed if all operations are legal/custom.
5357 /// \param N Node to expand
5358 /// \returns The expansion result or SDValue() if it fails.
5359 SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const;
5360
5361 /// Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
5362 /// \param N Node to expand
5363 /// \returns The expansion result or SDValue() if it fails.
5364 SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const;
5365
5366 /// Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
5367 /// \param N Node to expand
5368 /// \returns The expansion result or SDValue() if it fails.
5369 SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const;
5370
5371 /// Expand ABS nodes. Expands vector/scalar ABS nodes,
5372 /// vector nodes can only succeed if all operations are legal/custom.
5373 /// (ABS x) -> (XOR (ADD x, (SRA x, type_size - 1)), (SRA x, type_size - 1))
5374 /// \param N Node to expand
5375 /// \param IsNegative indicate negated abs
5376 /// \returns The expansion result or SDValue() if it fails.
5377 SDValue expandABS(SDNode *N, SelectionDAG &DAG,
5378 bool IsNegative = false) const;
5379
5380 /// Expand ABDS/ABDU nodes. Expands vector/scalar ABDS/ABDU nodes.
5381 /// \param N Node to expand
5382 /// \returns The expansion result or SDValue() if it fails.
5383 SDValue expandABD(SDNode *N, SelectionDAG &DAG) const;
5384
5385 /// Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
5386 /// \param N Node to expand
5387 /// \returns The expansion result or SDValue() if it fails.
5388 SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const;
5389
5390 /// Expand BSWAP nodes. Expands scalar/vector BSWAP nodes with i16/i32/i64
5391 /// scalar types. Returns SDValue() if expand fails.
5392 /// \param N Node to expand
5393 /// \returns The expansion result or SDValue() if it fails.
5394 SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const;
5395
5396 /// Expand VP_BSWAP nodes. Expands VP_BSWAP nodes with
5397 /// i16/i32/i64 scalar types. Returns SDValue() if expand fails. \param N Node
5398 /// to expand \returns The expansion result or SDValue() if it fails.
5399 SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const;
5400
5401 /// Expand BITREVERSE nodes. Expands scalar/vector BITREVERSE nodes.
5402 /// Returns SDValue() if expand fails.
5403 /// \param N Node to expand
5404 /// \returns The expansion result or SDValue() if it fails.
5405 SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
5406
5407 /// Expand VP_BITREVERSE nodes. Expands VP_BITREVERSE nodes with
5408 /// i8/i16/i32/i64 scalar types. \param N Node to expand \returns The
5409 /// expansion result or SDValue() if it fails.
5410 SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const;
5411
5412 /// Turn load of vector type into a load of the individual elements.
5413 /// \param LD load to expand
5414 /// \returns BUILD_VECTOR and TokenFactor nodes.
5415 std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
5416 SelectionDAG &DAG) const;
5417
5418 /// Turn a store of a vector type into stores of the individual elements.
5419 /// \param ST Store with a vector value type
5420 /// \returns TokenFactor of the individual store chains.
5421 SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const;
5422
5423 /// Expands an unaligned load to 2 half-size loads for an integer, and
5424 /// possibly more for vectors.
5425 std::pair<SDValue, SDValue> expandUnalignedLoad(LoadSDNode *LD,
5426 SelectionDAG &DAG) const;
5427
5428 /// Expands an unaligned store to 2 half-size stores for integer values, and
5429 /// possibly more for vectors.
5430 SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const;
5431
5432 /// Increments memory address \p Addr according to the type of the value
5433 /// \p DataVT that should be stored. If the data is stored in compressed
5434 /// form, the memory address should be incremented according to the number of
5435 /// the stored elements. This number is equal to the number of '1' bits
5436 /// in the \p Mask.
5437 /// \p DataVT is a vector type. \p Mask is a vector value.
5438 /// \p DataVT and \p Mask have the same number of vector elements.
5439 SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL,
5440 EVT DataVT, SelectionDAG &DAG,
5441 bool IsCompressedMemory) const;
5442
5443 /// Get a pointer to vector element \p Idx located in memory for a vector of
5444 /// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
5445 /// bounds the returned pointer is unspecified, but will be within the vector
5446 /// bounds.
5447 SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
5448 SDValue Index) const;
5449
5450 /// Get a pointer to a sub-vector of type \p SubVecVT at index \p Idx located
5451 /// in memory for a vector of type \p VecVT starting at a base address of
5452 /// \p VecPtr. If \p Idx plus the size of \p SubVecVT is out of bounds the
5453 /// returned pointer is unspecified, but the value returned will be such that
5454 /// the entire subvector would be within the vector bounds.
5455 SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
5456 EVT SubVecVT, SDValue Index) const;
5457
5458 /// Method for building the DAG expansion of ISD::[US][MIN|MAX]. This
5459 /// method accepts integers as its arguments.
5460 SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const;
5461
5462 /// Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT. This
5463 /// method accepts integers as its arguments.
5464 SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const;
5465
5466 /// Method for building the DAG expansion of ISD::[US]CMP. This
5467 /// method accepts integers as its arguments.
5468 SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const;
5469
5470 /// Method for building the DAG expansion of ISD::[US]SHLSAT. This
5471 /// method accepts integers as its arguments.
5472 SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const;
5473
5474 /// Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT]. This
5475 /// method accepts integers as its arguments.
5476 SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const;
5477
5478 /// Method for building the DAG expansion of ISD::[US]DIVFIX[SAT]. This
5479 /// method accepts integers as its arguments.
5480 /// Note: This method may fail if the division could not be performed
5481 /// within the type. Clients must retry with a wider type if this happens.
5482 SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl,
5483 SDValue LHS, SDValue RHS,
5484 unsigned Scale, SelectionDAG &DAG) const;
5485
5486 /// Method for building the DAG expansion of ISD::U(ADD|SUB)O. Expansion
5487 /// always succeeds and populates the Result and Overflow arguments.
5488 void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5489 SelectionDAG &DAG) const;
5490
5491 /// Method for building the DAG expansion of ISD::S(ADD|SUB)O. Expansion
5492 /// always succeeds and populates the Result and Overflow arguments.
5493 void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5494 SelectionDAG &DAG) const;
5495
5496 /// Method for building the DAG expansion of ISD::[US]MULO. Returns whether
5497 /// expansion was successful and populates the Result and Overflow arguments.
5498 bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow,
5499 SelectionDAG &DAG) const;
5500
5501 /// forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or
5502 /// brute force via a wide multiplication. The expansion works by
5503 /// attempting to do a multiplication on a wider type twice the size of the
5504 /// original operands. LL and LH represent the lower and upper halves of the
5505 /// first operand. RL and RH represent the lower and upper halves of the
5506 /// second operand. The upper and lower halves of the result are stored in Lo
5507 /// and Hi.
5508 void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
5509 EVT WideVT, const SDValue LL, const SDValue LH,
5510 const SDValue RL, const SDValue RH, SDValue &Lo,
5511 SDValue &Hi) const;
5512
5513 /// Same as above, but creates the upper halves of each operand by
5514 /// sign/zero-extending the operands.
5515 void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed,
5516 const SDValue LHS, const SDValue RHS, SDValue &Lo,
5517 SDValue &Hi) const;
5518
5519 /// Expand a VECREDUCE_* into an explicit calculation. If Count is specified,
5520 /// only the first Count elements of the vector are used.
5521 SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const;
5522
5523 /// Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
5524 SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const;
5525
5526 /// Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
5527 /// Returns true if the expansion was successful.
5528 bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const;
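// Expansion identity (illustrative): when only [SU]DIV is legal, the
// remainder is recovered as
//   rem = a - (a / b) * b
// whereas a legal [SU]DIVREM node yields the remainder result directly.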
5529
5530 /// Method for building the DAG expansion of ISD::VECTOR_SPLICE. This
5531 /// method accepts vectors as its arguments.
5532 SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const;
5533
5534 /// Expand a VECTOR_COMPRESS into a sequence of extract-element stores to a
5535 /// temporary, advancing the store position, before re-loading the final vector.
5536 SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const;
5537
5538 /// Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC
5539 /// on the current target. A VP_SETCC will additionally be given a Mask
5540 /// and/or EVL not equal to SDValue().
5541 ///
5542 /// If the SETCC has been legalized using AND / OR, then the legalized node
5543 /// will be stored in LHS. RHS and CC will be set to SDValue(). NeedInvert
5544 /// will be set to false. This will also hold if the VP_SETCC has been
5545 /// legalized using VP_AND / VP_OR.
5546 ///
5547 /// If the SETCC / VP_SETCC has been legalized by using
5548 /// getSetCCSwappedOperands(), then the values of LHS and RHS will be
5549 /// swapped, CC will be set to the new condition, and NeedInvert will be set
5550 /// to false.
5551 ///
5552 /// If the SETCC / VP_SETCC has been legalized using the inverse condcode,
5553 /// then LHS and RHS will be unchanged, CC will set to the inverted condcode,
5554 /// and NeedInvert will be set to true. The caller must invert the result of
5555 /// the SETCC with SelectionDAG::getLogicalNOT() or take equivalent action to
5556 /// swap the effect of a true/false result.
5557 ///
5558 /// \returns true if the SETCC / VP_SETCC has been legalized, false if it
5559 /// hasn't.
5560 bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS,
5561 SDValue &RHS, SDValue &CC, SDValue Mask,
5562 SDValue EVL, bool &NeedInvert, const SDLoc &dl,
5563 SDValue &Chain, bool IsSignaling = false) const;
5564
5565 //===--------------------------------------------------------------------===//
5566 // Instruction Emitting Hooks
5567 //
5568
5569 /// This method should be implemented by targets that mark instructions with
5570 /// the 'usesCustomInserter' flag. These instructions are special in various
5571 /// ways, which require special support to insert. The specified MachineInstr
5572 /// is created but not inserted into any basic blocks, and this method is
5573 /// called to expand it into a sequence of instructions, potentially also
5574 /// creating new basic blocks and control flow.
5575 /// As long as the returned basic block is different (i.e., we created a new
5576 /// one), the custom inserter is free to modify the rest of \p MBB.
5577 virtual MachineBasicBlock *
5578 EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const;
5579
5580 /// This method should be implemented by targets that mark instructions with
5581 /// the 'hasPostISelHook' flag. These instructions must be adjusted after
5582 /// instruction selection by target hooks. e.g. To fill in optional defs for
5583 /// ARM 's' setting instructions.
5584 virtual void AdjustInstrPostInstrSelection(MachineInstr &MI,
5585 SDNode *Node) const;
5586
5587 /// If this function returns true, SelectionDAGBuilder emits a
5588 /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
5589 virtual bool useLoadStackGuardNode(const Module &M) const { return false; }
5590
5591 virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
5592 const SDLoc &DL) const {
5593 llvm_unreachable("not implemented for this target");
5594 }
5595
5596 /// Lower TLS global address SDNode for target independent emulated TLS model.
5597 SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
5598 SelectionDAG &DAG) const;
5599
5600 /// Expands target specific indirect branch for the case of JumpTable
5601 /// expansion.
5602 virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value,
5603 SDValue Addr, int JTI,
5604 SelectionDAG &DAG) const;
5605
5606 // seteq(x, 0) -> truncate(srl(ctlz(zext(x)), log2(#bits)))
5607 // If we're comparing for equality to zero and isCtlzFast is true, expose the
5608 // fact that this can be implemented as a ctlz/srl pair, so that the dag
5609 // combiner can fold the new nodes.
5610 SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const;
5611
5612 // Return true if `X & Y eq/ne 0` is preferable to `X & Y ne/eq Y`
5613 virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const {
5614 return true;
5615 }
5616
5617 // Expand vector operation by dividing it into smaller length operations and
5618 // joining their results. SDValue() is returned when expansion did not happen.
5619 SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const;
5620
5621private:
5622 SDValue foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
5623 const SDLoc &DL, DAGCombinerInfo &DCI) const;
5624 SDValue foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond,
5625 const SDLoc &DL, DAGCombinerInfo &DCI) const;
5626
5627 SDValue optimizeSetCCOfSignedTruncationCheck(EVT SCCVT, SDValue N0,
5628 SDValue N1, ISD::CondCode Cond,
5629 DAGCombinerInfo &DCI,
5630 const SDLoc &DL) const;
5631
5632 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0
5633 SDValue optimizeSetCCByHoistingAndByConstFromLogicalShift(
5634 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond,
5635 DAGCombinerInfo &DCI, const SDLoc &DL) const;
5636
5637 SDValue prepareUREMEqFold(EVT SETCCVT, SDValue REMNode,
5638 SDValue CompTargetNode, ISD::CondCode Cond,
5639 DAGCombinerInfo &DCI, const SDLoc &DL,
5640 SmallVectorImpl<SDNode *> &Created) const;
5641 SDValue buildUREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
5642 ISD::CondCode Cond, DAGCombinerInfo &DCI,
5643 const SDLoc &DL) const;
5644
5645 SDValue prepareSREMEqFold(EVT SETCCVT, SDValue REMNode,
5646 SDValue CompTargetNode, ISD::CondCode Cond,
5647 DAGCombinerInfo &DCI, const SDLoc &DL,
5648 SmallVectorImpl<SDNode *> &Created) const;
5649 SDValue buildSREMEqFold(EVT SETCCVT, SDValue REMNode, SDValue CompTargetNode,
5650 ISD::CondCode Cond, DAGCombinerInfo &DCI,
5651 const SDLoc &DL) const;
5652};
5653
5654/// Given an LLVM IR type and return type attributes, compute the return value
5655/// EVTs and flags, and optionally also the offsets, if the return value is
5656/// being lowered to memory.
5657void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr,
5658 SmallVectorImpl<ISD::OutputArg> &Outs,
5659 const TargetLowering &TLI, const DataLayout &DL);
5660
5661} // end namespace llvm
5662
5663#endif // LLVM_CODEGEN_TARGETLOWERING_H
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const
Perform a store-conditional operation to Addr.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isOperationExpand(unsigned Op, EVT VT) const
Return true if the specified operation is illegal on this target or unlikely to be made legal with cu...
int InstructionOpcodeToISD(unsigned Opcode) const
Get the ISD node that corresponds to the Instruction class opcode.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
virtual bool enableAggressiveFMAFusion(LLT Ty) const
Return true if target always benefits from combining into FMA for a given value type.
virtual void emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform a bit test atomicrmw using a target-specific intrinsic.
void setOperationAction(ArrayRef< unsigned > Ops, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual bool requiresUniformRegister(MachineFunction &MF, const Value *) const
Allows target to decide about the register class of the specific value that is live outside the defin...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
virtual unsigned getVaListSizeInBits(const DataLayout &DL) const
Returns the size of the platform's va_list object.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
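These action setters are normally called from a target's TargetLowering constructor, before any lowering runs. A minimal sketch, assuming a hypothetical MyTargetLowering subclass; the opcode and type choices are illustrative, not from any real target:
  // Hypothetical constructor fragment. LegalizeAction members (Expand,
  // Custom, ...) are visible unqualified inside a TargetLowering subclass.
  MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
      : TargetLowering(TM) {
    setOperationAction(ISD::SDIV, MVT::i64, Expand);   // no native i64 division
    setOperationAction(ISD::SELECT, MVT::f64, Custom); // handled in LowerOperation
    // The ArrayRef overload applies one action to several opcodes at once.
    setOperationAction({ISD::FSIN, ISD::FCOS}, MVT::f32, Expand);
  }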
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
virtual bool preferSextInRegOfTruncate(EVT TruncVT, EVT VT, EVT ExtVT) const
virtual bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
virtual bool hasAndNot(SDValue X) const
Return true if the target has a bitwise and-not operation: X = ~A & B This can be used to simplify se...
void initActions()
Initialize all of the actions to default values.
ReciprocalEstimate
Reciprocal estimate status values used by the functions below.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
virtual bool isShuffleMaskLegal(ArrayRef< int >, EVT) const
Targets can use this to indicate that they only support some VECTOR_SHUFFLE operations,...
virtual bool enableAggressiveFMAFusion(EVT VT) const
Return true if target always benefits from combining into FMA for a given value type.
virtual bool isComplexDeinterleavingOperationSupported(ComplexDeinterleavingOperation Operation, Type *Ty) const
Does this target support complex deinterleaving with the given operation and type.
virtual bool shouldRemoveRedundantExtend(SDValue Op) const
Return true (the default) if it is profitable to remove a sext_inreg(x) where the sext is redundant,...
bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed store is legal on this target.
SDValue promoteTargetBoolean(SelectionDAG &DAG, SDValue Bool, EVT ValVT) const
Promote the given target boolean to a target boolean of the given type.
virtual bool isFMADLegal(const SelectionDAG &DAG, const SDNode *N) const
Returns true if the node can be combined with another node to form an ISD::FMAD.
virtual bool hasStandaloneRem(EVT VT) const
Return true if the target can handle a standalone remainder operation.
virtual bool isExtFreeImpl(const Instruction *I) const
Return true if the extension represented by I is free.
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
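A common query pattern, sketched under the assumption that a const TargetLowering &TLI, a DataLayout DL, an LLVMContext Ctx, and an Instruction I are in scope:
  // Map an IR type to an EVT, then ask whether the target supports it
  // natively and, if not, what it legalizes to.
  EVT VT = TLI.getValueType(DL, I.getType());
  bool Native = TLI.isTypeLegal(VT);
  EVT LegalVT = TLI.getTypeToTransformTo(Ctx, VT);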
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
LegalizeAction getIndexedMaskedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
virtual bool canCombineTruncStore(EVT ValVT, EVT MemVT, bool LegalOnly) const
virtual bool isSelectSupported(SelectSupportKind) const
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual bool isEqualityCmpFoldedWithSignedCmp() const
Return true if instruction generated for equality comparison is folded with instruction generated for...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual bool isLegalICmpImmediate(int64_t) const
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
virtual bool convertSetCCLogicToBitwiseLogic(EVT VT) const
Use bitwise logic to make pairs of compares more efficient.
void setAtomicLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const
Returns the target-specific address of the unsafe stack pointer.
virtual bool shouldFormOverflowOp(unsigned Opcode, EVT VT, bool MathUsed) const
Try to convert math with an overflow comparison into the corresponding DAG node operation.
ShiftLegalizationStrategy
Return the preferred strategy to legalize this SHIFT instruction, with ExpansionFactor being the recu...
virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
virtual bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const
Return if the target supports combining a chain like:
virtual Value * createComplexDeinterleavingIR(IRBuilderBase &B, ComplexDeinterleavingOperation OperationType, ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB, Value *Accumulator=nullptr) const
Create the IR node for the given complex deinterleaving operation.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const
Return true if it is beneficial to convert a load of a constant to just the constant itself.
virtual bool isSupportedFixedPointOperation(unsigned Op, EVT VT, unsigned Scale) const
Custom method defined by each target to indicate if an operation which may require a scale is support...
void setLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
virtual Sched::Preference getSchedulingPreference(SDNode *) const
Some scheduler, e.g.
virtual MachineInstr * EmitKCFICheck(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator &MBBI, const TargetInstrInfo *TII) const
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
bool isExtLoad(const LoadInst *Load, const Instruction *Ext, const DataLayout &DL) const
Return true if Load and Ext can form an ExtLoad.
int getRecipEstimateSqrtEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a square root of the given type based on the function's at...
virtual bool canOpTrap(unsigned Op, EVT VT) const
Returns true if the operation can trap for the value type.
LegalizeTypeAction getTypeAction(MVT VT) const
virtual bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const
Lower an interleaved load to target specific intrinsics.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual bool shouldInsertFencesForAtomic(const Instruction *I) const
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
virtual bool shouldLocalize(const MachineInstr &MI, const TargetTransformInfo *TTI) const
Check whether or not MI needs to be moved close to its uses.
virtual unsigned getMaxPermittedBytesForAlignment(MachineBasicBlock *MBB) const
Return the maximum amount of bytes allowed to be emitted when padding for alignment.
virtual bool allowsMisalignedMemoryAccesses(LLT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
LLT handling variant.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual bool isSafeMemOpType(MVT) const
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const
Perform a cmpxchg expansion using a target-specific method.
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
virtual unsigned getMinimumJumpTableEntries() const
Return lower limit for number of blocks in a jump table.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
bool rangeFitsInWord(const APInt &Low, const APInt &High, const DataLayout &DL) const
Check whether the range [Low,High] fits in a machine word.
virtual bool isCtpopFast(EVT VT) const
Return true if ctpop instruction is fast.
virtual MachineMemOperand::Flags getTargetMMOFlags(const Instruction &I) const
This callback is used to inspect load/store instructions and add target-specific MachineMemOperand fl...
unsigned MaxGluedStoresPerMemcpy
Specify max number of store instructions to glue in inlined memcpy.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
bool isPaddedAtMostSignificantBitsWhenStored(EVT VT) const
Indicates if any padding is guaranteed to go at the most significant bits when storing the type to me...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) const
Return true if lowering to a jump table is suitable for a set of case clusters which may contain NumC...
unsigned getMinCmpXchgSizeInBits() const
Returns the size of the smallest cmpxchg or ll/sc instruction the backend supports.
virtual Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const
Perform a masked atomicrmw using a target-specific intrinsic.
virtual bool areJTsAllowed(const Function *Fn) const
Return true if lowering to a jump table is allowed.
bool enableExtLdPromotion() const
Return true if the target wants to use the optimization that turns ext(promotableInst1(....
virtual bool isFPExtFoldable(const MachineInstr &MI, unsigned Opcode, LLT DestTy, LLT SrcTy) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
virtual bool lowerInterleaveIntrinsicToStore(IntrinsicInst *II, StoreInst *SI, SmallVectorImpl< Instruction * > &DeadInsts) const
Lower an interleave intrinsic to a target specific store intrinsic.
void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and ind...
void setMaxBytesForAlignment(unsigned MaxBytes)
bool isOperationLegalOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal using promotion.
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual bool hasBitTest(SDValue X, SDValue Y) const
Return true if the target has a bit-test instruction: (X & (1 << Y)) ==/!= 0 This knowledge can be us...
MVT getRegisterType(LLVMContext &Context, EVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const
Lower an interleaved store to target specific intrinsics.
virtual bool needsFixedCatchObjects() const
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
void setMaxLargeFPConvertBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum fp convert the backend supports.
virtual EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &) const
Returns the target specific optimal type for load and store operations as a result of memset,...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
virtual bool isCheapToSpeculateCttz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic cttz.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual bool useFPRegsForHalfType() const
LegalizeAction getCondCodeAction(ISD::CondCode CC, MVT VT) const
Return how the condition code should be treated: either it is legal, needs to be expanded to some oth...
bool hasExtractBitsInsn() const
Return true if the target has BitExtract instructions.
bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation is legal on this target.
virtual bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: fold (conv (load x)) -> (load (conv*)x) On arch...
LegalizeAction getIndexedStoreAction(unsigned IdxMode, MVT VT) const
Return how the indexed store should be treated: either it is legal, needs to be promoted to a larger ...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
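For example, a sketch from a hypothetical constructor; ISD::PRE_INC and ISD::POST_INC are the standard MemIndexedMode values:
  // Advertise pre- and post-increment i32 loads as natively supported.
  setIndexedLoadAction({ISD::PRE_INC, ISD::POST_INC}, MVT::i32, Legal);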
unsigned getMaxStoresPerMemcpy(bool OptSize) const
Get maximum # of store operations permitted for llvm.memcpy.
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
virtual bool softPromoteHalfType() const
virtual bool areTwoSDNodeTargetMMOFlagsMergeable(const MemSDNode &NodeX, const MemSDNode &NodeY) const
Return true if it is valid to merge the TargetMMOFlags in two SDNodes.
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
virtual bool isCommutativeBinOp(unsigned Opcode) const
Returns true if the opcode is a commutative binary operation.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
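A sketch of how the atomic-capability knobs fit together in a hypothetical constructor; the widths are illustrative:
  setMaxAtomicSizeInBitsSupported(64); // wider atomics become __atomic_* libcalls
  setMinCmpXchgSizeInBits(32);         // narrower cmpxchg is widened by AtomicExpandPass
  setSupportsUnalignedAtomics(false);  // require naturally aligned atomics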
virtual bool isFPImmLegal(const APFloat &, EVT, bool ForCodeSize=false) const
Returns true if the target can instruction select the specified FP immediate natively.
virtual bool isExtractVecEltCheap(EVT VT, unsigned Index) const
Return true if extraction of a scalar element from the given vector type at the given index is cheap.
virtual MVT::SimpleValueType getCmpLibcallReturnType() const
Return the ValueType for comparison libcalls.
void setOperationAction(ArrayRef< unsigned > Ops, MVT VT, LegalizeAction Action)
virtual bool optimizeFMulOrFDivAsShiftAddBitcast(SDNode *N, SDValue FPConst, SDValue IntPow2) const
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
SelectSupportKind
Enum that describes what type of support for selects the target has.
LegalizeAction getIndexedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
virtual bool shouldTransformSignedTruncationCheck(EVT XVT, unsigned KeptBits) const
Should we transform the IR-optimal check for whether a given truncation down into KeptBits would be trun...
virtual bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT, EVT SrcVT) const
Return true if an fpext operation input to an Opcode operation is free (for instance,...
bool isLegalRC(const TargetRegisterInfo &TRI, const TargetRegisterClass &RC) const
Return true if the value types that can be represented by the specified register class are all legal.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
Value * getDefaultSafeStackPointerLocation(IRBuilderBase &IRB, bool UseTLS) const
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context, EVT VT) const
Returns true if we should normalize select(N0&N1, X, Y) => select(N0, select(N1, X,...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
virtual StringRef getStackProbeSymbolName(const MachineFunction &MF) const
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
virtual bool preferScalarizeSplat(SDNode *N) const
virtual bool lowerDeinterleaveIntrinsicToLoad(IntrinsicInst *DI, LoadInst *LI, SmallVectorImpl< Instruction * > &DeadInsts) const
Lower a deinterleave intrinsic to a target specific load intrinsic.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
bool isIndexedMaskedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed masked load is legal on this target.
virtual ISD::NodeType getExtendForAtomicOps() const
Returns how the platform's atomic operations are extended (ZERO_EXTEND, SIGN_EXTEND,...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
virtual bool shouldExpandCmpUsingSelects(EVT VT) const
Should we expand [US]CMP nodes using two selects and two compares, or by doing arithmetic on boolean ...
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
virtual LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &) const
LLT returning variant.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
virtual void emitExpandAtomicRMW(AtomicRMWInst *AI) const
Perform an atomicrmw expansion using a target-specific method.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
virtual bool reduceSelectOfFPConstantLoads(EVT CmpOpVT) const
Return true if it is profitable to convert a select of FP constants into a constant pool load whose a...
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
virtual bool hasStackProbeSymbol(const MachineFunction &MF) const
Returns true if the target uses a symbol to emit stack probes (see getStackProbeSymbolName).
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
bool isSlowDivBypassed() const
Returns true if target has indicated at least one type should be bypassed.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const
Return true if EXTRACT_SUBVECTOR is cheap for extracting this result type from this source type with ...
virtual bool isMulAddWithConstProfitable(SDValue AddNode, SDValue ConstNode) const
Return true if it may be profitable to transform (mul (add x, c1), c2) -> (add (mul x,...
virtual bool shouldExtendTypeInLibCall(EVT Type) const
Returns true if arguments should be extended in lib calls.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
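For instance, a hypothetical target that produces 0/1 booleans in scalar registers but all-ones lane masks in vector registers might configure (a sketch):
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);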
virtual void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const
virtual bool isFsqrtCheap(SDValue X, SelectionDAG &DAG) const
Return true if SQRT(X) shouldn't be replaced with X*RSQRT(X).
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
int getDivRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a division of the given type based on the function's attributes.
virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to fold a pair of shifts into a mask.
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const
Return true if the target shall perform extract vector element and store given that the vector is kno...
virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned getMaxExpandSizeMemcmp(bool OptSize) const
Get maximum # of load operations permitted for memcmp.
bool isStrictFPEnabled() const
Return true if the target supports strict float operations.
virtual bool shouldAvoidTransformToShift(EVT VT, unsigned Amount) const
Return true if creating a shift of the type by the given amount is not profitable.
virtual bool isFPExtFree(EVT DestVT, EVT SrcVT) const
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
virtual bool shouldFoldSelectWithSingleBitTest(EVT VT, const APInt &AndMask) const
virtual Value * getIRStackGuard(IRBuilderBase &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
MVT getSimpleValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the MVT corresponding to this LLVM type. See getValueType.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
virtual MVT getPreferredSwitchConditionType(LLVMContext &Context, EVT ConditionVT) const
Returns preferred type for switch condition.
virtual bool shouldFoldSelectWithIdentityConstant(unsigned BinOpcode, EVT VT) const
Return true if pulling a binary operation into a select with an identity constant is profitable.
virtual bool shouldReassociateReduction(unsigned RedOpc, EVT VT) const
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
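A sketch of the usual registration sequence in a hypothetical constructor; MyTarget::GPR32RegClass, MyTarget::FPR32RegClass, and Subtarget stand in for TableGen-generated names:
  addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  addRegisterClass(MVT::f32, &MyTarget::FPR32RegClass);
  // Must run after all register classes have been added.
  computeRegisterProperties(Subtarget.getRegisterInfo());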
bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal for a comparison of the specified types on this ...
virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const
Return true if the target can combine store(extractelement VectorTy, Idx).
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
LegalizeAction getAtomicLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Same as getLoadExtAction, but for atomic loads.
int getRecipEstimateDivEnabled(EVT VT, MachineFunction &MF) const
Return a ReciprocalEstimate enum value for a division of the given type based on the function's attri...
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const
void setSupportsUnalignedAtomics(bool UnalignedSupported)
Sets whether unaligned atomic operations are supported.
void setLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, ArrayRef< MVT > MemVTs, LegalizeAction Action)
virtual bool isJumpTableRelative() const
bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps, const APInt &Low, const APInt &High, const DataLayout &DL) const
Return true if lowering to a bit test is suitable for a set of case clusters which contains NumDests ...
virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const
Return the type to use for a scalar shift opcode, given the shifted amount type.
virtual bool preferIncOfAddToSubOfNot(EVT VT) const
These two forms are equivalent: sub y, (xor x, -1) and add (add x, 1), y. The variant with two add's is IR...
virtual bool ShouldShrinkFPConstant(EVT) const
If true, then instruction selection should seek to shrink the FP constant of the specified type to a ...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
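For example (a sketch; "__mytarget_srem64" is a made-up runtime symbol):
  setLibcallName(RTLIB::SREM_I64, "__mytarget_srem64");
  setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::C);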
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
unsigned getMaxDivRemBitWidthSupported() const
Returns the size in bits of the maximum div/rem the backend supports.
virtual bool isLegalAddImmediate(int64_t) const
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
virtual bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast from SrcAS to DestAS is "cheap", such that e.g.
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const
Return how this store with truncation should be treated: either it is legal, needs to be promoted to ...
virtual bool shouldKeepZExtForFP16Conv() const
Does this target require the clearing of high-order bits in a register passed to the fp16 to fp conve...
virtual AtomicExpansionKind shouldCastAtomicRMWIInIR(AtomicRMWInst *RMWI) const
Returns how the given atomic atomicrmw should be cast by the IR-level AtomicExpand pass.
void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and in...
virtual bool shouldConsiderGEPOffsetSplit() const
const ValueTypeActionImpl & getValueTypeActions() const
virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
virtual bool isTruncateFree(SDValue Val, EVT VT2) const
Return true if truncating the specific node Val to type VT2 is free.
virtual bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy, EVT NewVT) const
Return true if it is profitable to reduce a load to a smaller type.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type 'VT'...
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual unsigned getCustomCtpopCost(EVT VT, ISD::CondCode Cond) const
Return the maximum number of "x & (x - 1)" operations that can be done instead of deferring to a cust...
virtual bool shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y, unsigned OldShiftOpcode, unsigned NewShiftOpcode, SelectionDAG &DAG) const
Given the pattern (X & (C l>>/<< Y)) ==/!= 0 return true if it should be transformed into: ((X <</l>>...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
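A sketch of jump-table tuning in a hypothetical constructor, with illustrative thresholds:
  setMinimumJumpTableEntries(8);  // need at least 8 cases to form a table
  setMaximumJumpTableSize(4096);  // never emit a table with more entries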
virtual ~TargetLoweringBase()=default
virtual bool isFNegFree(EVT VT) const
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void setLibcallName(ArrayRef< RTLIB::Libcall > Calls, const char *Name)
LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return how this load with extension should be treated: either it is legal, needs to be promoted to a ...
bool hasMultipleConditionRegisters() const
Return true if multiple condition registers are available.
virtual bool shouldInsertTrailingFenceForAtomicStore(const Instruction *I) const
Whether AtomicExpandPass should automatically insert a trailing fence without reducing the ordering f...
virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
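For example, a target without an f64-to-f32 truncating store could declare (sketch):
  // The legalizer will emit FP_ROUND plus a plain f32 store instead.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);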
bool isExtFree(const Instruction *I) const
Return true if the extension represented by I is free.
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool getAddrModeArguments(IntrinsicInst *, SmallVectorImpl< Value * > &, Type *&) const
CodeGenPrepare sinks address calculations into the same BB as Load/Store instructions reading the add...
virtual Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const
Perform a masked cmpxchg using a target-specific intrinsic.
virtual bool isZExtFree(EVT FromTy, EVT ToTy) const
virtual ISD::NodeType getExtendForAtomicCmpSwapArg() const
Returns how the platform's atomic compare and swap expects its comparison value to be extended (ZERO_...
BooleanContent
Enum that describes how the target represents true/false values.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
virtual bool isIntDivCheap(EVT VT, AttributeList Attr) const
Return true if integer divide is usually cheaper than a sequence of several shifts,...
virtual ShiftLegalizationStrategy preferredShiftLegalizationStrategy(SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const
virtual uint8_t getRepRegClassCostFor(MVT VT) const
Return the cost of the 'representative' register class for the specified value type.
virtual bool isZExtFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
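The usual query pattern in DAG combines, sketched assuming TLI, DAG, DL, VT, and operands A, B, C are in scope:
  // Bail out unless the target can handle the node we are about to build.
  if (!TLI.isOperationLegalOrCustom(ISD::FMA, VT))
    return SDValue();
  return DAG.getNode(ISD::FMA, DL, VT, A, B, C);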
bool isPredictableSelectExpensive() const
Return true if selects are only cheaper than branches if the branch is unlikely to be predicted right...
virtual bool mergeStoresAfterLegalization(EVT MemVT) const
Allow store merging for the specified type after legalization in addition to before legalization.
virtual bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
Return true if the target supports a memory access of this type for the given address space and align...
unsigned getMaxStoresPerMemmove(bool OptSize) const
Get maximum # of store operations permitted for llvm.memmove.
virtual bool isProfitableToHoist(Instruction *I) const
unsigned getGatherAllAliasesMaxDepth() const
virtual LegalizeAction getCustomOperationAction(SDNode &Op) const
How to legalize this custom operation?
virtual bool isFMAFasterThanFMulAndFAdd(const Function &F, Type *) const
IR version.
virtual bool hasAndNotCompare(SDValue Y) const
Return true if the target should transform: (X & Y) == Y --> (~X & Y) == 0 (X & Y) !...
virtual bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT, unsigned NumElem, unsigned AddrSpace) const
Return true if it is expected to be cheaper to do a store of vector constant with the given size and ...
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const
Return true if it's profitable to narrow operations of type SrcVT to DestVT.
virtual bool isMultiStoresCheaperThanBitsMerge(EVT LTy, EVT HTy) const
Return true if it is cheaper to split the store of a merged int val from a pair of smaller values int...
bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal or custom on this target.
TargetLoweringBase(const TargetLoweringBase &)=delete
virtual unsigned getMaxGluedStoresPerMemcpy() const
Get maximum # of store operations to be glued together.
bool isAtomicLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified atomic load with extension is legal on this target.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const
There are two ways to clear extreme bits (either low or high): Mask: x & (-1 << y) (the instcombine c...
virtual bool alignLoopsWithOptSize() const
Should loops be aligned even when the function is marked OptSize (but not MinSize).
unsigned getMaxAtomicSizeInBitsSupported() const
Returns the maximum atomic operation size (in bits) supported by the backend.
bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed load is legal on this target.
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
virtual bool canMergeStoresTo(unsigned AS, EVT MemVT, const MachineFunction &MF) const
Returns if it's reasonable to merge stores to MemVT size.
LegalizeAction getStrictFPOperationAction(unsigned Op, EVT VT) const
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
virtual bool preferABDSToABSWithNSW(EVT VT) const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
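For example (sketch): promoting an i8 ctlz to operate on i32 instead of expanding it:
  setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
  // setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32) is shorthand
  // for the two calls above.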
unsigned getMinimumJumpTableDensity(bool OptForSize) const
Return lower limit of the density in a jump table.
bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const
Return true if the specified load with extension is legal on this target.
virtual bool hasInlineStackProbe(const MachineFunction &MF) const
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const
Return true if the @llvm.experimental.vector.partial.reduce.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
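For instance, a hypothetical target without native unordered FP comparisons might declare (sketch):
  setCondCodeAction({ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE},
                    MVT::f32, Expand);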
void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
const DenseMap< unsigned int, unsigned int > & getBypassSlowDivWidths() const
Returns map of slow types for division or remainder with corresponding fast types.
void setOperationPromotedToType(ArrayRef< unsigned > Ops, MVT OrigVT, MVT DestVT)
unsigned getMaxLargeFPConvertBitWidthSupported() const
Returns the size in bits of the maximum fp convert the backend supports.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, LLT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const
virtual bool isCheapToSpeculateCtlz(Type *Ty) const
Return true if it is cheap to speculate a call to intrinsic ctlz.
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual bool isTruncateFree(LLT FromTy, LLT ToTy, LLVMContext &Ctx) const
AndOrSETCCFoldKind
Enum of different potentially desirable ways to fold (and/or (setcc ...), (setcc ....
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
virtual bool shouldScalarizeBinop(SDValue VecOp) const
Try to convert an extract element of a vector binary operation into an extract element followed by a ...
Align getPrefFunctionAlignment() const
Return the preferred function alignment.
Align getMinFunctionAlignment() const
Return the minimum function alignment.
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be expanded by the IR-level AtomicExpand pass into.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual bool isCtlzFast() const
Return true if ctlz instruction is fast.
virtual bool useSoftFloat() const
virtual bool isStoreBitCastBeneficial(EVT StoreVT, EVT BitcastVT, const SelectionDAG &DAG, const MachineMemOperand &MMO) const
Return true if the following transform is beneficial: (store (y (conv x)), y*)) -> (store x,...
BooleanContent getBooleanContents(EVT Type) const
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isIndexedMaskedStoreLegal(unsigned IdxMode, EVT VT) const
Return true if the specified indexed masked store is legal on this target.
virtual int64_t getPreferredLargeGEPBaseOffset(int64_t MinOffset, int64_t MaxOffset) const
Return the preferred common base offset.
virtual bool isVectorClearMaskLegal(ArrayRef< int >, EVT) const
Similar to isShuffleMaskLegal.
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const
Return pair that represents the legalization kind (first) that needs to happen to EVT (second) in ord...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
virtual bool shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, bool IsSigned) const
Return true if it is more correct/profitable to use strict FP_TO_INT conversion operations - canonica...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
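A sketch of typical extending-load configuration in a hypothetical constructor:
  // i1 extending loads are promoted to a wider memory type for every
  // integer result type; sext i16 -> i32 loads are natively supported.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD,  VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  }
  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i16, Legal);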
bool hasTargetDAGCombine(ISD::NodeType NT) const
If true, the target has custom DAG combine transformations that it can perform for the specified node...
virtual bool fallBackToDAGISel(const Instruction &Inst) const
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
virtual bool shouldSplatInsEltVarIndex(EVT) const
Return true if inserting a scalar into a variable element of an undef vector is more efficiently hand...
LegalizeAction getIndexedMaskedLoadAction(unsigned IdxMode, MVT VT) const
Return how the indexed load should be treated: either it is legal, needs to be promoted to a larger s...
NegatibleCost
Enum that specifies when a float negation is beneficial.
bool isTruncStoreLegalOrCustom(EVT ValVT, EVT MemVT) const
Return true if the specified store with truncation has solution on this target.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const
Get the CondCode that's to be used to test the result of the comparison libcall against zero.
int getSqrtRefinementSteps(EVT VT, MachineFunction &MF) const
Return the refinement step count for a square root of the given type based on the function's attribut...
virtual unsigned preferedOpcodeForCmpEqPiecesOfOperand(EVT VT, unsigned ShiftOpc, bool MayTransformRotate, const APInt &ShiftOrRotateAmt, const std::optional< APInt > &AndMask) const
virtual void emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const
Perform an atomicrmw whose result is only used by a comparison, using a target-specific intrinsic.
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
virtual bool isFMADLegal(const MachineInstr &MI, LLT Ty) const
Returns true if MI can be combined with another instruction to form TargetOpcode::G_FMAD.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, ArrayRef< MVT > VTs, LegalizeAction Action)
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
virtual bool isLegalAddScalableImmediate(int64_t) const
Return true if adding the specified scalable immediate is legal, that is the target has add instructi...
std::vector< ArgListEntry > ArgListTy
bool allowsMemoryAccessForAlignment(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const
This function returns true if the memory access is aligned or if the target allows this specific unal...
virtual bool shouldAlignPointerArgs(CallInst *, unsigned &, Align &) const
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
virtual bool hasVectorBlend() const
Return true if the target has a vector blend instruction.
virtual AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual bool isVScaleKnownToBeAPowerOfTwo() const
Return true only if vscale must be a power of two.
virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const
virtual MachineMemOperand::Flags getTargetMMOFlags(const MemSDNode &Node) const
This callback is used to inspect load/store SDNode.
virtual Type * shouldConvertSplatType(ShuffleVectorInst *SVI) const
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI...
virtual bool isZExtFree(SDValue Val, EVT VT2) const
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
void setAtomicLoadExtAction(ArrayRef< unsigned > ExtTypes, MVT ValVT, MVT MemVT, LegalizeAction Action)
virtual bool shouldRemoveExtendFromGSIndex(SDValue Extend, EVT DataVT) const
unsigned getMaxStoresPerMemset(bool OptSize) const
Get maximum # of store operations permitted for llvm.memset.
virtual LLVM_READONLY LLT getPreferredShiftAmountTy(LLT ShiftValueTy) const
Return the preferred type to use for a shift opcode, given the shifted amount type is ShiftValueTy.
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, ArrayRef< MVT > VTs, LegalizeAction Action)
virtual AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isCondCodeLegalOrCustom(ISD::CondCode CC, MVT VT) const
Return true if the specified condition code is legal or custom for a comparison of the specified type...
virtual bool isComplexDeinterleavingSupported() const
Does this target support complex deinterleaving.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual bool addressingModeSupportsTLS(const GlobalValue &) const
Returns true if the target's addressing mode can target thread local storage (TLS).
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual bool shouldConvertPhiType(Type *From, Type *To) const
Given a set in interconnected phis of type 'From' that are loaded/stored or bitcast to type 'To',...
virtual bool isFAbsFree(EVT VT) const
Return true if an fabs operation is free to the point where it is never worthwhile to replace it with...
virtual bool isLegalStoreImmediate(int64_t Value) const
Return true if the specified immediate is legal for the value input of a store instruction.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
virtual bool preferZeroCompareBranch() const
Return true if the heuristic to prefer icmp eq zero should be used in code gen prepare.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const
If the action for this operation is to promote, this method returns the ValueType to promote to.
virtual bool generateFMAsInMachineCombiner(EVT VT, CodeGenOptLevel OptLevel) const
virtual LoadInst * lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const
On some platforms, an AtomicRMW that never actually modifies the value (such as fetch_add of 0) can b...
virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AddrSpace, Instruction *I=nullptr) const
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
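A sketch of the query, assuming TLI, DL, and an LLVMContext Ctx are in scope; the addressing mode below corresponds to [base + 4*index + 16]:
  TargetLoweringBase::AddrMode AM;
  AM.HasBaseReg = true; // base register present
  AM.Scale = 4;         // scaled index register
  AM.BaseOffs = 16;     // constant displacement
  bool OK = TLI.isLegalAddressingMode(DL, AM, Type::getInt32Ty(Ctx),
                                      /*AddrSpace=*/0);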
virtual bool hasPairedLoad(EVT, Align &) const
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
virtual bool convertSelectOfConstantsToMath(EVT VT) const
Return true if a select of constants (select Cond, C1, C2) should be transformed into simple math ops...
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual bool optimizeExtendOrTruncateConversion(Instruction *I, Loop *L, const TargetTransformInfo &TTI) const
Try to optimize extending or truncating conversion instructions (like zext, trunc,...
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
std::pair< LegalizeTypeAction, EVT > LegalizeKind
LegalizeKind holds the legalization kind that needs to happen to EVT in order to type-legalize it.
TargetLoweringBase & operator=(const TargetLoweringBase &)=delete
MulExpansionKind
Enum that specifies when a multiplication should be expanded.
static ISD::NodeType getExtendForContent(BooleanContent Content)
virtual bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const
Should we generate fp_to_si_sat and fp_to_ui_sat from type FPVT to type VT from min(max(fptoi)) satur...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
SDValue expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][ADD|SUB]SAT.
SDValue buildSDIVPow2WithCMov(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Build sdiv by power-of-2 with conditional move instructions. Ref: "Hacker's Delight" by Henry Warren 1...
virtual ConstraintWeight getMultipleConstraintMatchWeight(AsmOperandInfo &info, int maIndex) const
Examine constraint type and operand type and determine a weight value.
SDValue expandVPCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTLZ/VP_CTLZ_ZERO_UNDEF nodes.
bool expandMULO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]MULO.
bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL into two nodes.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
virtual SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps, bool &UseOneConstNR, bool Reciprocal) const
Hooks for building estimates in place of slower divisions and square roots.
virtual bool isDesirableToCommuteWithShift(const MachineInstr &MI, bool IsAfterLegal) const
GlobalISel - return true if it is profitable to move this shift by a constant amount through its oper...
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual void ReplaceNodeResults(SDNode *, SmallVectorImpl< SDValue > &, SelectionDAG &) const
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
virtual bool isUsedByReturnOnly(SDNode *, SDValue &) const
Return true if result of the specified node is used by a return node only.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual void computeKnownBitsForFrameIndex(int FIOp, KnownBits &Known, const MachineFunction &MF) const
Determine which of the bits of FrameIndex FIOp are known to be 0.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
SDValue scalarizeVectorStore(StoreSDNode *ST, SelectionDAG &DAG) const
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression if the cost is not expensive.
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
This method can be implemented by targets that want to expose additional information about sign bits ...
virtual bool isReassocProfitable(SelectionDAG &DAG, SDValue N0, SDValue N1) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
SDValue expandVPBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand VP_BSWAP nodes.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
SDValue getCheaperOrNeutralNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, const NegatibleCost CostThreshold=NegatibleCost::Neutral, unsigned Depth=0) const
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
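For example, softening an f128 addition into its runtime call might look like this; RTLIB::ADD_F128 and makeLibCall are real, while the surrounding lowering context (DAG, LHS, RHS, dl, Chain) is assumed:
  SDValue Ops[2] = {LHS, RHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  std::pair<SDValue, SDValue> Call =
      makeLibCall(DAG, RTLIB::ADD_F128, MVT::f128, Ops, CallOptions, dl, Chain);
  SDValue Result = Call.first;    // the returned value
  SDValue OutChain = Call.second; // the updated chain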
SDValue expandVecReduceSeq(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_SEQ_* into an explicit ordered calculation.
virtual Register getRegisterByName(const char *RegName, LLT Ty, const MachineFunction &MF) const
Return the register ID of the name passed in.
SDValue expandCTLZ(SDNode *N, SelectionDAG &DAG) const
Expand CTLZ/CTLZ_ZERO_UNDEF nodes.
SDValue expandBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand BITREVERSE nodes.
SDValue expandCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand CTTZ/CTTZ_ZERO_UNDEF nodes.
virtual SDValue expandIndirectJTBranch(const SDLoc &dl, SDValue Value, SDValue Addr, int JTI, SelectionDAG &DAG) const
Expands target specific indirect branch for the case of JumpTable expansion.
SDValue expandABD(SDNode *N, SelectionDAG &DAG) const
Expand ABDS/ABDU nodes.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
virtual bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
virtual Align computeKnownAlignForTargetInstr(GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine the known alignment for the pointer value R.
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandShlSat(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]SHLSAT.
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual bool isTargetCanonicalConstantNode(SDValue Op) const
Returns true if the given Opc is considered a canonical constant for the target, which should not be ...
SDValue expandFP_TO_INT_SAT(SDNode *N, SelectionDAG &DAG) const
Expand FP_TO_[US]INT_SAT into FP_TO_[US]INT and selects or min/max.
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
SDValue expandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG) const
Expands an unaligned store to 2 half-size stores for integer values, and possibly more for vectors.
SDValue SimplifyMultipleUseDemandedVectorElts(SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
Helper wrapper around SimplifyMultipleUseDemandedBits, demanding all bits from only some vector eleme...
virtual void verifyTargetSDNode(const SDNode *N) const
Check the given SDNode. Aborts if it is invalid.
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue unwrapAddress(SDValue N) const
void expandSADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::S(ADD|SUB)O.
SDValue expandVPBITREVERSE(SDNode *N, SelectionDAG &DAG) const
Expand VP_BITREVERSE nodes.
SDValue expandABS(SDNode *N, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
SDValue expandVecReduce(SDNode *Node, SelectionDAG &DAG) const
Expand a VECREDUCE_* into an explicit calculation.
bool ShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const
Check to see if the specified operand of the specified instruction is a constant integer.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual bool IsDesirableToPromoteOp(SDValue, EVT &) const
This method queries the target whether it is beneficial for the dag combiner to promote the specified node.
SDValue expandVPCTTZElements(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ_ELTS/VP_CTTZ_ELTS_ZERO_UNDEF nodes.
SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::SDIV node expressing a divide by constant, return a DAG expression to select that will ...
virtual const char * getTargetNodeName(unsigned Opcode) const
This method returns the name of a target specific DAG node.
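The usual override is a switch over the target's private opcodes; MyTgtISD and its enumerators here are hypothetical:
  const char *MyTargetLowering::getTargetNodeName(unsigned Opcode) const {
    switch (Opcode) {
    case MyTgtISD::CALL:     return "MyTgtISD::CALL";
    case MyTgtISD::RET_GLUE: return "MyTgtISD::RET_GLUE";
    default:                 return nullptr; // not one of our nodes
    }
  }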
bool expandFP_TO_UINT(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand float to UINT conversion.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual bool SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded vector elements, returning true on success...
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const
Insert explicit copies in entry and exit blocks.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
bool expandREM(SDNode *Node, SDValue &Result, SelectionDAG &DAG) const
Expand an SREM or UREM using SDIV/UDIV or SDIVREM/UDIVREM, if legal.
std::pair< SDValue, SDValue > expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Expands an unaligned load to 2 half-size loads for an integer, and possibly more for vectors.
SDValue expandFMINIMUMNUM_FMAXIMUMNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimumnum/fmaximumnum into multiple comparisons with selects.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
virtual bool isTypeDesirableForOp(unsigned, EVT VT) const
Return true if the target has native support for the specified value type and it is 'desirable' to us...
SDValue expandVectorSplice(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::VECTOR_SPLICE.
virtual const char * LowerXConstraint(EVT ConstraintVT) const
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand CTPOP nodes.
SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization, bool IsAfterLegalTypes, SmallVectorImpl< SDNode * > &Created) const
Given an ISD::UDIV node expressing a divide by constant, return a DAG expression to select that will ...
SDValue expandVectorNaryOpBySplitting(SDNode *Node, SelectionDAG &DAG) const
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
SDValue expandBSWAP(SDNode *N, SelectionDAG &DAG) const
Expand BSWAP nodes.
TargetLowering & operator=(const TargetLowering &)=delete
SDValue expandFMINIMUM_FMAXIMUM(SDNode *N, SelectionDAG &DAG) const
Expand fminimum/fmaximum into multiple comparisons with selects.
SDValue CTTZTableLookup(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op, unsigned NumBitsPerElt) const
Expand CTTZ via Table Lookup.
virtual bool isKnownNeverNaNForTargetNode(SDValue Op, const SelectionDAG &DAG, bool SNaN=false, unsigned Depth=0) const
If SNaN is false, returns true if Op is known never to be any NaN; if SNaN is true, returns true if Op is known never to be a signaling NaN.
bool expandDIVREMByConstant(SDNode *N, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, SDValue LL=SDValue(), SDValue LH=SDValue()) const
Attempt to expand an n-bit div/rem/divrem by constant using a n/2-bit urem by constant and other arit...
virtual bool isDesirableToPullExtFromShl(const MachineInstr &MI) const
GlobalISel - return true if it's profitable to perform the combine: shl ([sza]ext x),...
SDValue getVectorSubVecPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, EVT SubVecVT, SDValue Index) const
Get a pointer to a sub-vector of type SubVecVT at index Idx located in memory for a vector of type Ve...
virtual void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool isPositionIndependent() const
std::pair< StringRef, TargetLowering::ConstraintType > ConstraintPair
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual bool isIndexingLegal(MachineInstr &MI, Register Base, Register Offset, bool IsPre, MachineRegisterInfo &MRI) const
Returns true if the specified base+offset is a legal indexed addressing mode for this target.
virtual void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
ConstraintGroup getConstraintPreferences(AsmOperandInfo &OpInfo) const
Given an OpInfo with list of constraints codes as strings, return a sorted Vector of pairs of constra...
bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const
Expand float(f32) to SINT(i64) conversion.
virtual SDValue SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const
More limited version of SimplifyDemandedBits that can be used to "look through" ops that don't contri...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual void initializeSplitCSR(MachineBasicBlock *Entry) const
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
virtual bool isSDNodeSourceOfDivergence(const SDNode *N, FunctionLoweringInfo *FLI, UniformityInfo *UA) const
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual bool ExpandInlineAsm(CallInst *) const
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to.
virtual SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &RefinementSteps) const
Return a reciprocal estimate value for the input operand.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
std::pair< SDValue, SDValue > scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const
Turn load of vector type into a load of the individual elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual bool isSDNodeAlwaysUniform(const SDNode *N) const
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
void forceExpandWideMUL(SelectionDAG &DAG, const SDLoc &dl, bool Signed, EVT WideVT, const SDValue LL, const SDValue LH, const SDValue RL, const SDValue RH, SDValue &Lo, SDValue &Hi) const
forceExpandWideMUL - Unconditionally expand a MUL into either a libcall or brute force via a wide mul...
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
virtual bool isDesirableToCommuteXorWithShift(const SDNode *N) const
Return true if it is profitable to combine an XOR of a logical shift to create a logical shift of NOT...
TargetLowering(const TargetLowering &)=delete
virtual bool shouldSimplifyDemandedVectorElts(SDValue Op, const TargetLoweringOpt &TLO) const
Return true if the target supports simplifying demanded vector elements by converting them to undefs.
bool isConstFalseVal(SDValue N) const
Return true if N is a constant or constant vector equal to the false value from getBooleanContents().
SDValue IncrementMemoryAddress(SDValue Addr, SDValue Mask, const SDLoc &DL, EVT DataVT, SelectionDAG &DAG, bool IsCompressedMemory) const
Increments memory address Addr according to the type of the value DataVT that should be stored.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
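The shape of such an override, assuming a hypothetical pseudo opcode and helper:
  MachineBasicBlock *
  MyTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *MBB) const {
    switch (MI.getOpcode()) {
    case MyTgt::SELECT_PSEUDO:    // assumed pseudo marked usesCustomInserter
      return emitSelect(MI, MBB); // assumed helper that splits the block
    default:
      llvm_unreachable("unexpected instr type to insert");
    }
  }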
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual bool isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth=0) const
Return true if vector Op has the same value across all DemandedElts, indicating any elements which ma...
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
virtual bool getPostIndexedAddressParts(SDNode *, SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if this node ...
virtual bool shouldSplitFunctionArgumentsAsLittleEndian(const DataLayout &DL) const
For most targets, an LLVM type must be broken down into multiple smaller types.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, bool foldBooleans, DAGCombinerInfo &DCI, const SDLoc &dl) const
Try to simplify a setcc built with the specified operands and cc.
SDValue expandFunnelShift(SDNode *N, SelectionDAG &DAG) const
Expand funnel shift.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
bool LegalizeSetCCCondCode(SelectionDAG &DAG, EVT VT, SDValue &LHS, SDValue &RHS, SDValue &CC, SDValue Mask, SDValue EVL, bool &NeedInvert, const SDLoc &dl, SDValue &Chain, bool IsSignaling=false) const
Legalize a SETCC or VP_SETCC with given LHS and RHS and condition code CC on the current target.
bool isExtendedTrueVal(const ConstantSDNode *N, EVT VT, bool SExt) const
Return true if N is a true value when extended to VT.
bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &DemandedBits, TargetLoweringOpt &TLO) const
Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
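The conventional override dispatches each opcode that was registered as Custom to a dedicated helper; the helpers below are assumed:
  SDValue MyTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
    switch (Op.getOpcode()) {
    case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG); // assumed
    case ISD::VASTART:       return lowerVASTART(Op, DAG);       // assumed
    default:
      llvm_unreachable("unexpected operation registered as Custom");
    }
  }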
bool isConstTrueVal(SDValue N) const
Return true if N is a constant or constant vector equal to the true value from getBooleanContents().
virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTPOP nodes.
SDValue expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, SDValue LHS, SDValue RHS, unsigned Scale, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]DIVFIX[SAT].
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT, SDValue Index) const
Get a pointer to vector element Idx located in memory for a vector of type VecVT starting at a base a...
virtual bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const
Return true if it is profitable to move this shift by a constant amount through its operand,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
SDValue expandVPCTTZ(SDNode *N, SelectionDAG &DAG) const
Expand VP_CTTZ/VP_CTTZ_ZERO_UNDEF nodes.
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *, const MachineBasicBlock *, unsigned, MCContext &) const
SDValue expandVECTOR_COMPRESS(SDNode *Node, SelectionDAG &DAG) const
Expand a vector VECTOR_COMPRESS into a sequence of extract element, store temporarily,...
virtual const Constant * getTargetConstantFromLoad(LoadSDNode *LD) const
This method returns the constant pool value that will be loaded by LD.
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
SDValue createSelectForFMINNUM_FMAXNUM(SDNode *Node, SelectionDAG &DAG) const
Try to convert the fminnum/fmaxnum to a compare/select sequence.
virtual unsigned combineRepeatedFPDivisors() const
Indicate whether this target prefers to combine FDIVs with the same divisor.
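A hypothetical override returning the minimum number of same-divisor FDIVs worth collapsing into one reciprocal plus multiplies:
  unsigned MyTargetLowering::combineRepeatedFPDivisors() const {
    return 3; // only profitable once the divisor is reused three times
  }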
SDValue expandROT(SDNode *N, bool AllowVectorOps, SelectionDAG &DAG) const
Expand rotations.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual AndOrSETCCFoldKind isDesirableToCombineLogicOpOfSETCC(const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const
virtual void HandleByVal(CCState *, unsigned &, Align) const
Target-specific cleanup for formal ByVal parameters.
virtual const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual bool getPreIndexedAddressParts(SDNode *, SDValue &, SDValue &, ISD::MemIndexedMode &, SelectionDAG &) const
Returns true by value, base pointer and offset pointer and addressing mode by reference if the node's...
SDValue expandFMINNUM_FMAXNUM(SDNode *N, SelectionDAG &DAG) const
Expand fminnum/fmaxnum into fminnum_ieee/fmaxnum_ieee with quieted inputs.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual FastISel * createFastISel(FunctionLoweringInfo &, const TargetLibraryInfo *) const
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
virtual bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
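A hypothetical override that opts into custom 32-bit entries (so LowerCustomJumpTableEntry is consulted) when generating position-independent code:
  unsigned MyTargetLowering::getJumpTableEncoding() const {
    if (isPositionIndependent())
      return MachineJumpTableInfo::EK_Custom32;
    return TargetLowering::getJumpTableEncoding();
  }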
virtual bool checkForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op, const TargetRegisterInfo *TRI, const TargetInstrInfo *TII, unsigned &PhysReg, int &Cost) const
Allows the target to handle physreg-carried dependency in target-specific way.
virtual bool supportSplitCSR(MachineFunction *MF) const
Return true if the target supports that a subset of CSRs for the given machine function is handled ex...
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue expandCMP(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US]CMP.
virtual bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0, Register N1) const
void expandShiftParts(SDNode *N, SDValue &Lo, SDValue &Hi, SelectionDAG &DAG) const
Expand shift-by-parts.
virtual bool mayBeEmittedAsTailCall(const CallInst *) const
Return true if the target may be able to emit the call instruction as a tail call.
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const
This method will be invoked for all target nodes and for any target-independent nodes that the target...
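A deliberately tiny sketch of the hook's shape (real targets dispatch on N->getOpcode() to many helpers); the fold itself is illustrative:
  SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
    SelectionDAG &DAG = DCI.DAG;
    if (N->getOpcode() == ISD::AND)
      if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
        if (C->isZero()) // (and x, 0) --> 0
          return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
    return SDValue(); // no combine performed
  }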
virtual bool canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const
Return true if Op can create undef or poison from non-undef & non-poison operands.
SDValue expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[U|S]MULFIX[SAT].
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
SDValue expandIntMINMAX(SDNode *Node, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::[US][MIN|MAX].
virtual void computeKnownBitsForTargetInstr(GISelKnownBits &Analysis, Register R, KnownBits &Known, const APInt &DemandedElts, const MachineRegisterInfo &MRI, unsigned Depth=0) const
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
void expandUADDSUBO(SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const
Method for building the DAG expansion of ISD::U(ADD|SUB)O.
virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
virtual SDValue BuildSREMPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const
Targets may override this function to provide custom SREM lowering for power-of-2 denominators.
bool expandUINT_TO_FP(SDNode *N, SDValue &Result, SDValue &Chain, SelectionDAG &DAG) const
Expand UINT(i64) to double(f64) conversion.
bool expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, SDValue LHS, SDValue RHS, SmallVectorImpl< SDValue > &Result, EVT HiLoVT, SelectionDAG &DAG, MulExpansionKind Kind, SDValue LL=SDValue(), SDValue LH=SDValue(), SDValue RL=SDValue(), SDValue RH=SDValue()) const
Expand a MUL or [US]MUL_LOHI of n-bit values into two or four nodes, respectively,...
SDValue expandAVG(SDNode *N, SelectionDAG &DAG) const
Expand vector/scalar AVGCEILS/AVGCEILU/AVGFLOORS/AVGFLOORU nodes.
virtual bool isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode, EVT) const
virtual bool isDesirableToTransformToIntegerOp(unsigned, EVT) const
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
Primary interface to the complete machine description for the target machine.
Definition: TargetMachine.h:77
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:264
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:128
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:237
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
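A sketch of the kind of IR type dispatch many of the hooks above perform; V is an assumed llvm::Value*:
  Type *Ty = V->getType();
  if (Ty->isPointerTy()) {
    // pointers lower to the target's pointer MVT
  } else if (Ty->isFloatingPointTy()) {
    // may require soft-float libcalls on targets without an FPU
  } else if (Ty->isIntegerTy()) {
    // legal, promoted, or expanded depending on the bit width
  }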
#define UINT64_MAX
Definition: DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ Fast
Attempts to make calls as fast as possible (e.g.
Definition: CallingConv.h:41
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition: CallingConv.h:24
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition: ISDOpcodes.h:40
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition: ISDOpcodes.h:257
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
Definition: ISDOpcodes.h:374
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:276
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition: ISDOpcodes.h:502
@ ADD
Simple integer binary arithmetic operators.
Definition: ISDOpcodes.h:246
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition: ISDOpcodes.h:380
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition: ISDOpcodes.h:814
@ FADD
Simple binary floating point operators.
Definition: ISDOpcodes.h:397
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
Definition: ISDOpcodes.h:387
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
Definition: ISDOpcodes.h:1490
@ SIGN_EXTEND
Conversion operators.
Definition: ISDOpcodes.h:805
@ AVGCEILS
AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an integer of type i[N+2],...
Definition: ISDOpcodes.h:685
@ BRIND
BRIND - Indirect branch.
Definition: ISDOpcodes.h:1123
@ BR_JT
BR_JT - Jumptable branch.
Definition: ISDOpcodes.h:1127
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition: ISDOpcodes.h:356
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
Definition: ISDOpcodes.h:642
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition: ISDOpcodes.h:330
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition: ISDOpcodes.h:674
@ SHL
Shift and rotation operations.
Definition: ISDOpcodes.h:735
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
Definition: ISDOpcodes.h:1044
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition: ISDOpcodes.h:811
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
Definition: ISDOpcodes.h:1031
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition: ISDOpcodes.h:697
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
Definition: ISDOpcodes.h:393
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
Definition: ISDOpcodes.h:1050
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition: ISDOpcodes.h:709
@ AVGFLOORS
AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of type i[N+1],...
Definition: ISDOpcodes.h:680
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:286
@ SPLAT_VECTOR_PARTS
SPLAT_VECTOR_PARTS(SCALAR1, SCALAR2, ...) - Returns a vector with the scalar values joined together a...
Definition: ISDOpcodes.h:651
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:347
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
Definition: ISDOpcodes.h:1055
@ ABDS
ABDS/ABDU - Absolute difference - Return the absolute difference between two numbers interpreted as s...
Definition: ISDOpcodes.h:692
static const int LAST_LOADEXT_TYPE
Definition: ISDOpcodes.h:1595
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
Definition: ISDOpcodes.h:1562
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1613
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
Definition: ISDOpcodes.h:1593
static const int LAST_INDEXED_MODE
Definition: ISDOpcodes.h:1564
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:480
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:385
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
Definition: Alignment.h:145
void * PointerTy
Definition: GenericValue.h:21
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:346
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
AtomicOrdering
Atomic ordering for LLVM's memory model.
EVT getApproximateEVTForLLT(LLT Ty, LLVMContext &Ctx)
CombineLevel
Definition: DAGCombine.h:15
@ AfterLegalizeDAG
Definition: DAGCombine.h:19
@ AfterLegalizeVectorOps
Definition: DAGCombine.h:18
@ BeforeLegalizeTypes
Definition: DAGCombine.h:16
DWARFExpression::Operation Op
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:217
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
Represent subnormal handling kind for floating point instruction inputs and outputs.
Extended Value Type.
Definition: ValueTypes.h:35
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:74
bool bitsLT(EVT VT) const
Return true if this has fewer bits than VT.
Definition: ValueTypes.h:295
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition: ValueTypes.h:147
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:368
bool isByteSized() const
Return true if the bit size is a multiple of 8.
Definition: ValueTypes.h:238
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:289
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:311
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:168
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
Definition: ValueTypes.h:142
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition: ValueTypes.h:157
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:152
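A few of these queries in combination; Ctx is an assumed LLVMContext:
  EVT VT = EVT::getVectorVT(Ctx, MVT::i32, 4); // v4i32
  assert(VT.isVector() && VT.isInteger() && VT.isSimple());
  TypeSize Bits = VT.getSizeInBits();          // a fixed 128 bits here
  MVT Simple = VT.getSimpleVT();               // MVT::v4i32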
ConstraintInfo()=default
Default constructor.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
bool isDstAligned(Align AlignCheck) const
bool allowOverlap() const
bool isFixedDstAlign() const
uint64_t size() const
static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign, bool IsZeroMemset, bool IsVolatile)
Align getDstAlign() const
bool isMemcpyStrSrc() const
bool isAligned(Align AlignCheck) const
static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign, Align SrcAlign, bool IsVolatile, bool MemcpyStrSrc=false)
bool isSrcAligned(Align AlignCheck) const
bool isMemset() const
bool isMemcpy() const
bool isMemcpyWithFixedDstAlign() const
bool isZeroMemset() const
Align getSrcAlign() const
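For instance, the generic memcpy lowering builds a MemOp query like the following before calling findOptimalMemOpLowering; the values are illustrative:
  MemOp Op = MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/true,
                         /*DstAlign=*/Align(8), /*SrcAlign=*/Align(4),
                         /*IsVolatile=*/false);
  if (Op.isMemcpy() && !Op.isFixedDstAlign()) {
    // the target may raise the destination alignment to suit wider stores
  }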
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
std::optional< unsigned > fallbackAddressSpace
PointerUnion< const Value *, const PseudoSourceValue * > ptrVal
This contains information for each constraint that we are lowering.
AsmOperandInfo(InlineAsm::ConstraintInfo Info)
Copy constructor for copying from a ConstraintInfo.
MVT ConstraintVT
The ValueType for the operand value.
std::string ConstraintCode
This contains the actual string for the code, like "m".
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
unsigned getMatchedOperand() const
If this is an input matching constraint, this method returns the output operand it matches.
bool isMatchingInputConstraint() const
Return true if this is an input operand that is a matching constraint like "4".
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setCallee(Type *ResultType, FunctionType *FTy, SDValue Target, ArgListTy &&ArgsList, const CallBase &Call)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
CallLoweringInfo & setInRegister(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setVarArg(bool Value=true)
std::optional< PtrAuthInfo > PAI
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setIsPostTypeLegalization(bool Value=true)
MakeLibCallOptions & setDiscardResult(bool Value=true)
MakeLibCallOptions & setIsSigned(bool Value=true)
MakeLibCallOptions & setTypeListBeforeSoften(ArrayRef< EVT > OpsVT, EVT RetVT, bool Value=true)
MakeLibCallOptions & setNoReturn(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
bool CombineTo(SDValue O, SDValue N)
TargetLoweringOpt(SelectionDAG &InDAG, bool LT, bool LO)