#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-legalizer"
  return [IsExtendedInts, TypeIdx](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return IsExtendedInts && Ty.isValid() && Ty.isScalar();
  };
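// This predicate accepts any valid scalar at the given type index, but only
// when the subtarget enables one of the extended-integer extensions; it lets
// the rules below keep non-standard integer widths (e.g. i24, i48) legal
// instead of forcing them to a standard width. A rough usage sketch (assumed,
// not part of this excerpt):
//   getActionDefinitionsBuilder(G_ADD)
//       .legalIf(typeOfExtendedScalars(0, IsExtendedInts));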
  const unsigned PSize = ST.getPointerSize();
  auto allPtrsScalarsAndVectors = {
      p0, p1, p2, p3, p4, p5, p6, p7, p8,
      p9, p10, p11, p12, s1, s8, s16, s32, s64,
      v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8, v3s16, v3s32,
      v3s64, v4s1, v4s8, v4s16, v4s32, v4s64, v8s1, v8s8, v8s16,
      v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};

  auto allVectors = {v2s1, v2s8, v2s16, v2s32, v2s64, v3s1, v3s8,
                     v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32,
                     v4s64, v8s1, v8s8, v8s16, v8s32, v8s64, v16s1,
                     v16s8, v16s16, v16s32, v16s64};

  auto allShaderVectors = {v2s1, v2s8, v2s16, v2s32, v2s64,
                           v3s1, v3s8, v3s16, v3s32, v3s64,
                           v4s1, v4s8, v4s16, v4s32, v4s64};

  auto allScalarsAndVectors = {
      s1, s8, s16, s32, s64, v2s1, v2s8, v2s16, v2s32, v2s64,
      v3s1, v3s8, v3s16, v3s32, v3s64, v4s1, v4s8, v4s16, v4s32, v4s64,
      v8s1, v8s8, v8s16, v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};

  auto allIntScalarsAndVectors = {s8, s16, s32, s64, v2s8, v2s16,
                                  v2s32, v2s64, v3s8, v3s16, v3s32, v3s64,
                                  v4s8, v4s16, v4s32, v4s64, v8s8, v8s16,
                                  v8s32, v8s64, v16s8, v16s16, v16s32, v16s64};

  auto allBoolScalarsAndVectors = {s1, v2s1, v3s1, v4s1, v8s1, v16s1};

  auto allIntScalars = {s8, s16, s32, s64};

  auto allFloatScalarsAndF16Vector2AndVector4s = {s16, s32, s64, v2s16, v4s16};

  auto allFloatScalarsAndVectors = {
      s16, s32, s64, v2s16, v2s32, v2s64, v3s16, v3s32, v3s64,
      v4s16, v4s32, v4s64, v8s16, v8s32, v8s64, v16s16, v16s32, v16s64};

  auto allFloatAndIntScalarsAndPtrs = {s8, s16, s32, s64, p0, p1, p2, p3, p4,
                                       p5, p6, p7, p8, p9, p10, p11, p12};

  auto allPtrs = {p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12};
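  // These initializer_lists enumerate the LLTs the SPIR-V backend can express
  // directly: booleans, the standard 8/16/32/64-bit scalars, their 2/3/4 (and,
  // for kernels, 8/16) element vectors, and pointers in every SPIR-V address
  // space. They feed the legalFor()/legalForCartesianProduct() rules below;
  // anything outside these sets must be widened, narrowed, or split by the
  // legalizer.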
  auto &allowedVectorTypes = ST.isShader() ? allShaderVectors : allVectors;
  bool IsExtendedInts =
      ST.canUseExtension(
          SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers) ||
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions) ||
      ST.canUseExtension(SPIRV::Extension::SPV_INTEL_int4);
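  // With any of these extensions the module may legitimately contain integer
  // widths other than 8/16/32/64 (arbitrary-precision or 4-bit integers), so
  // the "extended" predicates below accept any valid scalar or vector type
  // rather than forcing a round-trip through the standard widths.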
  auto extendedScalarsAndVectors =
      [IsExtendedInts](const LegalityQuery &Query) {
        const LLT Ty = Query.Types[0];
        return IsExtendedInts && Ty.isValid() && !Ty.isPointerOrPointerVector();
      };
  auto extendedScalarsAndVectorsProduct = [IsExtendedInts](
                                              const LegalityQuery &Query) {
    const LLT Ty1 = Query.Types[0], Ty2 = Query.Types[1];
    return IsExtendedInts && Ty1.isValid() && Ty2.isValid() &&
           !Ty1.isPointerOrPointerVector() && !Ty2.isPointerOrPointerVector();
  };
  auto extendedPtrsScalarsAndVectors =
      [IsExtendedInts](const LegalityQuery &Query) {
        const LLT Ty = Query.Types[0];
        return IsExtendedInts && Ty.isValid();
      };
  uint32_t MaxVectorSize = ST.isShader() ? 4 : 16;
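  // Shader (Vulkan-style) SPIR-V only guarantees 2-, 3- and 4-component
  // vectors; the kernel (OpenCL-style) environment additionally allows 8 and
  // 16 components. Vectors wider than MaxVectorSize therefore get split to
  // fewer elements by the rules below and by the spv_bitcast handling further
  // down.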
    if (Opc != G_EXTRACT_VECTOR_ELT)
  getActionDefinitionsBuilder(
      {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX,
       G_VECREDUCE_ADD, G_VECREDUCE_MUL, G_VECREDUCE_FMUL, G_VECREDUCE_FMIN,
       G_VECREDUCE_FMAX, G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM,
       G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
      .legalFor(allowedVectorTypes)
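      // Vector reductions are only legal when the source vector is one of the
      // allowed vector types for this environment; other shapes presumably
      // have their element count adjusted (or the reduction lowered) by the
      // follow-up rules that are elided from this excerpt.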
      .unsupportedIf(typeIs(1, p9))
                               G_BITREVERSE, G_SADDSAT, G_UADDSAT, G_SSUBSAT,
                               G_USUBSAT, G_SCMP, G_UCMP})
      .legalFor(allIntScalarsAndVectors)
      .legalIf(extendedScalarsAndVectors);
      .legalFor(allFloatScalarsAndVectors);

      .legalForCartesianProduct(allIntScalarsAndVectors,
                                allFloatScalarsAndVectors);

      .legalForCartesianProduct(allIntScalarsAndVectors,
                                allFloatScalarsAndVectors);

      .legalForCartesianProduct(allFloatScalarsAndVectors,
                                allScalarsAndVectors);

      .legalIf(extendedScalarsAndVectorsProduct);

      .legalForCartesianProduct(allScalarsAndVectors)
      .legalIf(extendedScalarsAndVectorsProduct);

      .legalIf(extendedPtrsScalarsAndVectors);

                          typeInSet(1, allPtrsScalarsAndVectors)));

      .legalFor(allFloatAndIntScalarsAndPtrs)

                          typeInSet(1, allPtrsScalarsAndVectors)));

                          typeInSet(1, allFloatScalarsAndVectors)));
                               G_ATOMICRMW_MAX, G_ATOMICRMW_MIN,
                               G_ATOMICRMW_SUB, G_ATOMICRMW_XOR,
                               G_ATOMICRMW_UMAX, G_ATOMICRMW_UMIN})
      .legalForCartesianProduct(allIntScalars, allPtrs);
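  // Integer atomics are legal for every combination of a standard integer
  // scalar value type and a pointer in any address space: the Cartesian
  // product covers type index 0 (the value) and type index 1 (the pointer).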
  getActionDefinitionsBuilder(
      {G_ATOMICRMW_FADD, G_ATOMICRMW_FSUB, G_ATOMICRMW_FMIN, G_ATOMICRMW_FMAX})
      .legalForCartesianProduct(allFloatScalarsAndF16Vector2AndVector4s,
                                allPtrs);
      {G_UADDO, G_SADDO, G_USUBO, G_SSUBO, G_UMULO, G_SMULO})

      .legalForCartesianProduct(allFloatScalarsAndVectors,
                                allIntScalarsAndVectors);

      .legalForCartesianProduct(allFloatScalarsAndVectors);

      allFloatScalarsAndVectors, {s32, v2s32, v3s32, v4s32, v8s32, v16s32});

                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(allFloatScalarsAndVectors);

                                allFloatScalarsAndVectors);

      allFloatScalarsAndVectors, allIntScalarsAndVectors);
  if (ST.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    getActionDefinitionsBuilder(
        {G_CTTZ, G_CTTZ_ZERO_UNDEF, G_CTLZ, G_CTLZ_ZERO_UNDEF})
        .legalForCartesianProduct(allIntScalarsAndVectors,
                                  allIntScalarsAndVectors);
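    // Count-leading/trailing-zero operations have no core SPIR-V opcode; they
    // are only kept legal when the OpenCL extended instruction set (clz/ctz)
    // is available, i.e. for kernel-style modules.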
  verify(*ST.getInstrInfo());

  MI.eraseFromParent();

  Register ConvReg = MRI.createGenericVirtualRegister(ConvTy);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_BITCAST:
    return legalizeBitcast(Helper, MI);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(Helper, MI, GR);
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:

  case TargetOpcode::G_IS_FPCLASS:
    return legalizeIsFPClass(Helper, MI, LocObserver);
  case TargetOpcode::G_ICMP: {
    assert(GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg()));
    auto &Op0 = MI.getOperand(2);
    auto &Op1 = MI.getOperand(3);

    if ((!ST->canDirectlyComparePointers() ||

        MRI.getType(Reg0).isPointer() && MRI.getType(Reg1).isPointer()) {

                                      ST->getPointerSize());
      SPIRVType *SpirvTy = GR->getOrCreateSPIRVType(
          LLVMTy, Helper.MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
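      // When the subtarget cannot compare pointers directly (and, presumably,
      // for ordering predicates that SPIR-V pointer comparisons do not
      // cover), both pointer operands of the G_ICMP are rewritten to
      // pointer-sized integers via convertPtrToInt() using the integer
      // SPIR-V type built above, so the comparison is emitted as a plain
      // integer compare.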
  if (IntrinsicID == Intrinsic::spv_bitcast) {

    LLT DstTy = MRI.getType(DstReg);
    LLT SrcTy = MRI.getType(SrcReg);

    int32_t MaxVectorSize = ST.isShader() ? 4 : 16;

    bool DstNeedsLegalization = false;
    bool SrcNeedsLegalization = false;

        DstNeedsLegalization = true;

        DstNeedsLegalization = true;

    if (SrcTy.isVector()) {
      if (SrcTy.getNumElements() > 4 &&

        SrcNeedsLegalization = true;

      if (SrcTy.getNumElements() > MaxVectorSize) {
        SrcNeedsLegalization = true;

    if (DstNeedsLegalization || SrcNeedsLegalization) {

      MI.eraseFromParent();
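      // The spv_bitcast intrinsic is inspected on both sides: if either the
      // source or the destination is a vector wider than this environment
      // allows (more than 4 elements for shaders, more than 16 for kernels),
      // the original intrinsic is erased and rebuilt in a form the rest of
      // the pipeline can handle; the exact rewrite is elided from this
      // excerpt.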
    MI.eraseFromParent();
bool SPIRVLegalizerInfo::legalizeIsFPClass(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();

  auto &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (DstTy.isVector())

  SPIRVType *SPIRVDstTy = GR->getOrCreateSPIRVType(
      LLVMDstTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,

  unsigned BitSize = SrcTy.getScalarSizeInBits();

  if (SrcTy.isVector()) {
    IntTy = LLT::vector(SrcTy.getElementCount(), IntTy);

  SPIRVType *SPIRVIntTy = GR->getOrCreateSPIRVType(
      LLVMIntTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
  LLT DstTyCopy = DstTy;
  const auto assignSPIRVTy = [&](MachineInstrBuilder &&MI) {

    LLT MITy = MRI.getType(MI.getReg(0));
    assert((MITy == IntTy || MITy == DstTyCopy) &&
           "Unexpected LLT type while lowering G_IS_FPCLASS");
    auto *SPVTy = MITy == IntTy ? SPIRVIntTy : SPIRVDstTy;
    GR->assignSPIRVTypeToVReg(SPVTy, MI.getReg(0), MF);
  const auto buildSPIRVConstant = [&](LLT Ty,
                                      auto &&C) -> MachineInstrBuilder {

    assert((Ty == IntTy || Ty == DstTyCopy) &&
           "Unexpected LLT type while lowering constant for G_IS_FPCLASS");
    SPIRVType *VecEltTy = GR->getOrCreateSPIRVType(
        (Ty == IntTy ? LLVMIntTy : LLVMDstTy)->getScalarType(), MIRBuilder,
        SPIRV::AccessQualifier::ReadWrite,

    GR->assignSPIRVTypeToVReg(VecEltTy, ScalarC.getReg(0), MF);
    MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 0));
    MI.eraseFromParent();

    MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 1));
    MI.eraseFromParent();
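  // Trivial masks are folded up front: an empty FPClassTest mask can never
  // match, so it becomes the constant 0 above, while a mask covering every
  // class (fcAllFlags) always matches and becomes the constant 1; in both
  // cases the original G_IS_FPCLASS is erased without emitting any compares.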
  Register ResVReg = MRI.createGenericVirtualRegister(IntTy);
  MRI.setRegClass(ResVReg, GR->getRegClass(SPIRVIntTy));
  GR->assignSPIRVTypeToVReg(SPIRVIntTy, ResVReg, Helper.MIRBuilder.getMF());
  auto AsInt = MIRBuilder.buildInstr(SPIRV::OpBitcast)
                   .addDef(ResVReg)
                   .addUse(GR->getSPIRVTypeID(SPIRVIntTy))
                   .addUse(SrcReg);
  AsInt = assignSPIRVTy(std::move(AsInt));
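  // The classification works on the raw bit pattern: the float source is
  // bitcast to an equally sized integer and the usual IEEE-754 masks are
  // materialized as SPIR-V constants. SignBit selects the sign bit, ValueMask
  // clears it (|x|), Inf is an all-ones exponent with a zero mantissa, and
  // ExpMask isolates the exponent field; the individual class checks below
  // are composed from compares against these constants.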
  auto SignBitC = buildSPIRVConstant(IntTy, SignBit);
  auto ValueMaskC = buildSPIRVConstant(IntTy, ValueMask);
  auto InfC = buildSPIRVConstant(IntTy, Inf);
  auto ExpMaskC = buildSPIRVConstant(IntTy, ExpMask);
  auto ZeroC = buildSPIRVConstant(IntTy, 0);

  auto Abs = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ValueMaskC));
  auto Sign = assignSPIRVTy(
  auto Res = buildSPIRVConstant(DstTy, 0);

  const auto appendToRes = [&](MachineInstrBuilder &&ToAppend) {
    Res = assignSPIRVTy(
        MIRBuilder.buildOr(DstTyCopy, Res, assignSPIRVTy(std::move(ToAppend))));
  };

    Mask &= ~fcPosFinite;

                                     DstTy, Abs, ExpMaskC));
    appendToRes(MIRBuilder.buildAnd(DstTy, Cmp, Sign));
    Mask &= ~fcNegFinite;
  auto ExpBits = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ExpMaskC));

    Mask &= ~PartialCheck;

    else if (PartialCheck == fcZero)
      auto OneC = buildSPIRVConstant(IntTy, 1);
      auto VMinusOne = MIRBuilder.buildSub(IntTy, V, OneC);
      auto SubnormalRes = assignSPIRVTy(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, VMinusOne,
                               buildSPIRVConstant(IntTy, AllOneMantissa)));
      if (PartialCheck == fcNegSubnormal)
        SubnormalRes = MIRBuilder.buildAnd(DstTy, SubnormalRes, Sign);
      appendToRes(std::move(SubnormalRes));
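      // Subnormal test on the absolute value V: subtracting 1 maps zero to an
      // all-ones value, so the single unsigned compare (V - 1) u< AllOneMantissa
      // is true exactly when V is a non-zero value whose exponent field is 0,
      // i.e. a subnormal; the extra AND with Sign restricts the result to
      // negative subnormals when only fcNegSubnormal was requested.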
    else if (PartialCheck == fcInf)

      auto NegInfC = buildSPIRVConstant(IntTy, NegInf);

    auto InfWithQnanBitC =
        buildSPIRVConstant(IntTy, std::move(Inf) | QNaNBitMask);
    if (PartialCheck == fcNan) {

    } else if (PartialCheck == fcQNan) {

      auto IsNan = assignSPIRVTy(

      auto IsNotQnan = assignSPIRVTy(MIRBuilder.buildICmp(

      appendToRes(MIRBuilder.buildAnd(DstTy, IsNan, IsNotQnan));
    APInt ExpLSB = ExpMask & ~(ExpMask.shl(1));
    auto ExpMinusOne = assignSPIRVTy(
        MIRBuilder.buildSub(IntTy, Abs, buildSPIRVConstant(IntTy, ExpLSB)));
    APInt MaxExpMinusOne = std::move(ExpMask) - ExpLSB;
    auto NormalRes = assignSPIRVTy(

        buildSPIRVConstant(IntTy, MaxExpMinusOne)));

      NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, Sign);

      auto PosSign = assignSPIRVTy(MIRBuilder.buildXor(
          DstTy, Sign, buildSPIRVConstant(DstTy, InversionMask)));
      NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, PosSign);

    appendToRes(std::move(NormalRes));
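    // Normal test: subtracting the exponent LSB from |x| and comparing
    // unsigned-less-than against (ExpMask - ExpLSB) is true exactly when the
    // biased exponent is neither 0 (zero/subnormal) nor all ones (inf/NaN).
    // The result is then narrowed to the requested sign: AND with Sign for
    // fcNegNormal only, or AND with the inverted sign (PosSign) for
    // fcPosNormal only.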
  MI.eraseFromParent();