using namespace LegalityPredicates;
using namespace LegalizeMutations;
static LegalityPredicate typeIsScalarFPArith(unsigned TypeIdx,
                                             const RISCVSubtarget &ST) {
  return [=, &ST](const LegalityQuery &Query) {
    return Query.Types[TypeIdx].isScalar() &&
           ((ST.hasStdExtZfh() && Query.Types[TypeIdx].getSizeInBits() == 16) ||
            (ST.hasStdExtF() && Query.Types[TypeIdx].getSizeInBits() == 32) ||
            (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));
  };
}
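// A LegalityPredicate is just a callable over a LegalityQuery, so rule sets
// below consume the predicate above directly; a minimal sketch (mirroring the
// G_F* rules later in this file):
//
//   getActionDefinitionsBuilder(G_FADD).legalIf(typeIsScalarFPArith(0, ST));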
static LegalityPredicate typeIsLegalIntOrFPVec(
    unsigned TypeIdx, std::initializer_list<LLT> IntOrFPVecTys,
    const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
            ST.hasVInstructionsI64()) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
}

static LegalityPredicate typeIsLegalBoolVec(
    unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
    const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, BoolVecTys), P);
}

static LegalityPredicate typeIsLegalPtrVec(
    unsigned TypeIdx, std::initializer_list<LLT> PtrVecTys,
    const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
            Query.Types[TypeIdx].getScalarSizeInBits() == 32);
  };
  return all(typeInSet(TypeIdx, PtrVecTys), P);
}
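// The three vector predicates above encode the common RVV legality rules:
// vector types require the V/Zve extensions at all, types with a known
// minimum element count of 1 additionally require ELEN == 64, and 64-bit
// elements require hasVInstructionsI64 (Zve64x or better). The nxv16p0 case
// is only accepted with 32-bit pointers; sixteen 64-bit pointers would
// apparently exceed the largest (LMUL=8) register group at the minimum VLEN.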
RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  // ...
  using namespace TargetOpcode;

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
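  // The nxv<N>s<W> and nxv<N>p0 names are LLT shorthands defined earlier in
  // this file; e.g. nxv4s8 is LLT::scalable_vector(4, 8), a scalable vector
  // with a known minimum of 4 elements of 8 bits each, and p0 is a pointer in
  // address space 0.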
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
      .legalFor({s32, sXLen})
      /* ... */;

  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
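  // The carry/overflow variants above have no direct RISC-V encoding, so
  // .lower() expands them into plain G_ADD/G_SUB plus compares via the
  // generic LegalizerHelper.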
  auto &ShiftActions = getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL});
  ShiftActions.legalFor({{s32, s32}, {s32, sXLen}, {sXLen, sXLen}})
      .widenScalarToNextPow2(0)
      /* ... */;

  auto &ExtActions = getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT});
  // ...
  ExtActions.legalFor({{sXLen, s32}});
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }
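  // On RV32 with the D extension an s64 value is carried in a pair of s32
  // GPRs, which is why the s64 <-> 2 x s32 merge/unmerge above stays legal
  // there; all other cases are widened/clamped to exactly sXLen.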
  auto &RotateActions = getActionDefinitionsBuilder({G_ROTL, G_ROTR});
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
    // ...
  }
  RotateActions.lower();
  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();
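  // Zbb/Zbkb provide the rev8 byte-reverse instruction, so an XLen-wide
  // G_BSWAP is legal there; without them the operation is expanded by the
  // generic lowering.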
  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        /* ... */;
  }
  // ...
  CountZerosUndefActions.lower();
  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }
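  // Zbb provides cpop (and cpopw on RV64), so G_CTPOP is legal at s32/sXLen;
  // otherwise it falls back to the generic bit-twiddling expansion.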
  auto &ConstantActions = getActionDefinitionsBuilder(G_CONSTANT);
  ConstantActions.legalFor({s32, p0});
  if (ST.is64Bit())
    ConstantActions.customFor({s64});
  ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);
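  // s64 constants are marked custom so legalizeCustom can consult
  // shouldBeInConstantPool below: immediates that would take a long
  // materialization sequence are loaded from the constant pool instead.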
  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER, G_FREEZE})
      .legalFor({s32, sXLen, p0})
      /* ... */;

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      /* ... */;
  auto &SelectActions =
      getActionDefinitionsBuilder(G_SELECT)
          .legalFor({{s32, sXLen}, {p0, sXLen}})
          /* ... */;
  if (XLen == 64 || ST.hasStdExtD())
    SelectActions.legalFor({{s64, sXLen}});
  SelectActions.widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      /* ... */;
  auto &LoadActions =
      getActionDefinitionsBuilder(G_LOAD)
          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                     {s32, p0, s16, 16},
                                     {s32, p0, s32, 32},
                                     {p0, p0, sXLen, XLen}});
  auto &StoreActions =
      getActionDefinitionsBuilder(G_STORE)
          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                     {s32, p0, s16, 16},
                                     {s32, p0, s32, 32},
                                     {p0, p0, sXLen, XLen}});
  auto &ExtLoadActions =
      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
  if (XLen == 64) {
    LoadActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
                                          {s64, p0, s16, 16},
                                          {s64, p0, s32, 32},
                                          {s64, p0, s64, 64}});
    StoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
                                           {s64, p0, s16, 16},
                                           {s64, p0, s32, 32},
                                           {s64, p0, s64, 64}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
  } else if (ST.hasStdExtD()) {
    LoadActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
    StoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
  }
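  // On RV64 the rules above are extended to s64 accesses; on RV32 the D
  // extension still makes plain 64-bit loads/stores legal, since fld/fsd can
  // move a 64-bit value directly between memory and an FPR.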
  if (ST.hasVInstructions()) {
    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                          {nxv4s8, p0, nxv4s8, 8},
                                          {nxv8s8, p0, nxv8s8, 8},
                                          {nxv16s8, p0, nxv16s8, 8},
                                          {nxv32s8, p0, nxv32s8, 8},
                                          {nxv64s8, p0, nxv64s8, 8},
                                          {nxv2s16, p0, nxv2s16, 16},
                                          {nxv4s16, p0, nxv4s16, 16},
                                          {nxv8s16, p0, nxv8s16, 16},
                                          {nxv16s16, p0, nxv16s16, 16},
                                          {nxv32s16, p0, nxv32s16, 16},
                                          {nxv2s32, p0, nxv2s32, 32},
                                          {nxv4s32, p0, nxv4s32, 32},
                                          {nxv8s32, p0, nxv8s32, 32},
                                          {nxv16s32, p0, nxv16s32, 32}});
    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                           {nxv4s8, p0, nxv4s8, 8},
                                           {nxv8s8, p0, nxv8s8, 8},
                                           {nxv16s8, p0, nxv16s8, 8},
                                           {nxv32s8, p0, nxv32s8, 8},
                                           {nxv64s8, p0, nxv64s8, 8},
                                           {nxv2s16, p0, nxv2s16, 16},
                                           {nxv4s16, p0, nxv4s16, 16},
                                           {nxv8s16, p0, nxv8s16, 16},
                                           {nxv16s16, p0, nxv16s16, 16},
                                           {nxv32s16, p0, nxv32s16, 16},
                                           {nxv2s32, p0, nxv2s32, 32},
                                           {nxv4s32, p0, nxv4s32, 32},
                                           {nxv8s32, p0, nxv8s32, 32},
                                           {nxv16s32, p0, nxv16s32, 32}});

    if (ST.getELen() == 64) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                            {nxv1s16, p0, nxv1s16, 16},
                                            {nxv1s32, p0, nxv1s32, 32}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                             {nxv1s16, p0, nxv1s16, 16},
                                             {nxv1s32, p0, nxv1s32, 32}});
    }

    if (ST.hasVInstructionsI64()) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                            {nxv2s64, p0, nxv2s64, 64},
                                            {nxv4s64, p0, nxv4s64, 64},
                                            {nxv8s64, p0, nxv8s64, 64}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                             {nxv2s64, p0, nxv2s64, 64},
                                             {nxv4s64, p0, nxv4s64, 64},
                                             {nxv8s64, p0, nxv8s64, 64}});
    }
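    // The scalable-vector memory rules above mirror the legal RVV type set:
    // the nxv1 forms need ELEN == 64, and the s64-element forms additionally
    // require hasVInstructionsI64 (Zve64x or the full V extension).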
    // Pointers require that XLen sized elements are legal.
    if (XLen <= ST.getELen()) {
      LoadActions.legalIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
      StoreActions.legalIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
    }
    // ...
  }

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);
  // ...
      .widenScalarToNextPow2(0)
      /* ... */;

  LLT sDoubleXLen = LLT::scalar(2 * XLen);
  if (ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({s32, sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, s32, sXLen);
    // ...
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);
  }
  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_SREM, G_UREM})
        // ...
        .libcallFor({sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        /* ... */;
  } else {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_SREM, G_UREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        /* ... */;
  }
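  // Without M there is no hardware divide, so division/remainder becomes a
  // runtime call; the sXLen cases map to routines like __divdi3 and the
  // double-width sDoubleXLen cases to variants like __divti3 on RV64
  // (illustrative names from compiler-rt/libgcc; the exact RTLIB call is
  // chosen by the legalizer).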
  auto &AbsActions = getActionDefinitionsBuilder(G_ABS);
  if (ST.hasStdExtZbb())
    AbsActions.customFor({sXLen}).minScalar(0, sXLen);
  AbsActions.lower();
  auto &MinMaxActions =
      getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN});
  if (ST.hasStdExtZbb())
    MinMaxActions.legalFor({sXLen}).minScalar(0, sXLen);
  MinMaxActions.lower();
  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FNEG,
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalIf(typeIsScalarFPArith(0, ST));
  getActionDefinitionsBuilder(G_FPTRUNC).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s32)(Query) &&
                typeIs(1, s64)(Query)) ||
               (ST.hasStdExtZfh() && typeIs(0, s16)(Query) &&
                typeIs(1, s32)(Query)) ||
               (ST.hasStdExtZfh() && ST.hasStdExtD() && typeIs(0, s16)(Query) &&
                typeIs(1, s64)(Query));
      });
  getActionDefinitionsBuilder(G_FPEXT).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                typeIs(1, s32)(Query)) ||
               (ST.hasStdExtZfh() && typeIs(0, s32)(Query) &&
                typeIs(1, s16)(Query)) ||
               (ST.hasStdExtZfh() && ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                typeIs(1, s16)(Query));
      });
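  // Each pair above corresponds to a single conversion instruction: fcvt.s.d
  // / fcvt.d.s with D and the fcvt.h.s / fcvt.s.h forms with Zfh; the
  // s16 <-> s64 conversions (fcvt.h.d / fcvt.d.h) need both extensions, hence
  // the combined check.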
  // ...
      .libcallFor({s32, s64});
  auto &SplatActions = getActionDefinitionsBuilder(G_SPLAT_VECTOR);
  // ...
  if (ST.hasVInstructionsF64() && ST.hasStdExtD())
    SplatActions.legalIf(typeIs(1, s64));
  else if (ST.hasVInstructionsI64())
    SplatActions.customIf(typeIs(1, s64));
  // ...

  getLegacyLegalizerInfo().computeTables();
}
bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
    MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
    // ...
    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // Load the source va_list pointer, then store it to the destination.
    // ...
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
    // ...
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  }
}
bool RISCVLegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  // ...
  uint64_t Amount = VRegAndVal->Value.getZExtValue();
  // ...
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(ExtCst.getReg(0));
  Observer.changedInstr(MI);
  return true;
}
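// Sketch of the intent, assuming the elided lines match upstream: a shift
// amount that is a known constant below the register width is rebuilt as an
// s64 G_CONSTANT so the imported SelectionDAG immediate-shift patterns (e.g.
// srai/srli and their *w forms on RV64) can match; non-constant amounts are
// left in registers.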
bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
                                         MachineIRBuilder &MIRBuilder) const {
  // Stores the address of the VarArgsFrameIndex slot into the memory location.
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  MachineFunction *MF = MI.getParent()->getParent();
  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
  int FI = FuncInfo->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  return true;
}
bool RISCVLegalizerInfo::shouldBeInConstantPool(APInt APImm,
                                                bool ShouldOptForSize) const {
  // ...
  if (ShouldOptForSize)
    return true;
  // ...
  unsigned ShiftAmt, AddOpc;
  // ...
}
bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
                                        MachineIRBuilder &MIB) const {
  const LLT XLenTy(STI.getXLenVT());
  Register Dst = MI.getOperand(0).getReg();

  // We define our scalable vector types for lmul=1 to use a 64-bit known
  // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes, so we compute
  // vscale as VLENB / 8.
  static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected VLEN multiple");
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
  if (isPowerOf2_64(Val)) {
    uint64_t Log2 = Log2_64(Val);
    if (Log2 < 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
    } else {
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
    }
  } else if ((Val % 8) == 0) {
    // The multiplier is a multiple of 8: scale VLENB down first.
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
  } else {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
    MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
  }
  MI.eraseFromParent();
  return true;
}
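// Worked example of the arithmetic above, with vscale = VLENB / 8 (one RVV
// "block" is 64 bits, i.e. 8 bytes). For G_VSCALE x Val:
//   Val = 4  (Log2 = 2 < 3)  -> VLENB >> 1
//   Val = 8  (Log2 = 3)      -> VLENB
//   Val = 32 (Log2 = 5 > 3)  -> VLENB << 2
//   Val = 24 (24 % 8 == 0)   -> VLENB * 3
//   otherwise                -> (VLENB >> 3) * Val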
bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
                                     MachineIRBuilder &MIB) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);
  // ...
  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  // ...
  MI.eraseFromParent();
  return true;
}
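// Extending a mask (s1-element) vector has no single instruction, so it is
// rewritten as a vector select between two splats: G_SEXT selects between
// splat(-1) and splat(0), while G_ZEXT/G_ANYEXT select between splat(1) and
// splat(0), per ExtTrueVal above.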
769 "Machine instructions must be Load/Store.");
776 LLT DataTy =
MRI.getType(DstReg);
780 if (!
MI.hasOneMemOperand())
788 if (TLI->allowsMemoryAccessForAlignment(Ctx,
DL, VT, *MMO))
792 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
793 "Unexpected unaligned RVV load type");
796 unsigned NumElements =
820 return MIB.
buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
/// Gets the two common "VL" operands: an all-ones mask and the vector length.
static std::pair<MachineInstrBuilder, Register>
buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI) {
  LLT VecTy = Dst.getLLTTy(MRI);
  // ...
}

static MachineInstrBuilder
buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
                         Register Hi, Register VL, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) {
  // ...
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
}

static MachineInstrBuilder
buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                         const SrcOp &Scalar, Register VL,
                         MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
  return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
                                  Unmerge.getReg(1), VL, MIB, MRI);
}
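// On RV32 an s64 splat value cannot live in a single GPR, so the helpers
// above unmerge it into lo/hi s32 halves and splat them with the
// target-specific G_SPLAT_VECTOR_SPLIT_I64_VL node, the GlobalISel analogue
// of SelectionDAG's SPLAT_VECTOR_SPLIT_I64_VL.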
bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
                                             MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  LLT XLenTy(STI.getXLenVT());

  // Handle case of s64 element vectors on RV32.
  if (XLenTy.getSizeInBits() == 32 &&
      VecTy.getElementType().getSizeInBits() == 64) {
    // ...
    MI.eraseFromParent();
    return true;
  }

  // All-ones and all-zeros mask splats map directly to VMSET/VMCLR.
  if (isAllOnesOrAllOnesSplat(SplatValMI, MRI)) {
    // ...
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }
  if (isNullOrNullSplat(SplatValMI, MRI)) {
    // ...
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }

  // Non-constant mask splat: promote to an s8 element splat and compare
  // against zero.
  // ...
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  // ...
  MI.eraseFromParent();
  return true;
}
bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  GISelChangeObserver &Observer = Helper.Observer;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToMaxNeg(MI) == LegalizerHelper::Legalized;
  case TargetOpcode::G_CONSTANT: {
    const Function &F = MF.getFunction();
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
    const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      return true;
    return Helper.lowerConstant(MI) == LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MIRBuilder, Observer);
  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();
    // ...
    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
      return true;

    return Helper.lower(MI, 0, /* Unused hint type */ LLT()) ==
           LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    MachineIRBuilder MIB(MI);
    // ...
    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    // ...
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);
  }
  llvm_unreachable("expected switch to return");
}