using namespace LegalityPredicates;
using namespace LegalizeMutations;
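
// Body of the typeIsScalarFPArith(TypeIdx, ST) predicate: a scalar type only
// counts as an FP-arithmetic type when the subtarget has the matching FP
// extension (Zfh for 16-bit, F for 32-bit, D for 64-bit scalars).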
return Query.Types[TypeIdx].isScalar() &&
       ((ST.hasStdExtZfh() && Query.Types[TypeIdx].getSizeInBits() == 16) ||
        (ST.hasStdExtF() && Query.Types[TypeIdx].getSizeInBits() == 32) ||
        (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));
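
// Bodies of the RVV legality predicates (typeIsLegalIntOrFPVec,
// typeIsLegalBoolVec, typeIsLegalPtrVec): scalable vectors require the V
// extensions (hasVInstructions), 64-bit elements additionally require
// hasVInstructionsI64, and types with a known-minimum element count of 1
// (the nxv1 types, i.e. the smallest fractional-LMUL types) are only legal
// when ELEN is 64.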
                      std::initializer_list<LLT> IntOrFPVecTys,
  // ...
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
          ST.hasVInstructionsI64()) &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          ST.getELen() == 64);

  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          ST.getELen() == 64);

                  std::initializer_list<LLT> PtrVecTys,
  // ...
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          ST.getELen() == 64) &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
          Query.Types[TypeIdx].getScalarSizeInBits() == 32);

RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  // ...
  using namespace TargetOpcode;
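
  // The nxv* names below are scalable-vector LLTs (e.g. nxv4s8 is a scalable
  // vector of at least 4 x s8 elements, nxv1p0 a scalable vector of pointers),
  // presumably defined earlier via LLT::scalable_vector(). Together they
  // enumerate every RVV type the legalizer may mark legal.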
  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};

      .legalFor({s32, sXLen})
      // ...
  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

  ShiftActions.legalFor({{s32, s32}, {s32, sXLen}, {sXLen, sXLen}})
      .widenScalarToNextPow2(0)
      // ...
  ExtActions.legalFor({{sXLen, s32}});
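
  // G_MERGE_VALUES / G_UNMERGE_VALUES: type index 0 is the wide type of a
  // merge but the narrow type of an unmerge, hence the BigTyIdx/LitTyIdx swap
  // below. On RV32 with the D extension the s64 <-> 2 x s32 merge/unmerge is
  // kept legal (presumably so FP64 values can be moved between GPR pairs and
  // FPRs); everything else is widened and clamped to XLen.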
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    // ...
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          // ...
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }

  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
    // ...
  }
  RotateActions.lower();

  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();
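
  // Count-leading/trailing-zeros and popcount: with Zbb these map onto the
  // clz/ctz/cpop instructions (and their *w forms on RV64), so {s32, s32} and
  // {sXLen, sXLen} are legal and other widths are clamped into that range;
  // without Zbb the generic lowering is used instead.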
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        // ...
  }
  CountZerosUndefActions.lower();

  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }
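
  // G_CONSTANT: s32 and pointer constants are always legal; s64 constants go
  // through the custom legalizer (guarded by a condition not shown here),
  // which, per shouldBeInConstantPool further down, may move expensive
  // immediates into the constant pool, e.g. when optimizing for size.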
  ConstantActions.legalFor({s32, p0});
  // ...
  ConstantActions.customFor({s64});
  ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER, G_FREEZE})
      .legalFor({s32, sXLen, p0})
      // ...
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})

  auto &SelectActions =
      getActionDefinitionsBuilder(G_SELECT)
          .legalFor({{s32, sXLen}, {p0, sXLen}})
          // ...
  if (XLen == 64 || ST.hasStdExtD())
    SelectActions.legalFor({{s64, sXLen}});
  SelectActions
      // ...
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
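
  // The load/store rules below use legalForTypesWithMemDesc, whose entries
  // are {value type, pointer type, memory type, minimum alignment in bits};
  // e.g. {s32, p0, s8, 8} makes a byte-aligned s8-in-memory access with an
  // s32 register value legal.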
  auto &LoadStoreActions =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                     // ...
                                     {p0, p0, sXLen, XLen}});
  auto &ExtLoadActions =
      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
  if (XLen == 64) {
    LoadStoreActions.legalForTypesWithMemDesc({/*...*/
                                               {s64, p0, s64, 64}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
  } else if (ST.hasStdExtD()) {
    // ...
  }

  if (ST.hasVInstructions()) {
    LoadStoreActions.legalForTypesWithMemDesc(
        {/*...*/
         {nxv4s8, p0, nxv4s8, 8},
         {nxv8s8, p0, nxv8s8, 8},
         {nxv16s8, p0, nxv16s8, 8},
         {nxv32s8, p0, nxv32s8, 8},
         {nxv64s8, p0, nxv64s8, 8},
         {nxv2s16, p0, nxv2s16, 16},
         {nxv4s16, p0, nxv4s16, 16},
         {nxv8s16, p0, nxv8s16, 16},
         {nxv16s16, p0, nxv16s16, 16},
         {nxv32s16, p0, nxv32s16, 16},
         {nxv2s32, p0, nxv2s32, 32},
         {nxv4s32, p0, nxv4s32, 32},
         {nxv8s32, p0, nxv8s32, 32},
         {nxv16s32, p0, nxv16s32, 32}});

    if (ST.getELen() == 64)
      LoadStoreActions.legalForTypesWithMemDesc(
          {/*...*/ {nxv1s16, p0, nxv1s16, 16}, {nxv1s32, p0, nxv1s32, 32}});

    if (ST.hasVInstructionsI64())
      LoadStoreActions.legalForTypesWithMemDesc({/*...*/
                                                 {nxv2s64, p0, nxv2s64, 64},
                                                 {nxv4s64, p0, nxv4s64, 64},
                                                 {nxv8s64, p0, nxv8s64, 64}});

    if (XLen <= ST.getELen())
      // ...
      .clampScalar(0, sXLen, sXLen);
  // ...
      .clampScalar(1, sXLen, sXLen);
  // ...
      .widenScalarToNextPow2(0)
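
  // Multiply and divide/remainder: with Zmmul/M the s32 ("w"-form on RV64) and
  // sXLen operations are legal, while double-XLen values (sDoubleXLen) are
  // emitted as libcalls (e.g. the __muldi3/__divdi3 family from compiler-rt);
  // the libcallFor({sXLen, sDoubleXLen}) variant below is presumably the
  // fallback used when the extension is absent.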
  if (ST.hasStdExtZmmul()) {
    // ...
        .widenScalarToNextPow2(0)
  // ...
        .widenScalarToNextPow2(0)

  if (ST.hasStdExtM()) {
    // ...
        .legalFor({s32, sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
    // ...
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)

  if (ST.hasStdExtZbb())
    AbsActions.customFor({s32, sXLen}).minScalar(0, sXLen);
  // ...

  auto &MinMaxActions =
      getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN});
  if (ST.hasStdExtZbb())
    MinMaxActions.legalFor({sXLen}).minScalar(0, sXLen);
  MinMaxActions.lower();

  getActionDefinitionsBuilder({/*...*/
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
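
  // The two predicate bodies that follow are custom legality checks for the
  // scalar FP conversions (likely G_FPTRUNC and G_FPEXT): each clause pairs a
  // result type with the FP extensions needed for it (D for the 32/64-bit
  // cases, Zfh for the 16-bit ones); the elided second half of every clause
  // constrains the source type.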
  return (ST.hasStdExtD() && typeIs(0, s32)(Query) && /*...*/) ||
         (ST.hasStdExtZfh() && typeIs(0, s16)(Query) && /*...*/) ||
         (ST.hasStdExtZfh() && ST.hasStdExtD() && typeIs(0, s16)(Query) &&
          /*...*/);

  return (ST.hasStdExtD() && typeIs(0, s64)(Query) && /*...*/) ||
         (ST.hasStdExtZfh() && typeIs(0, s32)(Query) && /*...*/) ||
         (ST.hasStdExtZfh() && ST.hasStdExtD() && typeIs(0, s64)(Query) &&
          /*...*/);

      .libcallFor({s32, s64});
  // ...
  if (ST.hasVInstructionsF64() && ST.hasStdExtD())
    // ...
  else if (ST.hasVInstructionsI64())
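
// legalizeIntrinsic: the case shown here is llvm.va_copy, which is expanded
// into a load of the pointer stored in the source va_list followed by a store
// of that pointer into the destination va_list, after which the intrinsic
// instruction is erased.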
  switch (IntrinsicID) {
  // ...
  case Intrinsic::vacopy: {
    // ...
    LLT PtrTy = MRI.getType(DstLst);
    // ...
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
    // ...
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);
    MI.eraseFromParent();
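
// legalizeShlAshrLshr: if the shift amount feeding operand 2 is a known
// constant that fits the immediate form, the G_CONSTANT is rebuilt (ExtCst
// below) and swapped in so the imported selection patterns can match it;
// otherwise the shift is left untouched.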
bool RISCVLegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  // ...
  uint64_t Amount = VRegAndVal->Value.getZExtValue();
  // ...
  MI.getOperand(2).setReg(ExtCst.getReg(0));
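
// legalizeVAStart: G_VASTART is lowered by materializing the address of the
// var-args save area frame index (FINAddr, presumably built with
// buildFrameIndex from RISCVMachineFunctionInfo::getVarArgsFrameIndex) and
// storing it through the va_list pointer operand, reusing the instruction's
// original memory operand.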
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  // ...
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();

bool RISCVLegalizerInfo::shouldBeInConstantPool(APInt APImm,
                                                bool ShouldOptForSize) const {
  // ...
  if (ShouldOptForSize)
    // ...
  unsigned ShiftAmt, AddOpc;
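
// legalizeVScale: G_VSCALE with a constant multiplier Val is rewritten in
// terms of G_READ_VLENB. Since VLENB = VLEN/8 and vscale = VLEN/RVVBitsPerBlock
// = VLEN/64, vscale * Val == (VLENB * Val) / 8, so the branches below pick the
// cheapest form: a shift of VLENB when Val is a power of two, the raw VLENB
// read when Val == 8, a multiply of VLENB by Val/8 when Val is a multiple of
// 8, and a generic multiply-plus-shift otherwise.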
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
  // ...
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      // ...
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      // ...
    } else {
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
    }
  } else if ((Val % 8) == 0) {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    // ...
  } else {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    // ...
  }
  MI.eraseFromParent();
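
// legalizeExt: G_ZEXT/G_SEXT/G_ANYEXT of a boolean (s1-element) vector cannot
// be selected directly, so it is rewritten as a vector select between two
// splats: ExtTrueVal below (-1 for G_SEXT, 1 otherwise) and zero, with the
// original mask as the condition.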
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);
  // ...
  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  // ...
  MI.eraseFromParent();
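
// legalizeLoadStore: custom handling for RVV loads/stores whose alignment is
// not supported for the element width. If allowsMemoryAccessForAlignment
// accepts the access it is kept as-is; otherwise the 16/32/64-bit-element
// vector is recast as an equivalent s8-element vector access, which is why
// NumElements below is scaled by the element size in bytes.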
  assert(/*...*/
         "Machine instructions must be Load/Store.");
  // ...
  LLT DataTy = MRI.getType(DstReg);
  // ...
  if (!MI.hasOneMemOperand())
    // ...
  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
    // ...
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");
  // ...
  unsigned NumElements =
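
// RVV splat helpers: getMaskTypeFor gives the s1-element mask type matching a
// data vector, buildAllOnesMask emits G_VMSET_VL for a given VL, and
// buildDefaultVLOps pairs that all-ones mask with a VL constant of -1 (VLMAX).
// buildSplatSplitS64WithVL handles an s64 splat value on a 32-bit target by
// unmerging it into lo/hi s32 halves and feeding both halves to
// G_SPLAT_VECTOR_SPLIT_I64_VL.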
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});

static std::pair<MachineInstrBuilder, Register>
buildDefaultVLOps(const DstOp &Dst, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI) {
  LLT VecTy = Dst.getLLTTy(MRI);
  // ...
}

// In buildSplatPartsS64WithVL / buildSplatSplitS64WithVL:
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
  // ...
                                  Unmerge.getReg(1), VL, MIB, MRI);

  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);
  // ...
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  // ...

  // Splat of an s64 element value on a 32-bit target:
  if (XLenTy.getSizeInBits() == 32 &&
      // ...
    MI.eraseFromParent();
    // ...

  // All-ones mask splat:
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
  // ...
  // All-zeros mask splat:
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
  // ...
  // Otherwise the boolean splat value is widened first:
    auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
    // ...
  MI.eraseFromParent();

// legalizeCustom dispatches on the opcode:
  switch (MI.getOpcode()) {
  // ...
  case TargetOpcode::G_ABS:
    // ...
  case TargetOpcode::G_CONSTANT: {
    // ...
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
    // ...
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      // ...
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MIRBuilder, Observer);
  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();
    // ...
    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
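
  // G_IS_FPCLASS: the test mask is translated into the layout of the RISC-V
  // fclass result (FClassMask below); G_FCLASS computes the classification
  // bits into an sXLen value, and the final boolean is effectively
  // "(fclass & mask) != 0", built from the G_AND shown here plus a compare.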
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    // ...
    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    // ...
    MI.eraseFromParent();
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);