51#define GET_GICOMBINER_DEPS
52#include "AArch64GenPostLegalizeGILowering.inc"
53#undef GET_GICOMBINER_DEPS
55#define DEBUG_TYPE "aarch64-postlegalizer-lowering"
61#define GET_GICOMBINER_TYPES
62#include "AArch64GenPostLegalizeGILowering.inc"
63#undef GET_GICOMBINER_TYPES
70struct ShuffleVectorPseudo {
75 std::initializer_list<SrcOp> SrcOps)
76 :
Opc(
Opc), Dst(Dst), SrcOps(SrcOps){};
77 ShuffleVectorPseudo() =
default;
83 assert(
MI.getOpcode() == TargetOpcode::G_FCONSTANT);
86 if (DstSize != 16 && DstSize != 32 && DstSize != 64)
98 assert(
MI.getOpcode() == TargetOpcode::G_FCONSTANT);
100 const APFloat &ImmValAPF =
MI.getOperand(1).getFPImm()->getValueAPF();
102 MI.eraseFromParent();
107std::optional<std::pair<bool, uint64_t>> getExtMask(
ArrayRef<int> M,
110 auto FirstRealElt =
find_if(M, [](
int Elt) {
return Elt >= 0; });
111 if (FirstRealElt == M.end())
116 APInt ExpectedElt =
APInt(MaskBits, *FirstRealElt + 1,
false,
true);
122 [&ExpectedElt](
int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
132 bool ReverseExt =
false;
144 return std::make_pair(ReverseExt, Imm);
156 int NumInputElements) {
157 if (M.size() !=
static_cast<size_t>(NumInputElements))
159 int NumLHSMatch = 0, NumRHSMatch = 0;
160 int LastLHSMismatch = -1, LastRHSMismatch = -1;
161 for (
int Idx = 0; Idx < NumInputElements; ++Idx) {
167 M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
168 M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
170 const int NumNeededToMatch = NumInputElements - 1;
171 if (NumLHSMatch == NumNeededToMatch)
172 return std::make_pair(
true, LastLHSMismatch);
173 if (NumRHSMatch == NumNeededToMatch)
174 return std::make_pair(
false, LastRHSMismatch);
181 ShuffleVectorPseudo &MatchInfo) {
182 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
193 unsigned NumElts = Ty.getNumElements();
196 for (
unsigned LaneSize : {64U, 32U, 16U}) {
197 if (
isREVMask(ShuffleMask, EltSize, NumElts, LaneSize)) {
200 Opcode = AArch64::G_REV64;
201 else if (LaneSize == 32U)
202 Opcode = AArch64::G_REV32;
204 Opcode = AArch64::G_BSWAP;
206 MatchInfo = ShuffleVectorPseudo(Opcode, Dst, {Src});
217 ShuffleVectorPseudo &MatchInfo) {
218 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
219 unsigned WhichResult;
220 unsigned OperandOrder;
224 if (!
isTRNMask(ShuffleMask, NumElts, WhichResult, OperandOrder))
226 unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
227 Register V1 =
MI.getOperand(OperandOrder == 0 ? 1 : 2).getReg();
228 Register V2 =
MI.getOperand(OperandOrder == 0 ? 2 : 1).getReg();
229 MatchInfo = ShuffleVectorPseudo(
Opc, Dst, {V1, V2});
239 ShuffleVectorPseudo &MatchInfo) {
240 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
241 unsigned WhichResult;
245 if (!
isUZPMask(ShuffleMask, NumElts, WhichResult))
247 unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
250 MatchInfo = ShuffleVectorPseudo(
Opc, Dst, {V1, V2});
255 ShuffleVectorPseudo &MatchInfo) {
256 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
257 unsigned WhichResult;
258 unsigned OperandOrder;
262 if (!
isZIPMask(ShuffleMask, NumElts, WhichResult, OperandOrder))
264 unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
265 Register V1 =
MI.getOperand(OperandOrder == 0 ? 1 : 2).getReg();
266 Register V2 =
MI.getOperand(OperandOrder == 0 ? 2 : 1).getReg();
267 MatchInfo = ShuffleVectorPseudo(
Opc, Dst, {V1, V2});
274 ShuffleVectorPseudo &MatchInfo) {
293 auto *InsMI =
getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
294 MI.getOperand(1).getReg(), MRI);
298 if (!
getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
306 MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP,
MI.getOperand(0).getReg(),
307 {InsMI->getOperand(2).getReg()});
314 ShuffleVectorPseudo &MatchInfo) {
315 assert(Lane >= 0 &&
"Expected positive lane?");
321 MI.getOperand(Lane < NumElements ? 1 : 2).getReg(), MRI);
323 if (NumElements <= Lane)
328 Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
330 ShuffleVectorPseudo(AArch64::G_DUP,
MI.getOperand(0).getReg(), {Reg});
335 ShuffleVectorPseudo &MatchInfo) {
336 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
340 int Lane = *MaybeLane;
344 if (matchDupFromInsertVectorElt(Lane,
MI, MRI, MatchInfo))
346 if (matchDupFromBuildVector(Lane,
MI, MRI, MatchInfo))
354 unsigned NumElts = Ty.getNumElements();
363 unsigned ExpectedElt = M[0];
364 for (
unsigned I = 1;
I < NumElts; ++
I) {
368 if (ExpectedElt == NumElts)
373 if (ExpectedElt !=
static_cast<unsigned>(M[
I]))
381 ShuffleVectorPseudo &MatchInfo) {
382 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
387 auto Mask =
MI.getOperand(3).getShuffleMask();
394 !isSingletonExtMask(Mask, DstTy))
397 Imm = Mask[0] * ExtFactor;
398 MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V1, Imm});
402 std::tie(ReverseExt, Imm) = *ExtInfo;
406 MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
413 ShuffleVectorPseudo &MatchInfo) {
415 if (MatchInfo.Opc == TargetOpcode::G_BSWAP) {
416 assert(MatchInfo.SrcOps.size() == 1);
423 auto BS1 = MIRBuilder.
buildInstr(TargetOpcode::G_BITCAST, {BSTy},
424 MatchInfo.SrcOps[0]);
425 auto BS2 = MIRBuilder.
buildInstr(MatchInfo.Opc, {BSTy}, {BS1});
426 MIRBuilder.
buildInstr(TargetOpcode::G_BITCAST, {MatchInfo.Dst}, {BS2});
428 MIRBuilder.
buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
429 MI.eraseFromParent();
437 if (MatchInfo.SrcOps[2].getImm() == 0)
438 MIRBuilder.
buildCopy(MatchInfo.Dst, MatchInfo.SrcOps[0]);
443 MIRBuilder.
buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
444 {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
446 MI.eraseFromParent();
454 "Expected 128bit vector in applyFullRev");
457 auto Rev = MIRBuilder.
buildInstr(AArch64::G_REV64, {DstTy}, {Src});
458 MIRBuilder.
buildInstr(AArch64::G_EXT, {Dst}, {Rev, Rev, Cst});
459 MI.eraseFromParent();
463 assert(
MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
473 Builder.setInstrAndDebugLoc(Insert);
477 LLT EltTy = MRI.
getType(Insert.getElementReg());
478 LLT IdxTy = MRI.
getType(Insert.getIndexReg());
491 auto StackTemp = Builder.buildFrameIndex(FramePtrTy, FrameIdx);
493 Builder.buildStore(Insert.getOperand(1), StackTemp, PtrInfo,
Align(8));
498 "Expected a power-2 vector size");
499 auto Mask = Builder.buildConstant(IdxTy, VecTy.
getNumElements() - 1);
501 auto EltSize = Builder.buildConstant(IdxTy, EltTy.
getSizeInBytes());
504 Builder.buildPtrAdd(MRI.
getType(StackTemp.getReg(0)), StackTemp,
Mul)
508 Builder.buildStore(Insert.getElementReg(), EltPtr, PtrInfo,
Align(1));
510 Builder.buildLoad(Insert.getReg(0), StackTemp, PtrInfo,
Align(8));
511 Insert.eraseFromParent();
526 std::tuple<Register, int, Register, int> &MatchInfo) {
527 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
531 auto DstIsLeftAndDstLane =
isINSMask(ShuffleMask, NumElts);
532 if (!DstIsLeftAndDstLane)
536 std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
542 int SrcLane = ShuffleMask[DstLane];
543 if (SrcLane >= NumElts) {
548 MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
554 std::tuple<Register, int, Register, int> &MatchInfo) {
555 Builder.setInstrAndDebugLoc(
MI);
559 int DstLane, SrcLane;
560 std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
561 auto SrcCst = Builder.buildConstant(
LLT::scalar(64), SrcLane);
562 auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
563 auto DstCst = Builder.buildConstant(
LLT::scalar(64), DstLane);
564 Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
565 MI.eraseFromParent();
573 assert(Ty.isVector() &&
"vector shift count is not a vector type");
579 int64_t ElementBits = Ty.getScalarSizeInBits();
580 return Cnt >= 1 && Cnt <= ElementBits;
586 assert(
MI.getOpcode() == TargetOpcode::G_ASHR ||
587 MI.getOpcode() == TargetOpcode::G_LSHR);
596 unsigned Opc =
MI.getOpcode();
597 assert(
Opc == TargetOpcode::G_ASHR ||
Opc == TargetOpcode::G_LSHR);
599 Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
601 MIB.
buildInstr(NewOpc, {
MI.getOperand(0)}, {
MI.getOperand(1)}).addImm(Imm);
602 MI.eraseFromParent();
612std::optional<std::pair<uint64_t, CmpInst::Predicate>>
618 unsigned Size = Ty.getSizeInBits();
619 assert((
Size == 32 ||
Size == 64) &&
"Expected 32 or 64 bit compare only?");
626 uint64_t OriginalC = ValAndVReg->Value.getZExtValue();
645 (
Size == 32 &&
static_cast<int32_t
>(
C) == INT32_MIN))
658 assert(
C != 0 &&
"C should not be zero here!");
670 if ((
Size == 32 &&
static_cast<int32_t
>(
C) == INT32_MAX) ||
684 if ((
Size == 32 &&
static_cast<uint32_t>(
C) == UINT32_MAX) ||
699 auto NumberOfInstrToLoadImm = [=](
uint64_t Imm) {
705 if (NumberOfInstrToLoadImm(OriginalC) > NumberOfInstrToLoadImm(
C))
719bool matchAdjustICmpImmAndPred(
721 std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
722 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
725 if (
auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(
RHS, Pred, MRI)) {
726 MatchInfo = *MaybeNewImmAndPred;
732void applyAdjustICmpImmAndPred(
733 MachineInstr &
MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
741 RHS.setReg(Cst->getOperand(0).getReg());
742 MI.getOperand(1).setPredicate(MatchInfo.second);
747 std::pair<unsigned, int> &MatchInfo) {
748 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
751 const LLT DstTy = MRI.
getType(
MI.getOperand(0).getReg());
758 if (*LaneIdx >= SrcTy.getNumElements())
768 switch (SrcTy.getNumElements()) {
770 if (ScalarSize == 64)
771 Opc = AArch64::G_DUPLANE64;
772 else if (ScalarSize == 32)
773 Opc = AArch64::G_DUPLANE32;
776 if (ScalarSize == 32)
777 Opc = AArch64::G_DUPLANE32;
778 else if (ScalarSize == 16)
779 Opc = AArch64::G_DUPLANE16;
783 Opc = AArch64::G_DUPLANE8;
784 else if (ScalarSize == 16)
785 Opc = AArch64::G_DUPLANE16;
789 Opc = AArch64::G_DUPLANE8;
797 MatchInfo.first =
Opc;
798 MatchInfo.second = *LaneIdx;
804 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
808 B.setInstrAndDebugLoc(
MI);
809 auto Lane =
B.buildConstant(
LLT::scalar(64), MatchInfo.second);
814 if (SrcTy.getSizeInBits() == 64) {
815 auto Undef =
B.buildUndef(SrcTy);
816 DupSrc =
B.buildConcatVectors(SrcTy.multiplyElements(2),
817 {Src1Reg, Undef.getReg(0)})
820 B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
821 MI.eraseFromParent();
826 Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
828 if (SrcTy.getSizeInBits() != 128 && SrcTy.getSizeInBits() != 64)
830 return SrcTy.
isVector() && !SrcTy.isScalable() &&
831 Unmerge.getNumOperands() == (
unsigned)SrcTy.getNumElements() + 1;
837 Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
839 assert((SrcTy.isVector() && !SrcTy.isScalable()) &&
840 "Expected a fixed length vector");
842 for (
int I = 0;
I < SrcTy.getNumElements(); ++
I)
843 B.buildExtractVectorElementConstant(Unmerge.getReg(
I), Src1Reg,
I);
844 MI.eraseFromParent();
848 assert(
MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
861 B.setInstrAndDebugLoc(
MI);
862 B.buildInstr(AArch64::G_DUP, {
MI.getOperand(0).getReg()},
863 {
MI.getOperand(1).getReg()});
864 MI.eraseFromParent();
876 if (
MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
878 if (
MI.getOpcode() != TargetOpcode::G_AND)
884 uint64_t Mask = ValAndVReg->Value.getZExtValue();
885 return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
889 if (IsSupportedExtend(*Def))
892 unsigned Opc = Def->getOpcode();
893 if (
Opc != TargetOpcode::G_SHL &&
Opc != TargetOpcode::G_ASHR &&
894 Opc != TargetOpcode::G_LSHR)
901 uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
908 if (IsSupportedExtend(*ShiftLHS))
909 return (ShiftAmt <= 4) ? 2 : 1;
911 LLT Ty = MRI.
getType(Def->getOperand(0).getReg());
915 if ((ShiftSize == 32 && ShiftAmt <= 31) ||
916 (ShiftSize == 64 && ShiftAmt <= 63))
924 assert(
MI.getOpcode() == TargetOpcode::G_ICMP);
945 return isCMN(Def, Pred, MRI) ? Def->getOperand(2).getReg() :
Reg;
965 MI.getOperand(2).setReg(
RHS);
966 MI.getOperand(3).setReg(
LHS);
1013 assert(
MI.getOpcode() == TargetOpcode::G_FCMP);
1018 if (!DstTy.
isVector() || !ST.hasNEON())
1022 if (EltSize == 16 && !ST.hasFullFP16())
1024 if (EltSize != 16 && EltSize != 32 && EltSize != 64)
1033 assert(
MI.getOpcode() == TargetOpcode::G_FCMP);
1044 bool Invert =
false;
1065 auto Cmp = getVectorFCMP(CC,
LHS,
RHS, NoNans, MRI);
1070 auto Cmp2 = getVectorFCMP(CC2,
LHS,
RHS, NoNans, MRI);
1071 auto Cmp2Dst = Cmp2(MIB);
1072 auto Cmp1Dst = Cmp(MIB);
1078 MI.eraseFromParent();
1086 for (
unsigned I = 0;
I < GBuildVec->getNumSources(); ++
I) {
1090 if (!ConstVal.has_value())
1100 LLT DstTy = MRI.
getType(GBuildVec->getReg(0));
1101 Register DstReg =
B.buildUndef(DstTy).getReg(0);
1103 for (
unsigned I = 0;
I < GBuildVec->getNumSources(); ++
I) {
1104 Register SrcReg = GBuildVec->getSourceReg(
I);
1109 B.buildInsertVectorElement(DstTy, DstReg, SrcReg, IdxReg).getReg(0);
1111 B.buildCopy(GBuildVec->getReg(0), DstReg);
1112 GBuildVec->eraseFromParent();
1117 assert(
MI.getOpcode() == TargetOpcode::G_STORE);
1131 assert(
MI.getOpcode() == TargetOpcode::G_STORE);
1133 MI.getOperand(0).setReg(SrcReg);
1141 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1149 assert(
MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
1150 B.setInstrAndDebugLoc(
MI);
1160 if (Unmerge.getNumDefs() != 2)
1177 if (!LowestVal || LowestVal->Value.getZExtValue() != DstTy.
getSizeInBytes())
1183 MatchInfo = ExtSrc1;
1193 MI.getOperand(0).setReg(
MI.getOperand(1).getReg());
1194 MI.getOperand(1).setReg(Dst1);
1195 MI.getOperand(2).setReg(SrcReg);
1212 assert(
MI.getOpcode() == TargetOpcode::G_MUL &&
1213 "Expected a G_MUL instruction");
1224class AArch64PostLegalizerLoweringImpl :
public Combiner {
1227 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;
1231 AArch64PostLegalizerLoweringImpl(
1233 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
1236 static const char *
getName() {
return "AArch6400PreLegalizerCombiner"; }
1241#define GET_GICOMBINER_CLASS_MEMBERS
1242#include "AArch64GenPostLegalizeGILowering.inc"
1243#undef GET_GICOMBINER_CLASS_MEMBERS
1246#define GET_GICOMBINER_IMPL
1247#include "AArch64GenPostLegalizeGILowering.inc"
1248#undef GET_GICOMBINER_IMPL
1250AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
1252 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
1254 :
Combiner(MF, CInfo, nullptr, CSEInfo),
1255 Helper(Observer,
B,
true), RuleConfig(RuleConfig),
1258#include
"AArch64GenPostLegalizeGILowering.inc"
1263bool runPostLegalizerLowering(
1265 const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig) {
1273 F.hasOptSize(),
F.hasMinSize());
1275 CInfo.MaxIterations = 1;
1278 CInfo.EnableFullDCE =
false;
1279 AArch64PostLegalizerLoweringImpl Impl(MF, CInfo,
nullptr,
1281 return Impl.combineMachineInstrs();
1288 AArch64PostLegalizerLoweringLegacy();
1290 StringRef getPassName()
const override {
1291 return "AArch64PostLegalizerLowering";
1294 bool runOnMachineFunction(MachineFunction &MF)
override;
1295 void getAnalysisUsage(AnalysisUsage &AU)
const override;
1298 AArch64PostLegalizerLoweringImplRuleConfig RuleConfig;
1302void AArch64PostLegalizerLoweringLegacy::getAnalysisUsage(
1309AArch64PostLegalizerLoweringLegacy::AArch64PostLegalizerLoweringLegacy()
1310 : MachineFunctionPass(
ID) {
1311 if (!RuleConfig.parseCommandLineOption())
1315bool AArch64PostLegalizerLoweringLegacy::runOnMachineFunction(
1318 return runPostLegalizerLowering(MF, RuleConfig);
1321char AArch64PostLegalizerLoweringLegacy::ID = 0;
1323 "Lower AArch64 MachineInstrs after legalization",
false,
1326 "Lower AArch64 MachineInstrs after legalization",
false,
1331 std::make_unique<AArch64PostLegalizerLoweringImplRuleConfig>()) {
1332 if (!RuleConfig->parseCommandLineOption())
1345 const bool Changed = runPostLegalizerLowering(MF, *RuleConfig);
1357 return new AArch64PostLegalizerLoweringLegacy();
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift right operation.
static bool isINSMask(ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
static unsigned getCmpOperandFoldingProfit(SDValue Op)
Returns how profitable it is to fold a comparison's operand's shift and/or extension operations.
This file declares the targeting of the MachineLegalizer class for AArch64.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define GET_GICOMBINER_CONSTRUCTOR_INITS
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This contains common combine transformations that may be used in a combine pass, or by the target elsewhere.
Option class for Targets to specify which operations are combined how and when.
This contains the base class for all Combiners generated by TableGen.
This contains common code to allow clients to notify changes to machine instr.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static StringRef getName(Value *V)
PreservedAnalyses run(MachineFunction &MF, MachineFunctionAnalysisManager &MFAM)
~AArch64PostLegalizerLoweringPass()
AArch64PostLegalizerLoweringPass()
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
unsigned logBase2() const
Represent the analysis usage information of a pass.
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Represents analyses that only rely on functions' control flow.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
FunctionPass class - This class is used to implement most global optimizations.
Abstract class that contains various methods for clients to notify about changes.
virtual void changingInstr(MachineInstr &MI)=0
This instruction is about to be mutated in some way.
virtual void changedInstr(MachineInstr &MI)=0
This instruction was mutated in some way.
LLT changeElementCount(ElementCount EC) const
Return a vector or scalar with the same element type and the new element count.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr TypeSize getSizeInBytes() const
Returns the total size of the type in bytes, i.e.
LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
LLVM_ABI LegalizeResult lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty)
Legalize an instruction by splitting it into simpler parts, hopefully understood by the target.
LLVM_ABI LegalizeResult fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy)
Legalize a vector instruction by splitting into multiple components, each acting on the same scalar t...
An RAII based helper class to modify MachineFunctionProperties when running pass.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
Helper class to build MachineInstr.
MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0)
Build and insert a bitwise not, NegOne = G_CONSTANT -1 Res = G_OR Op0, NegOne.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
void setInstrAndDebugLoc(MachineInstr &MI)
Set the insertion point to before MI, and set the debug loc to MI's loc.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildOr(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_OR Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool use_nodbg_empty(Register RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
iterator_range< use_instr_nodbg_iterator > use_nodbg_instructions(Register Reg) const
LLVM_ABI Register cloneVirtualRegister(Register VReg, StringRef Name="")
Create and return a new virtual register in the function with the same attributes as the given regist...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Wrapper class representing virtual and physical registers.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A Use represents the edge between a Value definition and its users.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
constexpr LeafTy divideCoefficientBy(ScalarTy RHS) const
We do not provide the '/' operator here because division for polynomial types does not work in the sa...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
std::optional< RegOrConstant > getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
constexpr bool isLegalArithImmed(const uint64_t C)
void changeVectorFCMPPredToAArch64CC(const CmpInst::Predicate P, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
Find the AArch64 condition codes necessary to represent P for a vector floating point comparison.
bool isCMN(const MachineInstr *MaybeSub, const CmpInst::Predicate &Pred, const MachineRegisterInfo &MRI)
std::optional< int64_t > getAArch64VectorSplatScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI)
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to...
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
ImplicitDefMatch m_GImplicitDef()
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
UnaryOp_match< SrcTy, TargetOpcode::G_TRUNC > m_GTrunc(const SrcTy &Src)
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
bool isZIPMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut, unsigned &OperandOrderOut)
Return true for zip1 or zip2 masks of the form: <0, 8, 1, 9, 2, 10, 3, 11> (WhichResultOut = 0,...
@ Undef
Value of the register doesn't matter.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
AnalysisManager< MachineFunction > MachineFunctionAnalysisManager
LLVM_ABI MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
LLVM_ABI PreservedAnalyses getMachineFunctionPassPreservedAnalyses()
Returns the minimum set of Analyses that all machine function passes must preserve.
FunctionPass * createAArch64PostLegalizerLowering()
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool isUZPMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut)
Return true for uzp1 or uzp2 masks of the form: <0, 2, 4, 6, 8, 10, 12, 14> or <1,...
bool isREVMask(ArrayRef< int > M, unsigned EltSize, unsigned NumElts, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize.
LLVM_ABI std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
LLVM_ABI bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
LLVM_ABI void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool isTRNMask(ArrayRef< int > M, unsigned NumElts, unsigned &WhichResultOut, unsigned &OperandOrderOut)
Return true for trn1 or trn2 masks of the form: <0, 8, 2, 10, 4, 12, 6, 14> (WhichResultOut = 0,...
LLVM_ABI int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
@ SinglePass
Enables Observer-based DCE and additional heuristics that retry combining defined and used instructio...
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.