#define GET_GICOMBINER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

using namespace MIPatternMatch;
using namespace AArch64GISelUtils;

#define GET_GICOMBINER_TYPES
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_TYPES
/// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
struct ShuffleVectorPseudo {
  unsigned Opc;                 ///< Opcode for this instruction.
  Register Dst;                 ///< Destination register.
  SmallVector<SrcOp, 2> SrcOps; ///< Source registers.
  ShuffleVectorPseudo(unsigned Opc, Register Dst,
                      std::initializer_list<SrcOp> SrcOps)
      : Opc(Opc), Dst(Dst), SrcOps(SrcOps) {}
  ShuffleVectorPseudo() = default;
};
/// Check whether shuffle mask \p M can be implemented with an EXT of the two
/// (distinct) shuffle sources. \returns {ReverseExt, element index} on
/// success.
std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                    unsigned NumElts) {
  // Look for the first non-undef element.
  auto FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });
  if (FirstRealElt == M.end())
    return std::nullopt;

  // Use APInt so that incrementing the expected element wraps at the mask
  // width.
  unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);

  // Every defined element after the first one must be its successor.
  if (any_of(make_range(std::next(FirstRealElt), M.end()),
             [&ExpectedElt](int Elt) { return Elt != ExpectedElt++ && Elt >= 0; }))
    return std::nullopt;

  // After the loop, ExpectedElt is the start index plus NumElts (modulo
  // 2 * NumElts). If it lands inside the first source, the two sources must
  // be swapped.
  bool ReverseExt = false;
  uint64_t Imm = ExpectedElt.getZExtValue();
  if (Imm < NumElts)
    ReverseExt = true;
  else
    Imm -= NumElts;
  return std::make_pair(ReverseExt, Imm);
}
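// Illustrative example (not from the original source, and it assumes the
// reconstruction above): for two <4 x s32> sources and the mask <1, 2, 3, 4>,
// the first defined element is 1 and each following element increases by one,
// so getExtMask returns {ReverseExt = false, Imm = 1}; matchEXT later scales
// that element index by the element size in bytes to form the byte immediate
// of AArch64::G_EXT.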
/// Helper for matchINS.
///
/// \returns {DstIsLeft, DstLane} when \p M is an ins mask for
/// \p NumInputElements, i.e. all but one lane comes unchanged from one source.
std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
                                              int NumInputElements) {
  if (M.size() != static_cast<size_t>(NumInputElements))
    return std::nullopt;
  int NumLHSMatch = 0, NumRHSMatch = 0;
  int LastLHSMismatch = -1, LastRHSMismatch = -1;
  for (int Idx = 0; Idx < NumInputElements; ++Idx) {
    if (M[Idx] == -1) {
      // Undef lanes are treated as matching either side.
      ++NumLHSMatch;
      ++NumRHSMatch;
      continue;
    }
    M[Idx] == Idx ? ++NumLHSMatch : LastLHSMismatch = Idx;
    M[Idx] == Idx + NumInputElements ? ++NumRHSMatch : LastRHSMismatch = Idx;
  }
  const int NumNeededToMatch = NumInputElements - 1;
  if (NumLHSMatch == NumNeededToMatch)
    return std::make_pair(true, LastLHSMismatch);
  if (NumRHSMatch == NumNeededToMatch)
    return std::make_pair(false, LastRHSMismatch);
  return std::nullopt;
}
/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
/// G_REV instruction. Populates \p MatchInfo for the apply step.
bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Dst);
  unsigned EltSize = Ty.getScalarSizeInBits();
  unsigned NumElts = Ty.getNumElements();
  // Try to produce a G_REV for each supported lane size.
  for (unsigned LaneSize : {64U, 32U, 16U}) {
    if (isREVMask(ShuffleMask, EltSize, NumElts, LaneSize)) {
      unsigned Opcode;
      if (LaneSize == 64U)
        Opcode = AArch64::G_REV64;
      else if (LaneSize == 32U)
        Opcode = AArch64::G_REV32;
      else
        Opcode = AArch64::G_REV16;
      MatchInfo = ShuffleVectorPseudo(Opcode, Dst, {Src});
      return true;
    }
  }
  return false;
}
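// Illustrative example (not from the original source): for an <8 x s16>
// shuffle with mask <3, 2, 1, 0, 7, 6, 5, 4>, the 16-bit elements are
// reversed within each 64-bit lane, so isREVMask matches LaneSize == 64 and
// MatchInfo is set up to build AArch64::G_REV64.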
/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_TRN1 or G_TRN2 instruction.
bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isTRNMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_TRN1 : AArch64::G_TRN2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}
/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_UZP1 or G_UZP2 instruction.
bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isUZPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_UZP1 : AArch64::G_UZP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}
/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_ZIP1 or G_ZIP2 instruction.
bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  unsigned WhichResult;
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  unsigned NumElts = MRI.getType(Dst).getNumElements();
  if (!isZIPMask(ShuffleMask, NumElts, WhichResult))
    return false;
  unsigned Opc = (WhichResult == 0) ? AArch64::G_ZIP1 : AArch64::G_ZIP2;
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  MatchInfo = ShuffleVectorPseudo(Opc, Dst, {V1, V2});
  return true;
}
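// Illustrative examples (not from the original source) of the masks the three
// matchers above accept for two <4 x s32> sources:
//   TRN1 <0, 4, 2, 6>   TRN2 <1, 5, 3, 7>
//   UZP1 <0, 2, 4, 6>   UZP2 <1, 3, 5, 7>
//   ZIP1 <0, 4, 1, 5>   ZIP2 <2, 6, 3, 7>
// WhichResult selects between the "1" and "2" forms of each instruction.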
/// Helper function for matchDup: match a splat coming from a
/// G_INSERT_VECTOR_ELT of a scalar into an undef vector.
bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
                                 MachineRegisterInfo &MRI,
                                 ShuffleVectorPseudo &MatchInfo) {
  if (Lane != 0)
    return false;
  // Begin matching the insert.
  auto *InsMI = getOpcodeDef(TargetOpcode::G_INSERT_VECTOR_ELT,
                             MI.getOperand(1).getReg(), MRI);
  if (!InsMI)
    return false;
  // The vector being inserted into must be undefined.
  if (!getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, InsMI->getOperand(1).getReg(),
                    MRI))
    return false;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(),
                                  {InsMI->getOperand(2).getReg()});
  return true;
}
/// Helper function for matchDup: match a splat of one G_BUILD_VECTOR lane.
bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
                             MachineRegisterInfo &MRI,
                             ShuffleVectorPseudo &MatchInfo) {
  assert(Lane >= 0 && "Expected positive lane?");
  int NumElements =
      MRI.getType(MI.getOperand(1).getReg()).getNumElements();
  // Test if the shuffle source is a G_BUILD_VECTOR; if it is, the lane's
  // definition can be referenced directly.
  auto *BuildVecMI =
      getOpcodeDef(TargetOpcode::G_BUILD_VECTOR,
                   MI.getOperand(Lane < NumElements ? 1 : 2).getReg(), MRI);
  // If the lane points past the first source, rebase it onto the second one.
  if (NumElements <= Lane)
    Lane -= NumElements;
  if (!BuildVecMI)
    return false;
  Register Reg = BuildVecMI->getOperand(Lane + 1).getReg();
  MatchInfo =
      ShuffleVectorPseudo(AArch64::G_DUP, MI.getOperand(0).getReg(), {Reg});
  return true;
}
bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  auto MaybeLane = getSplatIndex(MI);
  if (!MaybeLane)
    return false;
  int Lane = *MaybeLane;
  // If this is an undef splat, generate it via a plain dup of lane 0.
  if (Lane < 0)
    Lane = 0;
  if (matchDupFromInsertVectorElt(Lane, MI, MRI, MatchInfo))
    return true;
  if (matchDupFromBuildVector(Lane, MI, MRI, MatchInfo))
    return true;
  return false;
}
/// Check whether \p M is an EXT mask that only uses a single source vector.
bool isSingletonExtMask(ArrayRef<int> M, LLT Ty) {
  unsigned NumElts = Ty.getNumElements();
  // The defined elements must increase by one, wrapping back to lane 0.
  unsigned ExpectedElt = M[0];
  for (unsigned I = 1; I < NumElts; ++I) {
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;
    // Skip undefined elements.
    if (M[I] < 0)
      continue;
    if (ExpectedElt != static_cast<unsigned>(M[I]))
      return false;
  }
  return true;
}
/// Match a G_SHUFFLE_VECTOR that can be lowered to AArch64::G_EXT.
bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  Register V1 = MI.getOperand(1).getReg();
  Register V2 = MI.getOperand(2).getReg();
  auto Mask = MI.getOperand(3).getShuffleMask();
  uint64_t Imm;
  auto ExtInfo = getExtMask(Mask, DstTy.getNumElements());
  uint64_t ExtFactor = MRI.getType(V1).getScalarSizeInBits() / 8;

  if (!ExtInfo) {
    // The two-source form did not match; try the single-source form, which
    // requires the second operand to be undef.
    if (!getOpcodeDef<GImplicitDef>(V2, MRI) ||
        !isSingletonExtMask(Mask, DstTy))
      return false;
    Imm = Mask[0] * ExtFactor;
    MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V1, Imm});
    return true;
  }
  bool ReverseExt;
  std::tie(ReverseExt, Imm) = *ExtInfo;
  if (ReverseExt)
    std::swap(V1, V2);
  Imm *= ExtFactor;
  MatchInfo = ShuffleVectorPseudo(AArch64::G_EXT, Dst, {V1, V2, Imm});
  return true;
}
/// Replace a G_SHUFFLE_VECTOR instruction with the pseudo described by
/// \p MatchInfo.
void applyShuffleVectorPseudo(MachineInstr &MI,
                              ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst}, MatchInfo.SrcOps);
  MI.eraseFromParent();
}

/// Replace a G_SHUFFLE_VECTOR with G_EXT. Special-cased because the immediate
/// must be emitted as a G_CONSTANT for the imported tablegen patterns.
void applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {
  MachineIRBuilder MIRBuilder(MI);
  if (MatchInfo.SrcOps[2].getImm() == 0)
    MIRBuilder.buildCopy(MatchInfo.Dst, MatchInfo.SrcOps[0]);
  else {
    auto Cst = MIRBuilder.buildConstant(LLT::scalar(32),
                                        MatchInfo.SrcOps[2].getImm());
    MIRBuilder.buildInstr(MatchInfo.Opc, {MatchInfo.Dst},
                          {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});
  }
  MI.eraseFromParent();
}
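// Illustrative MIR sketch (assumed register names, not from the original
// source) of what applyEXT produces for MatchInfo = {G_EXT, %dst, {%v1, %v2, 4}}:
//   %imm:_(s32) = G_CONSTANT i32 4
//   %dst:_(<4 x s32>) = G_EXT %v1, %v2, %imm(s32)
// A zero immediate instead becomes a plain COPY of the first source.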
/// Match a G_INSERT_VECTOR_ELT whose index is not a compile-time constant.
bool matchNonConstInsert(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT);
  return !getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
}

/// Lower the insert through a stack slot (excerpted): spill the vector, store
/// the element at the variable offset, reload the result; the slot size is
/// asserted to be a power of two ("Expected a power-2 vector size").
void applyNonConstInsert(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &Builder) {
  auto &Insert = cast<GInsertVectorElement>(MI);
  // ...
}
/// Match a G_SHUFFLE_VECTOR whose mask can be lowered to a single
/// G_INSERT_VECTOR_ELT (INS).
bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
              std::tuple<Register, int, Register, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  ArrayRef<int> ShuffleMask = MI.getOperand(3).getShuffleMask();
  Register Dst = MI.getOperand(0).getReg();
  int NumElts = MRI.getType(Dst).getNumElements();
  auto DstIsLeftAndDstLane = isINSMask(ShuffleMask, NumElts);
  if (!DstIsLeftAndDstLane)
    return false;
  bool DstIsLeft;
  int DstLane;
  std::tie(DstIsLeft, DstLane) = *DstIsLeftAndDstLane;
  Register Left = MI.getOperand(1).getReg();
  Register Right = MI.getOperand(2).getReg();
  Register DstVec = DstIsLeft ? Left : Right;
  Register SrcVec = Left;
  int SrcLane = ShuffleMask[DstLane];
  if (SrcLane >= NumElts) {
    SrcVec = Right;
    SrcLane -= NumElts;
  }
  MatchInfo = std::make_tuple(DstVec, DstLane, SrcVec, SrcLane);
  return true;
}
void applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
              MachineIRBuilder &Builder,
              std::tuple<Register, int, Register, int> &MatchInfo) {
  Builder.setInstrAndDebugLoc(MI);
  Register Dst = MI.getOperand(0).getReg();
  auto ScalarTy = MRI.getType(Dst).getElementType();
  Register DstVec, SrcVec;
  int DstLane, SrcLane;
  std::tie(DstVec, DstLane, SrcVec, SrcLane) = MatchInfo;
  auto SrcCst = Builder.buildConstant(LLT::scalar(64), SrcLane);
  auto Extract = Builder.buildExtractVectorElement(ScalarTy, SrcVec, SrcCst);
  auto DstCst = Builder.buildConstant(LLT::scalar(64), DstLane);
  Builder.buildInsertVectorElement(Dst, DstVec, Extract, DstCst);
  MI.eraseFromParent();
}
/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation. The value must be in the range
/// 1 <= Value <= ElementBits.
bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
                  int64_t &Cnt) {
  assert(Ty.isVector() && "vector shift count is not a vector type");
  MachineInstr *MI = MRI.getVRegDef(Reg);
  auto Cst = getAArch64VectorSplatScalar(*MI, MRI);
  if (!Cst)
    return false;
  Cnt = *Cst;
  int64_t ElementBits = Ty.getScalarSizeInBits();
  return Cnt >= 1 && Cnt <= ElementBits;
}

/// Match a vector G_ASHR or G_LSHR with a valid immediate shift amount.
bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                       int64_t &Imm) {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR);
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());
  if (!Ty.isVector())
    return false;
  return isVShiftRImm(MI.getOperand(2).getReg(), MRI, Ty, Imm);
}

void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                       int64_t &Imm) {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ASHR || Opc == TargetOpcode::G_LSHR);
  unsigned NewOpc =
      Opc == TargetOpcode::G_ASHR ? AArch64::G_VASHR : AArch64::G_VLSHR;
  MachineIRBuilder MIB(MI);
  auto ImmDef = MIB.buildConstant(LLT::scalar(32), Imm);
  MIB.buildInstr(NewOpc, {MI.getOperand(0)}, {MI.getOperand(1), ImmDef});
  MI.eraseFromParent();
}
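// Illustrative example (not from the original source): a G_ASHR of a
// <4 x s32> vector by a build-vector splat of 3 satisfies isVShiftRImm
// (1 <= 3 <= 32), so it is rewritten as AArch64::G_VASHR with a scalar
// G_CONSTANT immediate of 3.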
/// Determine whether the immediate \p RHS and predicate \p P of a G_ICMP can
/// be adjusted so that the right-hand side becomes a legal arithmetic
/// immediate. \returns the updated immediate and predicate if so.
std::optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {
  const auto &Ty = MRI.getType(RHS);
  if (Ty.isVector())
    return std::nullopt;
  unsigned Size = Ty.getSizeInBits();
  assert((Size == 32 || Size == 64) && "Expected 32 or 64 bit compare only?");

  // Nothing to do if the RHS is not a constant or is already legal.
  auto ValAndVReg = getIConstantVRegValWithLookThrough(RHS, MRI);
  if (!ValAndVReg)
    return std::nullopt;
  uint64_t OriginalC = ValAndVReg->Value.getZExtValue();
  uint64_t C = OriginalC;
  if (isLegalArithImmed(C))
    return std::nullopt;

  // Adjust the constant and flip the predicate to an equivalent form, taking
  // care not to wrap past the extreme values of the type.
  switch (P) {
  default:
    return std::nullopt;
  case CmpInst::ICMP_SLT:
  case CmpInst::ICMP_SGE:
    // x slt c => x sle c - 1; x sge c => x sgt c - 1.
    if ((Size == 64 && static_cast<int64_t>(C) == INT64_MIN) ||
        (Size == 32 && static_cast<int32_t>(C) == INT32_MIN))
      return std::nullopt;
    P = (P == CmpInst::ICMP_SLT) ? CmpInst::ICMP_SLE : CmpInst::ICMP_SGT;
    C -= 1;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::ICMP_SGT:
    // x sle c => x slt c + 1; x sgt c => x sge c + 1.
    if ((Size == 32 && static_cast<int32_t>(C) == INT32_MAX) ||
        (Size == 64 && static_cast<int64_t>(C) == INT64_MAX))
      return std::nullopt;
    P = (P == CmpInst::ICMP_SLE) ? CmpInst::ICMP_SLT : CmpInst::ICMP_SGE;
    C += 1;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::ICMP_UGT:
    // x ule c => x ult c + 1; x ugt c => x uge c + 1.
    if ((Size == 32 && static_cast<uint32_t>(C) == UINT32_MAX) ||
        (Size == 64 && C == UINT64_MAX))
      return std::nullopt;
    P = (P == CmpInst::ICMP_ULE) ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
    C += 1;
    break;
    // The unsigned less-than/greater-or-equal case subtracts one in the same
    // way, as long as c is not zero.
  }

  // Accept the adjustment if the new constant is a legal arithmetic immediate,
  // or if it can be materialized in a single instruction while the original
  // could not.
  if (Size == 32)
    C = static_cast<uint32_t>(C);
  if (isLegalArithImmed(C))
    return {{C, P}};

  auto IsMaterializableInSingleInstruction = [=](uint64_t Imm) {
    SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
    AArch64_IMM::expandMOVImm(Imm, Size, Insn);
    return Insn.size() == 1;
  };
  if (!IsMaterializableInSingleInstruction(OriginalC) &&
      IsMaterializableInSingleInstruction(C))
    return {{C, P}};

  return std::nullopt;
}
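// Illustrative example (not from the original source): 4097 is not a legal
// arithmetic immediate (it is neither a 12-bit value nor a 12-bit value
// shifted left by 12), but "x slt 4097" is equivalent to "x sle 4096", and
// 4096 == 1 << 12 is legal, so tryAdjustICmpImmAndPred returns
// {4096, ICMP_SLE}.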
bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  Register RHS = MI.getOperand(3).getReg();
  auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
  if (auto MaybeNewImmAndPred = tryAdjustICmpImmAndPred(RHS, Pred, MRI)) {
    MatchInfo = *MaybeNewImmAndPred;
    return true;
  }
  return false;
}

void applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {
  MIB.setInstrAndDebugLoc(MI);
  MachineOperand &RHS = MI.getOperand(3);
  MachineRegisterInfo &MRI = *MIB.getMRI();
  auto Cst = MIB.buildConstant(MRI.cloneVirtualRegister(RHS.getReg()),
                               MatchInfo.first);
  Observer.changingInstr(MI);
  RHS.setReg(Cst->getOperand(0).getReg());
  MI.getOperand(1).setPredicate(MatchInfo.second);
  Observer.changedInstr(MI);
}
bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);
  const LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  auto LaneIdx = getSplatIndex(MI);
  if (!LaneIdx)
    return false;
  // The lane index must address the first source, and the types must match.
  if (*LaneIdx >= SrcTy.getNumElements() || DstTy != SrcTy)
    return false;

  // Pick the G_DUPLANE opcode for the element count and element size.
  unsigned ScalarSize = SrcTy.getElementType().getSizeInBits();
  unsigned Opc = 0;
  switch (SrcTy.getNumElements()) {
  case 2:
    if (ScalarSize == 64)
      Opc = AArch64::G_DUPLANE64;
    else if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    break;
  case 4:
    if (ScalarSize == 32)
      Opc = AArch64::G_DUPLANE32;
    else if (ScalarSize == 16)
      Opc = AArch64::G_DUPLANE16;
    break;
  case 8:
    if (ScalarSize == 8)
      Opc = AArch64::G_DUPLANE8;
    else if (ScalarSize == 16)
      Opc = AArch64::G_DUPLANE16;
    break;
  case 16:
    if (ScalarSize == 8)
      Opc = AArch64::G_DUPLANE8;
    break;
  default:
    break;
  }
  if (!Opc)
    return false;

  MatchInfo.first = Opc;
  MatchInfo.second = *LaneIdx;
  return true;
}
void applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  MachineIRBuilder &B, std::pair<unsigned, int> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
  Register Src1Reg = MI.getOperand(1).getReg();
  const LLT SrcTy = MRI.getType(Src1Reg);

  B.setInstrAndDebugLoc(MI);
  auto Lane = B.buildConstant(LLT::scalar(64), MatchInfo.second);

  Register DupSrc = MI.getOperand(1).getReg();
  // For a 64-bit source, widen it to 128 bits by concatenating with an
  // implicit def so that the DUPLANE patterns apply.
  if (SrcTy.getSizeInBits() == 64) {
    auto Undef = B.buildUndef(SrcTy);
    DupSrc = B.buildConcatVectors(SrcTy.multiplyElements(2),
                                  {Src1Reg, Undef.getReg(0)})
                 .getReg(0);
  }
  B.buildInstr(MatchInfo.first, {MI.getOperand(0).getReg()}, {DupSrc, Lane});
  MI.eraseFromParent();
}
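// Illustrative example (not from the original source): a G_SHUFFLE_VECTOR of
// two <4 x s32> sources with mask <1, 1, 1, 1> is a splat of lane 1 of the
// first source, so matchDupLane picks AArch64::G_DUPLANE32 and applyDupLane
// emits it with a G_CONSTANT lane index of 1.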
bool matchScalarizeVectorUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI) {
  auto &Unmerge = cast<GUnmerge>(MI);
  Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
  const LLT SrcTy = MRI.getType(Src1Reg);
  // Only fixed-length vector sources that unmerge into scalar elements are
  // handled here.
  return SrcTy.isVector() && !SrcTy.isScalable() &&
         Unmerge.getNumDefs() == SrcTy.getNumElements();
}

void applyScalarizeVectorUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) {
  auto &Unmerge = cast<GUnmerge>(MI);
  Register Src1Reg = Unmerge.getReg(Unmerge.getNumOperands() - 1);
  const LLT SrcTy = MRI.getType(Src1Reg);
  assert((SrcTy.isVector() && !SrcTy.isScalable()) &&
         "Expected a fixed length vector");
  // Replace the unmerge with one extract per element.
  for (int I = 0; I < SrcTy.getNumElements(); ++I)
    B.buildExtractVectorElementConstant(Unmerge.getReg(I), Src1Reg, I);
  MI.eraseFromParent();
}
bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR);
  auto Splat = getAArch64VectorSplat(MI, MRI);
  if (!Splat)
    return false;
  if (Splat->isReg())
    return true;
  // Selection matches immAllOnesV and immAllZerosV against G_BUILD_VECTOR, so
  // don't lower splats of 0 or -1 away from that form.
  int64_t Cst = Splat->getCst();
  return (Cst != 0 && Cst != -1);
}

void applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                           MachineIRBuilder &B) {
  B.setInstrAndDebugLoc(MI);
  B.buildInstr(AArch64::G_DUP, {MI.getOperand(0).getReg()},
               {MI.getOperand(1).getReg()});
  MI.eraseFromParent();
}
/// \returns how many instructions would be saved by folding the shift and/or
/// extend feeding a G_ICMP operand \p CmpOp into the compare itself.
unsigned getCmpOperandFoldingProfit(Register CmpOp, MachineRegisterInfo &MRI) {
  // Nothing can be folded if the operand has more than one use.
  if (!MRI.hasOneNonDBGUse(CmpOp))
    return 0;

  auto IsSupportedExtend = [&](MachineInstr &MI) {
    if (MI.getOpcode() == TargetOpcode::G_SEXT_INREG)
      return true;
    if (MI.getOpcode() != TargetOpcode::G_AND)
      return false;
    auto ValAndVReg =
        getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
    if (!ValAndVReg)
      return false;
    uint64_t Mask = ValAndVReg->Value.getZExtValue();
    return (Mask == 0xFF || Mask == 0xFFFF || Mask == 0xFFFFFFFF);
  };

  MachineInstr *Def = getDefIgnoringCopies(CmpOp, MRI);
  if (IsSupportedExtend(*Def))
    return 1;

  unsigned Opc = Def->getOpcode();
  if (Opc != TargetOpcode::G_SHL && Opc != TargetOpcode::G_ASHR &&
      Opc != TargetOpcode::G_LSHR)
    return 0;

  auto MaybeShiftAmt =
      getIConstantVRegValWithLookThrough(Def->getOperand(2).getReg(), MRI);
  if (!MaybeShiftAmt)
    return 0;
  uint64_t ShiftAmt = MaybeShiftAmt->Value.getZExtValue();
  MachineInstr *ShiftLHS =
      getDefIgnoringCopies(Def->getOperand(1).getReg(), MRI);

  // An extend fed into a small shift can be folded as an extended register.
  if (IsSupportedExtend(*ShiftLHS))
    return (ShiftAmt <= 4) ? 2 : 1;

  LLT Ty = MRI.getType(Def->getOperand(0).getReg());
  if (Ty.isVector())
    return 0;
  unsigned ShiftSize = Ty.getSizeInBits();
  if ((ShiftSize == 32 && ShiftAmt <= 31) ||
      (ShiftSize == 64 && ShiftAmt <= 63))
    return 1;
  return 0;
}
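// Illustrative example (not from the original source; register names are
// hypothetical): given
//   %two:_(s64) = G_CONSTANT i64 2
//   %shl:_(s64) = G_SHL %x, %two
//   %cmp:_(s32) = G_ICMP intpred(eq), %shl(s64), %y
// the shift has a single use and a small constant amount, so it could be
// folded into the compare as a shifted-register operand, and
// getCmpOperandFoldingProfit reports a profit of 1 for that operand.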
/// \returns true when swapping the operands of a G_ICMP is profitable, i.e.
/// the shift/extend feeding the LHS could be folded away if it were the RHS
/// (excerpted).
bool trySwapICmpOperands(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  // ...
  Register TheLHS = GetRegForProfit(LHS);
  Register TheRHS = GetRegForProfit(RHS);
  return getCmpOperandFoldingProfit(TheLHS, MRI) >
         getCmpOperandFoldingProfit(TheRHS, MRI);
}

void applySwapICmpOperands(MachineInstr &MI, GISelChangeObserver &Observer) {
  // Swap the operands and use the swapped predicate (excerpted).
  // ...
  MI.getOperand(2).setReg(RHS);
  MI.getOperand(3).setReg(LHS);
  // ...
}
/// Helper for applyLowerVectorFCMP: return a callback that builds the AArch64
/// FCM* sequence for condition code \p CC (excerpted).
std::function<Register(MachineIRBuilder &)>
getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero,
              bool NoNans, MachineRegisterInfo &MRI) {
  LLT DstTy = MRI.getType(LHS);
  assert(DstTy == MRI.getType(RHS) && "Src and Dst types must match!");
  // ... (one case per condition code follows)
}
/// Check whether a vector G_FCMP \p MI can be lowered to AArch64 FCM*
/// instructions.
bool matchLowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIB) {
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  const auto &ST = MI.getMF()->getSubtarget<AArch64Subtarget>();
  Register Dst = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(Dst);
  if (!DstTy.isVector() || !ST.hasNEON())
    return false;
  Register LHS = MI.getOperand(2).getReg();
  unsigned EltSize = MRI.getType(LHS).getScalarSizeInBits();
  if (EltSize == 16 && !ST.hasFullFP16())
    return false;
  if (EltSize != 16 && EltSize != 32 && EltSize != 64)
    return false;
  return true;
}
/// Lower a vector G_FCMP to the AArch64 FCM* pseudo instructions.
void applyLowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIB) {
  assert(MI.getOpcode() == TargetOpcode::G_FCMP);
  const auto &ST = MI.getMF()->getSubtarget<AArch64Subtarget>();
  const auto &CmpMI = cast<GFCmp>(MI);

  Register Dst = CmpMI.getReg(0);
  CmpInst::Predicate Pred = CmpMI.getCond();
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();
  LLT DstTy = MRI.getType(Dst);

  // Compares against a splat of zero can use the FCM* "with zero" forms.
  auto Splat = getAArch64VectorSplat(*MRI.getVRegDef(RHS), MRI);
  bool IsZero = Splat && !Splat->isReg() && Splat->getCst() == 0;

  bool Invert = false;
  AArch64CC::CondCode CC, CC2 = AArch64CC::AL;
  if ((Pred == CmpInst::Predicate::FCMP_ORD ||
       Pred == CmpInst::Predicate::FCMP_UNO) &&
      IsZero) {
    // "fcmp ord/uno %a, 0" is the canonical NaN check; compare the LHS
    // against itself instead of materializing a zero vector.
    RHS = LHS;
    IsZero = false;
  }
  changeVectorFCMPPredToAArch64CC(Pred, CC, CC2, Invert);

  MIB.setInstrAndDebugLoc(MI);
  const bool NoNans =
      ST.getTargetLowering()->getTargetMachine().Options.NoNaNsFPMath;

  // Some predicates need a second comparison OR'd into the result.
  auto Cmp = getVectorFCMP(CC, LHS, RHS, IsZero, NoNans, MRI);
  Register CmpRes;
  if (CC2 == AArch64CC::AL)
    CmpRes = Cmp(MIB);
  else {
    auto Cmp2 = getVectorFCMP(CC2, LHS, RHS, IsZero, NoNans, MRI);
    auto Cmp2Dst = Cmp2(MIB);
    auto Cmp1Dst = Cmp(MIB);
    CmpRes = MIB.buildOr(DstTy, Cmp1Dst, Cmp2Dst).getReg(0);
  }
  if (Invert)
    CmpRes = MIB.buildNot(DstTy, CmpRes).getReg(0);
  MRI.replaceRegWith(Dst, CmpRes);
  MI.eraseFromParent();
}
bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                         Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Register DstReg = MI.getOperand(0).getReg();
  if (MRI.getType(DstReg).isVector())
    return false;
  // Match a store of a truncated value, at most 64 bits before truncation.
  if (!mi_match(DstReg, MRI, m_GTrunc(m_Reg(SrcReg))))
    return false;
  return MRI.getType(SrcReg).getSizeInBits() <= 64;
}

void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &B, GISelChangeObserver &Observer,
                         Register &SrcReg) {
  assert(MI.getOpcode() == TargetOpcode::G_STORE);
  Observer.changingInstr(MI);
  MI.getOperand(0).setReg(SrcReg);
  Observer.changedInstr(MI);
}
// Lower vector G_SEXT_INREG back to shifts for selection. They were allowed
// to form earlier for combine opportunities, so any that remain at this point
// need to be lowered again.
bool matchVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  return DstTy.isVector();
}

void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B, GISelChangeObserver &Observer) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
  B.setInstrAndDebugLoc(MI);
  LegalizerHelper Helper(*MI.getMF(), Observer, B);
  Helper.lower(MI, 0, /* Unused hint type */ LLT());
}
/// Combine   <N x t>, unused = G_UNMERGE_VALUES (G_EXT v, undef, N)
/// into      unused, <N x t> = G_UNMERGE_VALUES v
bool matchUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                              Register &MatchInfo) {
  auto &Unmerge = cast<GUnmerge>(MI);
  if (Unmerge.getNumDefs() != 2)
    return false;
  if (!MRI.use_nodbg_empty(Unmerge.getReg(1)))
    return false;
  LLT DstTy = MRI.getType(Unmerge.getReg(0));
  if (!DstTy.isVector())
    return false;
  MachineInstr *Ext = getOpcodeDef(AArch64::G_EXT, Unmerge.getSourceReg(), MRI);
  if (!Ext)
    return false;
  Register ExtSrc1 = Ext->getOperand(1).getReg();
  Register ExtSrc2 = Ext->getOperand(2).getReg();
  auto LowestVal =
      getIConstantVRegValWithLookThrough(Ext->getOperand(3).getReg(), MRI);
  if (!LowestVal || LowestVal->Value.getZExtValue() != DstTy.getSizeInBytes())
    return false;
  if (!getOpcodeDef<GImplicitDef>(ExtSrc2, MRI))
    return false;
  MatchInfo = ExtSrc1;
  return true;
}

void applyUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                              MachineIRBuilder &B,
                              GISelChangeObserver &Observer, Register &SrcReg) {
  Observer.changingInstr(MI);
  // Swap the destination registers and point the source at the original
  // (pre-G_EXT) vector.
  Register Dst1 = MI.getOperand(0).getReg();
  MI.getOperand(0).setReg(MI.getOperand(1).getReg());
  MI.getOperand(1).setReg(Dst1);
  MI.getOperand(2).setReg(SrcReg);
  Observer.changedInstr(MI);
}
/// Match a G_MUL whose operands are zero/sign-extended from a type half as
/// wide as the destination; such a multiply can use AArch64's U/SMULL.
bool matchExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI) {
  // Get the instructions that defined the source operands.
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  MachineInstr *I1 = getDefIgnoringCopies(MI.getOperand(1).getReg(), MRI);
  MachineInstr *I2 = getDefIgnoringCopies(MI.getOperand(2).getReg(), MRI);
  unsigned I1Opc = I1->getOpcode();
  unsigned I2Opc = I2->getOpcode();
  return DstTy.isVector() &&
         ((I1Opc == TargetOpcode::G_ZEXT && I2Opc == TargetOpcode::G_ZEXT) ||
          (I1Opc == TargetOpcode::G_SEXT && I2Opc == TargetOpcode::G_SEXT)) &&
         (MRI.getType(I1->getOperand(0).getReg()).getScalarSizeInBits() ==
          MRI.getType(I1->getOperand(1).getReg()).getScalarSizeInBits() * 2) &&
         (MRI.getType(I2->getOperand(0).getReg()).getScalarSizeInBits() ==
          MRI.getType(I2->getOperand(1).getReg()).getScalarSizeInBits() * 2);
}
void applyExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI,
                       MachineIRBuilder &B, GISelChangeObserver &Observer) {
  assert(MI.getOpcode() == TargetOpcode::G_MUL &&
         "Expected a G_MUL instruction");

  // Get the instructions that defined the source operands.
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
  MachineInstr *I1 = getDefIgnoringCopies(MI.getOperand(1).getReg(), MRI);
  MachineInstr *I2 = getDefIgnoringCopies(MI.getOperand(2).getReg(), MRI);
  unsigned I1Opc = I1->getOpcode();
  unsigned I2Opc = I2->getOpcode();

  // If the source operands were extended from half-width values, a single
  // {U,S}MULL on the narrow sources replaces the extends and the multiply.
  if (((I1Opc == TargetOpcode::G_ZEXT && I2Opc == TargetOpcode::G_ZEXT) ||
       (I1Opc == TargetOpcode::G_SEXT && I2Opc == TargetOpcode::G_SEXT)) &&
      (MRI.getType(I1->getOperand(0).getReg()).getScalarSizeInBits() ==
       MRI.getType(I1->getOperand(1).getReg()).getScalarSizeInBits() * 2) &&
      (MRI.getType(I2->getOperand(0).getReg()).getScalarSizeInBits() ==
       MRI.getType(I2->getOperand(1).getReg()).getScalarSizeInBits() * 2)) {
    B.setInstrAndDebugLoc(MI);
    B.buildInstr(I1->getOpcode() == TargetOpcode::G_ZEXT ? AArch64::G_UMULL
                                                         : AArch64::G_SMULL,
                 {MI.getOperand(0).getReg()},
                 {I1->getOperand(1).getReg(), I2->getOperand(1).getReg()});
    MI.eraseFromParent();
  }
}
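// Illustrative example (not from the original source; register names are
// hypothetical): for
//   %a:_(<8 x s16>) = G_ZEXT %x:_(<8 x s8>)
//   %b:_(<8 x s16>) = G_ZEXT %y:_(<8 x s8>)
//   %m:_(<8 x s16>) = G_MUL %a, %b
// both operands are zero-extended from a type half as wide as the result, so
// the combine replaces the G_MUL with AArch64::G_UMULL %x, %y (G_SMULL for
// the sign-extended form).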
// A further lowering in this file (only excerpted here) narrows an over-wide
// vector operation by splitting it into fewer-element pieces through the
// LegalizerHelper:
//   B.setInstrAndDebugLoc(MI);
//   Helper.fewerElementsVector(...);
class AArch64PostLegalizerLoweringImpl : public Combiner {
protected:
  // TODO: Make CombinerHelper methods const.
  mutable CombinerHelper Helper;
  const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig;
  const AArch64Subtarget &STI;

public:
  AArch64PostLegalizerLoweringImpl(
      MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
      GISelCSEInfo *CSEInfo,
      const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
      const AArch64Subtarget &STI);

  static const char *getName() { return "AArch64PostLegalizerLowering"; }

  bool tryCombineAll(MachineInstr &I) const override;

private:
#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS
};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_IMPL
AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
    MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
    GISelCSEInfo *CSEInfo,
    const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
    const AArch64Subtarget &STI)
    : Combiner(MF, CInfo, TPC, nullptr, CSEInfo),
      Helper(Observer, B, true), RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
{
}
class AArch64PostLegalizerLowering : public MachineFunctionPass {
public:
  static char ID;

  AArch64PostLegalizerLowering();

  StringRef getPassName() const override {
    return "AArch64PostLegalizerLowering";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

private:
  AArch64PostLegalizerLoweringImplRuleConfig RuleConfig;
};

void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.setPreservesCFG();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
    : MachineFunctionPass(ID) {
  initializeAArch64PostLegalizerLoweringPass(*PassRegistry::getPassRegistry());

  if (!RuleConfig.parseCommandLineOption())
    report_fatal_error("Invalid rule identifier");
}

bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;
  assert(MF.getProperties().hasProperty(
             MachineFunctionProperties::Property::Legalized) &&
         "Expected a legalized function?");
  auto *TPC = &getAnalysis<TargetPassConfig>();
  const Function &F = MF.getFunction();
  CombinerInfo CInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false,
                     /*LegalizerInfo*/ nullptr, /*EnableOpt*/ true,
                     F.hasOptSize(), F.hasMinSize());
  // Disable fixed-point iteration to reduce compile time.
  CInfo.MaxIterations = 1;
  CInfo.ObserverLvl = CombinerInfo::ObserverLevel::SinglePass;
  // The later post-legalizer combiner performs DCE, so skip a full DCE pass.
  CInfo.EnableFullDCE = false;
  AArch64PostLegalizerLoweringImpl Impl(MF, CInfo, TPC, /*CSEInfo*/ nullptr,
                                        RuleConfig,
                                        MF.getSubtarget<AArch64Subtarget>());
  return Impl.combineMachineInstrs();
}
char AArch64PostLegalizerLowering::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AArch64PostLegalizerLowering, DEBUG_TYPE,
                    "Lower AArch64 MachineInstrs after legalization", false,
                    false)

FunctionPass *llvm::createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}