#define GET_GICOMBINER_DEPS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-prelegalizer-combiner"

#define GET_GICOMBINER_TYPES
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_TYPES
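/// The match/apply fragments below implement the redundant-trunc compare
/// combine: a G_ICMP against zero whose operand was truncated from a wider
/// register is rewritten to compare the wide register against a wide zero
/// constant, so the truncation becomes dead.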
  assert(MI.getOpcode() == TargetOpcode::G_ICMP && VT);

  LLT WideTy = MRI.getType(WideReg);

  assert(MI.getOpcode() == TargetOpcode::G_ICMP);

  LLT WideTy = MRI.getType(WideReg);

  Builder.setInstrAndDebugLoc(MI);
  auto WideZero = Builder.buildConstant(WideTy, 0);

  MI.getOperand(2).setReg(WideReg);
  MI.getOperand(3).setReg(WideZero.getReg(0));
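/// Match side of the G_GLOBAL_VALUE offset-folding combine: scan the
/// G_PTR_ADD users of a non-thread-local global, take the smallest constant
/// offset, and record (NewOffset, MinOffset) in MatchInfo when the combined
/// offset stays below 1 << 20 and within the allocated size of the global's
/// value type.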
                           std::pair<uint64_t, uint64_t> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);

  auto &GlobalOp = MI.getOperand(1);
  auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())

  for (auto &UseInstr : MRI.use_nodbg_instructions(Dst)) {
    if (UseInstr.getOpcode() != TargetOpcode::G_PTR_ADD)

        UseInstr.getOperand(2).getReg(), MRI);

    MinOffset = std::min(MinOffset, Cst->Value.getZExtValue());

  uint64_t CurrOffset = GlobalOp.getOffset();
  uint64_t NewOffset = MinOffset + CurrOffset;
  if (NewOffset <= CurrOffset)

  if (NewOffset >= (1 << 20))

  Type *T = GV->getValueType();

      NewOffset > GV->getDataLayout().getTypeAllocSize(T))

  MatchInfo = std::make_pair(NewOffset, MinOffset);
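/// Apply side of the offset-folding combine: move the folded offset onto the
/// G_GLOBAL_VALUE operand, retarget the instruction to a fresh destination
/// register, and rebuild the original pointer by adding back -MinOffset.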
                           std::pair<uint64_t, uint64_t> &MatchInfo) {

  std::tie(Offset, MinOffset) = MatchInfo;
  B.setInstrAndDebugLoc(*std::next(MI.getIterator()));

  auto &GlobalOp = MI.getOperand(1);
  auto *GV = GlobalOp.getGlobal();
  GlobalOp.ChangeToGA(GV, Offset, GlobalOp.getTargetFlags());

  MI.getOperand(0).setReg(NewGVDst);

      B.buildConstant(LLT::scalar(64), -static_cast<int64_t>(MinOffset)));
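/// Match side of the vecreduce(add) to dot-product combine: a G_VECREDUCE_ADD
/// fed by a zext/sext of an i8 vector (element count a multiple of 8), or by
/// a G_MUL of two such extends, is recorded in MatchInfo as
/// (operand 1, operand 2 or 0, is-signed) so it can be rewritten with
/// G_UDOT/G_SDOT.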
                              std::tuple<Register, Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected a G_VECREDUCE_ADD instruction");
  assert(STI.hasDotProd() && "Target should have Dot Product feature");

  LLT DstTy = MRI.getType(DstReg);
  LLT MidTy = MRI.getType(MidReg);

    if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))

    if ((I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) &&

  unsigned I1Opc = I1->getOpcode();
  if (I1Opc == TargetOpcode::G_MUL) {

    if (!tryMatchingMulOfExt(I1, Out1, Out2, I1Opc))

    SrcTy = MRI.getType(Out1);
    std::get<0>(MatchInfo) = Out1;
    std::get<1>(MatchInfo) = Out2;
  } else if (I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) {

    if (M->getOpcode() == TargetOpcode::G_MUL &&
        tryMatchingMulOfExt(M, Out1, Out2, I1Opc)) {
      SrcTy = MRI.getType(Out1);
      std::get<0>(MatchInfo) = Out1;
      std::get<1>(MatchInfo) = Out2;

      SrcTy = MRI.getType(I1Op);
      std::get<0>(MatchInfo) = I1Op;
      std::get<1>(MatchInfo) = 0;

  if (I1Opc == TargetOpcode::G_ZEXT)
    std::get<2>(MatchInfo) = 0;
  else if (I1Opc == TargetOpcode::G_SEXT)
    std::get<2>(MatchInfo) = 1;

  if (SrcTy.getScalarSizeInBits() != 8 || SrcTy.getNumElements() % 8 != 0)
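/// Apply side of the dot-product combine: build G_UDOT/G_SDOT against a zero
/// accumulator (using a vector of ones when there was no multiply), splitting
/// the source into 16-element chunks plus an 8-element leftover when needed,
/// and reduce the concatenated results with a new G_VECREDUCE_ADD.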
                              std::tuple<Register, Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected a G_VECREDUCE_ADD instruction");
  assert(STI.hasDotProd() && "Target should have Dot Product feature");

      std::get<2>(MatchInfo) ? AArch64::G_SDOT : AArch64::G_UDOT;
  Register Ext1SrcReg = std::get<0>(MatchInfo);

  if (std::get<1>(MatchInfo) == 0)
    Ext2SrcReg = Builder.buildConstant(MRI.getType(Ext1SrcReg), 1)

    Ext2SrcReg = std::get<1>(MatchInfo);

  LLT SrcTy = MRI.getType(Ext1SrcReg);

  if (SrcTy.getNumElements() % 16 == 0) {
    NumOfDotMI = SrcTy.getNumElements() / 16;

  } else if (SrcTy.getNumElements() % 8 == 0) {
    NumOfDotMI = SrcTy.getNumElements() / 8;

  if (NumOfDotMI == 1) {
    auto Zeroes = Builder.buildConstant(MidTy, 0)->getOperand(0).getReg();
    auto Dot = Builder.buildInstr(DotOpcode, {MidTy},
                                  {Zeroes, Ext1SrcReg, Ext2SrcReg});
    Builder.buildVecReduceAdd(MI.getOperand(0), Dot->getOperand(0));

    if (SrcTy.getNumElements() % 16 != 0) {

      LLT LeftoverTy1, LeftoverTy2;

                   LeftoverTy1, Ext1UnmergeReg, Leftover1, Builder,

                   LeftoverTy2, Ext2UnmergeReg, Leftover2, Builder,

                                 {Leftover1[0], v8Zeroes})

                                 {Leftover2[0], v8Zeroes})

      unsigned SrcNumElts = SrcTy.getNumElements();

                   Ext1UnmergeReg, Builder, MRI);

                   Ext2UnmergeReg, Builder, MRI);

    unsigned NumElements = 0;
    for (unsigned i = 0; i < Ext1UnmergeReg.size(); i++) {

      if (MRI.getType(Ext1UnmergeReg[i]).getNumElements() == 16) {

      auto Zeroes = Builder.buildConstant(ZeroesLLT, 0)->getOperand(0).getReg();

              .buildInstr(DotOpcode, {MRI.getType(Zeroes)},
                          {Zeroes, Ext1UnmergeReg[i], Ext2UnmergeReg[i]})

    Builder.buildVecReduceAdd(MI.getOperand(0).getReg(),
                              ConcatMI->getOperand(0).getReg());

  MI.eraseFromParent();
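/// Match side of the vecreduce(add(ext)) to {S|U}ADDLV combine: record the
/// extend's source register and whether the extend was signed.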
                              std::pair<Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected G_VECREDUCE_ADD Opcode");

  if (ExtOpc == TargetOpcode::G_ZEXT)
    std::get<1>(MatchInfo) = 0;
  else if (ExtOpc == TargetOpcode::G_SEXT)
    std::get<1>(MatchInfo) = 1;

  LLT ExtSrcTy = MRI.getType(ExtSrcReg);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  std::get<0>(MatchInfo) = ExtSrcReg;
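/// Apply side of the {S|U}ADDLV combine: split over-sized sources into legal
/// pieces with extractParts, emit G_UADDLV/G_SADDLV on each piece, extract
/// (and truncate, if needed) the scalar result, sum the partial results with
/// G_ADD, and extend or copy the sum into the destination register.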
                              std::pair<Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected G_VECREDUCE_ADD Opcode");

  unsigned Opc = std::get<1>(MatchInfo) ? AArch64::G_SADDLV : AArch64::G_UADDLV;
  Register SrcReg = std::get<0>(MatchInfo);

  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);

  unsigned SrcScalSize = SrcTy.getScalarSizeInBits();
  unsigned SrcNumElem = SrcTy.getNumElements();
  if ((SrcScalSize == 8 && SrcNumElem > 16) ||
      (SrcScalSize == 16 && SrcNumElem > 8) ||
      (SrcScalSize == 32 && SrcNumElem > 4)) {

    if (SrcScalSize == 8)

    else if (SrcScalSize == 16)

    else if (SrcScalSize == 32)

    extractParts(SrcReg, SrcTy, MainTy, LeftoverTy, WorkingRegisters,
                 LeftoverRegs, B, MRI);

  for (unsigned I = 0; I < WorkingRegisters.size(); I++) {

    LLT WorkingRegTy = MRI.getType(WorkingRegisters[I]);

      WorkingRegisters[I] =
          B.buildInstr(std::get<1>(MatchInfo) ? TargetOpcode::G_SEXT
                                              : TargetOpcode::G_ZEXT,

                                    : LLT::fixed_vector(2, 64);

        B.buildInstr(Opc, {addlvTy}, {WorkingRegisters[I]}).getReg(0);

    if (MidScalarSize == 32 || MidScalarSize == 64) {
      WorkingRegisters[I] = B.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT,
                                         {MidScalarLLT}, {addlvReg, zeroReg})

      Register extractReg = B.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT,

      WorkingRegisters[I] =
          B.buildTrunc({MidScalarLLT}, {extractReg}).getReg(0);

  if (WorkingRegisters.size() > 1) {
    outReg = B.buildAdd(MidScalarLLT, WorkingRegisters[0], WorkingRegisters[1])

    for (unsigned I = 2; I < WorkingRegisters.size(); I++) {
      outReg = B.buildAdd(MidScalarLLT, outReg, WorkingRegisters[I]).getReg(0);

    outReg = WorkingRegisters[0];

    B.buildInstr(std::get<1>(MatchInfo) ? TargetOpcode::G_SEXT
                                        : TargetOpcode::G_ZEXT,

    B.buildCopy(DstReg, outReg);

  MI.eraseFromParent();
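/// Push an extend through a G_ADD/G_SUB/G_MUL of two matching extends
/// (i8 extended to i32/i64, or i16 extended to i64): perform the operation on
/// a narrower intermediate type and emit a single extend of the result.
/// G_SUB always sign-extends the result, since the difference may be negative.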
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL) &&
         "Expected a G_ADD, G_SUB or G_MUL instruction\n");

  LLT DstTy = MRI.getType(DstReg);

  Register ExtDstReg = MI.getOperand(1).getReg();
  LLT Ext1SrcTy = MRI.getType(SrcReg1);
  LLT Ext2SrcTy = MRI.getType(SrcReg2);
  unsigned ExtDstScal = MRI.getType(ExtDstReg).getScalarSizeInBits();

  if (((Ext1SrcScal == 8 && ExtDstScal == 32) ||
       ((Ext1SrcScal == 8 || Ext1SrcScal == 16) && ExtDstScal == 64)) &&
      Ext1SrcTy == Ext2SrcTy)

  LLT SrcTy = MRI.getType(SrcReg1);

  unsigned Opc = isSExt ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;

      B.buildInstr(MI.getOpcode(), {MidTy}, {Ext1Reg, Ext2Reg}).getReg(0);

  if (MI.getOpcode() == TargetOpcode::G_ADD ||
      MI.getOpcode() == TargetOpcode::G_MUL)
    B.buildInstr(Opc, {DstReg}, {AddReg});

    B.buildSExt(DstReg, AddReg);

  MI.eraseFromParent();
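/// tryToSimplifyUADDO: when both G_UADDO operands are 8- or 16-bit values
/// known to be zero-extended (via G_ASSERT_ZEXT) and the overflow flag only
/// feeds a G_BRCOND, perform a plain wide G_ADD and derive the branch
/// condition from the sum and the 1 << 8 / 1 << 16 boundary constant, then
/// truncate the sum back to the narrow result.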
  auto &MRI = *B.getMRI();

  LLT WideTy0 = MRI.getType(Op0Wide);
  LLT WideTy1 = MRI.getType(Op1Wide);

  LLT OpTy = MRI.getType(ResVal);

  if (Op0WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
      Op1WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||

      (OpTySize != 8 && OpTySize != 16))

  Register ResStatus = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(ResStatus))

  if (CondUser->getOpcode() != TargetOpcode::G_BRCOND)

  if (any_of(MRI.use_nodbg_instructions(ResVal),

               (I.getParent() == FailMBB || I.getParent() == CurrentMBB);

  B.setInstrAndDebugLoc(*MI.getNextNode());
  MI.eraseFromParent();

  Register AddDst = MRI.cloneVirtualRegister(Op0Wide);
  B.buildInstr(TargetOpcode::G_ADD, {AddDst}, {Op0Wide, Op1Wide});

  Register CondBit = MRI.cloneVirtualRegister(Op0Wide);

      B.buildConstant(LLT::scalar(32), OpTySize == 8 ? 1 << 8 : 1 << 16));

  B.buildZExtOrTrunc(ResVal, AddDst);

    auto OldR = U.getParent()->getOperand(0).getReg();

    U.getParent()->eraseFromParent();
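/// AArch64PreLegalizerCombinerImpl is the Combiner that runs on each
/// instruction; most of its rules are generated by TableGen into
/// AArch64GenPreLegalizeGICombiner.inc, with the hand-written combines above
/// used as fallbacks.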
class AArch64PreLegalizerCombinerImpl : public Combiner {
  const CombinerHelper Helper;
  const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig;
  const AArch64Subtarget &STI;

  AArch64PreLegalizerCombinerImpl(
      MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
      GISelValueTracking &VT, GISelCSEInfo *CSEInfo,
      const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
      const AArch64Subtarget &STI, MachineDominatorTree *MDT,
      const LegalizerInfo *LI);

  static const char *getName() { return "AArch64PreLegalizerCombiner"; }

  bool tryCombineAll(MachineInstr &I) const override;

  bool tryCombineAllImpl(MachineInstr &I) const;

#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS

#define GET_GICOMBINER_IMPL
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_IMPL

AArch64PreLegalizerCombinerImpl::AArch64PreLegalizerCombinerImpl(

    const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,

    : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
      Helper(Observer, B, true, &VT, MDT, LI),
      RuleConfig(RuleConfig), STI(STI),

#include "AArch64GenPreLegalizeGICombiner.inc"
bool AArch64PreLegalizerCombinerImpl::tryCombineAll(MachineInstr &MI) const {
  if (tryCombineAllImpl(MI))

  unsigned Opc = MI.getOpcode();

  case TargetOpcode::G_SHUFFLE_VECTOR:

  case TargetOpcode::G_UADDO:
    return tryToSimplifyUADDO(MI, B, Helper, Observer);
  case TargetOpcode::G_MEMCPY_INLINE:

  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMSET: {

    unsigned MaxLen = CInfo.EnableOpt ? 0 : 32;

    if (Opc == TargetOpcode::G_MEMSET)
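/// AArch64PreLegalizerCombiner is the legacy MachineFunctionPass wrapper: it
/// gathers TargetPassConfig, CSE info, value tracking and the dominator tree,
/// fills in a CombinerInfo (single iteration, full DCE), and hands everything
/// to AArch64PreLegalizerCombinerImpl::combineMachineInstrs().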
class AArch64PreLegalizerCombiner : public MachineFunctionPass {

  AArch64PreLegalizerCombiner();

  StringRef getPassName() const override {
    return "AArch64PreLegalizerCombiner";

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  AArch64PreLegalizerCombinerImplRuleConfig RuleConfig;

void AArch64PreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {

  AU.addRequired<GISelValueTrackingAnalysisLegacy>();

AArch64PreLegalizerCombiner::AArch64PreLegalizerCombiner()
    : MachineFunctionPass(ID) {
  if (!RuleConfig.parseCommandLineOption())

bool AArch64PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {

  auto &TPC = getAnalysis<TargetPassConfig>();

      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  auto *CSEInfo = &Wrapper.get(TPC.getCSEConfig());

  const auto *LI = ST.getLegalizerInfo();

      &getAnalysis<GISelValueTrackingAnalysisLegacy>().get(MF);

      &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();

                     nullptr, EnableOpt, F.hasOptSize(),

  CInfo.MaxIterations = 1;

  CInfo.EnableFullDCE = true;
  AArch64PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT, CSEInfo,
                                       RuleConfig, ST, MDT, LI);
  return Impl.combineMachineInstrs();

char AArch64PreLegalizerCombiner::ID = 0;

                      "Combine AArch64 machine instrs before legalization",

                    "Combine AArch64 machine instrs before legalization", false,

  return new AArch64PreLegalizerCombiner();