#define GET_GICOMBINER_DEPS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-prelegalizer-combiner"

using namespace MIPatternMatch;

#define GET_GICOMBINER_TYPES
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_TYPES
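// Note: the GET_GICOMBINER_* sections of the generated .inc file supply, in
// turn, the headers the generated rules depend on, their MatchInfo types, the
// per-rule class members and constructor initialisers, and the
// tryCombineAllImpl() dispatcher for the rules declared in the target's
// combine TableGen file (AArch64Combine.td).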
// Match a G_FCONSTANT that is better represented as a G_CONSTANT: the value
// is 32 or 64 bits wide and every use is a store, so the bits can live in a
// GPR instead of requiring an FP materialisation.
  assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  if (DstSize != 32 && DstSize != 64)
    return false;
  return all_of(MRI.use_nodbg_instructions(DstReg),
                [](const MachineInstr &Use) { return Use.mayStore(); });
  assert(MI.getOpcode() == TargetOpcode::G_FCONSTANT);
  // The FP immediate's bit pattern becomes the integer immediate.
  const APFloat &ImmValAPF = MI.getOperand(1).getFPImm()->getValueAPF();
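// Illustrative sketch (not from this file): the two hooks above rewrite, e.g.,
//   %f:_(s32) = G_FCONSTANT float 1.0
//   G_STORE %f(s32), %p(p0)
// into
//   %c:_(s32) = G_CONSTANT i32 1065353216   ; bit pattern of 1.0f
//   G_STORE %c(s32), %p(p0)
// so the stored value can live on the GPR bank instead of needing an FMOV.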
// Match an equality G_ICMP against zero whose LHS is a G_TRUNC of a wider
// value with enough known sign bits; the compare can use the wide register.
  assert(MI.getOpcode() == TargetOpcode::G_ICMP && VT);
  LLT LHSTy = MRI.getType(LHS);
  LLT WideTy = MRI.getType(WideReg);
  assert(MI.getOpcode() == TargetOpcode::G_ICMP);
  LLT WideTy = MRI.getType(WideReg);
  // Compare the wide register directly against an equally wide zero.
  MI.getOperand(2).setReg(WideReg);
  MI.getOperand(3).setReg(WideZero.getReg(0));
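// Illustrative sketch (not from this file): when the wide value has enough
// known sign bits, the truncation feeding an equality compare is redundant:
//   %t:_(s8) = G_TRUNC %w:_(s32)
//   %c:_(s1) = G_ICMP intpred(eq), %t(s8), %zero8
// becomes
//   %zero32:_(s32) = G_CONSTANT i32 0
//   %c:_(s1) = G_ICMP intpred(eq), %w(s32), %zero32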
// Match a G_GLOBAL_VALUE whose only users are G_PTR_ADDs with constant
// offsets; MatchInfo receives the folded offset and the smallest use offset.
                           std::pair<uint64_t, uint64_t> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);

  auto &GlobalOp = MI.getOperand(1);
  auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())
    return false;

  // Every use must be a G_PTR_ADD of a constant; remember the smallest one.
  Register Dst = MI.getOperand(0).getReg();
  uint64_t MinOffset = -1ull;
  for (auto &UseInstr : MRI.use_nodbg_instructions(Dst)) {
    if (UseInstr.getOpcode() != TargetOpcode::G_PTR_ADD)
      return false;
    auto Cst = getIConstantVRegValWithLookThrough(
        UseInstr.getOperand(2).getReg(), MRI);
    if (!Cst)
      return false;
    MinOffset = std::min(MinOffset, Cst->Value.getZExtValue());
  }

  // Only fold if the new offset is strictly larger (otherwise the combine
  // would re-trigger forever) and stays below 2^20, the largest offset every
  // object format can express.
  uint64_t CurrOffset = GlobalOp.getOffset();
  uint64_t NewOffset = MinOffset + CurrOffset;
  if (NewOffset <= CurrOffset)
    return false;
  if (NewOffset >= (1 << 20))
    return false;

  // The folded offset must also stay inside the referenced object.
  Type *T = GV->getValueType();
  if (!T->isSized() ||
      NewOffset > GV->getDataLayout().getTypeAllocSize(T))
    return false;
  MatchInfo = std::make_pair(NewOffset, MinOffset);
  return true;
                           std::pair<uint64_t, uint64_t> &MatchInfo) {
  uint64_t Offset, MinOffset;
  std::tie(Offset, MinOffset) = MatchInfo;
  B.setInstrAndDebugLoc(*std::next(MI.getIterator()));

  // Fold the smallest use offset into the G_GLOBAL_VALUE itself...
  auto &GlobalOp = MI.getOperand(1);
  auto *GV = GlobalOp.getGlobal();
  GlobalOp.ChangeToGA(GV, Offset, GlobalOp.getTargetFlags());
  Register Dst = MI.getOperand(0).getReg();
  Register NewGVDst = MRI.cloneVirtualRegister(Dst);
  MI.getOperand(0).setReg(NewGVDst);

  // ...and compensate the existing users with a G_PTR_ADD of -MinOffset.
  B.buildPtrAdd(
      Dst, NewGVDst,
      B.buildConstant(LLT::scalar(64), -static_cast<int64_t>(MinOffset)));
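// Illustrative sketch (not from this file): with uses at offsets 8 and 16,
//   %g  = G_GLOBAL_VALUE @x
//   %p1 = G_PTR_ADD %g, 8
//   %p2 = G_PTR_ADD %g, 16
// becomes
//   %og = G_GLOBAL_VALUE @x + 8
//   %g  = G_PTR_ADD %og, -8
//   %p1 = G_PTR_ADD %g, 8
//   %p2 = G_PTR_ADD %g, 16
// letting later folds absorb the smallest offset into the relocation itself.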
// Match a vecreduce_add of (optionally multiplied) extended vectors so it can
// be rewritten with the dot-product instructions. MatchInfo carries the one
// or two source registers and whether the extension was signed.
                           std::tuple<Register, Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected a G_VECREDUCE_ADD instruction");
  assert(STI.hasDotProd() && "Target should have Dot Product feature");

  LLT DstTy = MRI.getType(DstReg);
  LLT MidTy = MRI.getType(MidReg);

  // Checks taken from the tryMatchingMulOfExt helper used below: the multiply
  // must have a single use and both operands must be the same kind of extend.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  if ((I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) &&

  unsigned I1Opc = I1->getOpcode();
  if (I1Opc == TargetOpcode::G_MUL) {
    // vecreduce_add(mul(ext(x), ext(y))).
    if (!tryMatchingMulOfExt(I1, Out1, Out2, I1Opc))
      return false;
    SrcTy = MRI.getType(Out1);
    std::get<0>(MatchInfo) = Out1;
    std::get<1>(MatchInfo) = Out2;
  } else if (I1Opc == TargetOpcode::G_ZEXT || I1Opc == TargetOpcode::G_SEXT) {
    // vecreduce_add(ext(x)), possibly with a multiply feeding the extend.
    if (M->getOpcode() == TargetOpcode::G_MUL &&
        tryMatchingMulOfExt(M, Out1, Out2, I1Opc)) {
      SrcTy = MRI.getType(Out1);
      std::get<0>(MatchInfo) = Out1;
      std::get<1>(MatchInfo) = Out2;
    } else {
      SrcTy = MRI.getType(I1Op);
      std::get<0>(MatchInfo) = I1Op;
      std::get<1>(MatchInfo) = 0;
    }
  }

  // The third element records whether the signed (sdot) or unsigned (udot)
  // form is needed.
  if (I1Opc == TargetOpcode::G_ZEXT)
    std::get<2>(MatchInfo) = 0;
  else if (I1Opc == TargetOpcode::G_SEXT)
    std::get<2>(MatchInfo) = 1;
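// Illustrative sketch (not from this file): with the dot-product feature,
//   %xa:_(<16 x s32>) = G_ZEXT %a:_(<16 x s8>)
//   %xb:_(<16 x s32>) = G_ZEXT %b:_(<16 x s8>)
//   %m:_(<16 x s32>)  = G_MUL %xa, %xb
//   %r:_(s32)         = G_VECREDUCE_ADD %m
// can be computed by accumulating the i8 lanes directly with AArch64::G_UDOT
// (G_SDOT for sign extension), which the apply hook below materialises.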
                           std::tuple<Register, Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected a G_VECREDUCE_ADD instruction");
  assert(STI.hasDotProd() && "Target should have Dot Product feature");

  // Pick the signed or unsigned dot-product opcode recorded by the matcher.
  unsigned DotOpcode =
      std::get<2>(MatchInfo) ? AArch64::G_SDOT : AArch64::G_UDOT;
  Register Ext1SrcReg = std::get<0>(MatchInfo);
  // A zero in the second slot marks a single-source reduction (the branch not
  // shown here builds a splat of ones instead); otherwise the matched register
  // becomes the second dot operand.
  if (std::get<1>(MatchInfo) == 0)
  Ext2SrcReg = std::get<1>(MatchInfo);
  LLT SrcTy = MRI.getType(Ext1SrcReg);

  // A single dot instruction covers the whole source vector...
  if (NumOfDotMI == 1) {
    auto Dot = Builder.buildInstr(DotOpcode, {MidTy},
                                  {Zeroes, Ext1SrcReg, Ext2SrcReg});

  // ...otherwise the sources are split into 16-element chunks plus leftovers,
  // one dot is emitted per chunk, and the partial results are concatenated.
  // Call tails from that splitting logic follow.
  LLT LeftoverTy1, LeftoverTy2;
               LeftoverTy1, Ext1UnmergeReg, Leftover1, Builder,
               LeftoverTy2, Ext2UnmergeReg, Leftover2, Builder,
               {Leftover1[0], v8Zeroes})
               {Leftover2[0], v8Zeroes})
               Ext1UnmergeReg, Builder, MRI);
               Ext2UnmergeReg, Builder, MRI);

  unsigned NumElements = 0;
  for (unsigned i = 0; i < Ext1UnmergeReg.size(); i++) {
    if (MRI.getType(Ext1UnmergeReg[i]).getNumElements() == 16) {
        .buildInstr(DotOpcode, {MRI.getType(Zeroes)},
                    {Zeroes, Ext1UnmergeReg[i], Ext2UnmergeReg[i]})
      ConcatMI->getOperand(0).getReg());

  MI.eraseFromParent();
// Match vecreduce_add(ext(x)) so it can use the add-across-vector-long
// reductions (UADDLV/SADDLV); MatchInfo stores the extend's source register
// and whether the extend was signed.
                           std::pair<Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected G_VECREDUCE_ADD Opcode");

  if (ExtOpc == TargetOpcode::G_ZEXT)
    std::get<1>(MatchInfo) = 0;
  else if (ExtOpc == TargetOpcode::G_SEXT)
    std::get<1>(MatchInfo) = 1;
  else
    return false;

  LLT ExtSrcTy = MRI.getType(ExtSrcReg);
  LLT DstTy = MRI.getType(MI.getOperand(0).getReg());

  std::get<0>(MatchInfo) = ExtSrcReg;
  return true;
// Rewrite the matched reduction using the target's add-across-vector-long
// instructions (G_UADDLV / G_SADDLV).
                           std::pair<Register, bool> &MatchInfo) {
  assert(MI.getOpcode() == TargetOpcode::G_VECREDUCE_ADD &&
         "Expected G_VECREDUCE_ADD Opcode");

  unsigned Opc = std::get<1>(MatchInfo) ? AArch64::G_SADDLV : AArch64::G_UADDLV;
  Register SrcReg = std::get<0>(MatchInfo);
  LLT SrcTy = MRI.getType(SrcReg);
  LLT DstTy = MRI.getType(DstReg);

  // Sources wider than one 128-bit register are first split into legal chunks
  // (the per-width branches below pick the chunk type before extractParts).
  if ((SrcScalSize == 8 && SrcNumElem > 16) ||
      (SrcScalSize == 16 && SrcNumElem > 8) ||
      (SrcScalSize == 32 && SrcNumElem > 4)) {
    if (SrcScalSize == 8)
    else if (SrcScalSize == 16)
    else if (SrcScalSize == 32)
    extractParts(SrcReg, SrcTy, MainTy, LeftoverTy, WorkingRegisters,
                 LeftoverRegs, B, MRI);
  }

  // Emit one ADDLV per chunk: widen undersized chunks first, then take the
  // scalar sum out of lane 0 of the ADDLV result.
  for (unsigned I = 0; I < WorkingRegisters.size(); I++) {
    LLT WorkingRegTy = MRI.getType(WorkingRegisters[I]);
    WorkingRegisters[I] =
        B.buildInstr(std::get<1>(MatchInfo) ? TargetOpcode::G_SEXT
                                            : TargetOpcode::G_ZEXT,

                                 : LLT::fixed_vector(2, 64);
    Register addlvReg =
        B.buildInstr(Opc, {addlvTy}, {WorkingRegisters[I]}).getReg(0);
    if (MidScalarSize == 32 || MidScalarSize == 64) {
      WorkingRegisters[I] = B.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT,
                                         {MidScalarLLT}, {addlvReg, zeroReg})
    } else {
      Register extractReg = B.buildInstr(AArch64::G_EXTRACT_VECTOR_ELT,
      WorkingRegisters[I] =
          B.buildTrunc({MidScalarLLT}, {extractReg}).getReg(0);
    }
  }

  // Add the per-chunk sums together.
  if (WorkingRegisters.size() > 1) {
    outReg = B.buildAdd(MidScalarLLT, WorkingRegisters[0], WorkingRegisters[1])
    for (unsigned I = 2; I < WorkingRegisters.size(); I++) {
      outReg = B.buildAdd(MidScalarLLT, outReg, WorkingRegisters[I]).getReg(0);
    }
  } else {
    outReg = WorkingRegisters[0];
  }

  // Finally extend (or just copy) the scalar sum into the destination.
  B.buildInstr(std::get<1>(MatchInfo) ? TargetOpcode::G_SEXT
                                      : TargetOpcode::G_ZEXT,
  B.buildCopy(DstReg, outReg);
  MI.eraseFromParent();
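// Illustrative sketch (not from this file): for an unsigned v16s8 reduction,
//   %e:_(<16 x s32>) = G_ZEXT %v:_(<16 x s8>)
//   %r:_(s32) = G_VECREDUCE_ADD %e
// becomes, roughly,
//   %sum:_(<4 x s32>) = G_UADDLV %v          ; scalar sum lands in lane 0
//   %r:_(s32) = G_EXTRACT_VECTOR_ELT %sum, 0
// so no per-lane widening of %v is ever emitted.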
// Match a G_ADD/G_SUB/G_MUL whose operands are matching extends from 8- or
// 16-bit elements straight to a much wider type; the extension can then be
// done in two smaller steps with the arithmetic in between.
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL) &&
         "Expected a G_ADD, G_SUB or G_MUL instruction\n");

  LLT DstTy = MRI.getType(DstReg);
  Register ExtDstReg = MI.getOperand(1).getReg();
  LLT Ext1SrcTy = MRI.getType(SrcReg1);
  LLT Ext2SrcTy = MRI.getType(SrcReg2);
  unsigned ExtDstScal = MRI.getType(ExtDstReg).getScalarSizeInBits();
  // Only worthwhile when the extension skips at least one step (8 -> 32,
  // 8 -> 64 or 16 -> 64) and both operands extend from the same type.
  if (((Ext1SrcScal == 8 && ExtDstScal == 32) ||
       ((Ext1SrcScal == 8 || Ext1SrcScal == 16) && ExtDstScal == 64)) &&
      Ext1SrcTy == Ext2SrcTy)
    return true;

// Apply: extend each operand only one step, do the arithmetic at that width,
// then extend the result up to the destination type.
  LLT SrcTy = MRI.getType(SrcReg1);
  unsigned Opc = isSExt ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
  Register AddReg =
      B.buildInstr(MI.getOpcode(), {MidTy}, {Ext1Reg, Ext2Reg}).getReg(0);

  // A G_SUB result may be negative, so it always has to be sign-extended.
  if (MI.getOpcode() == TargetOpcode::G_ADD ||
      MI.getOpcode() == TargetOpcode::G_MUL)
    B.buildInstr(Opc, {DstReg}, {AddReg});
  else
    B.buildSExt(DstReg, AddReg);

  MI.eraseFromParent();
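// Illustrative sketch (not from this file): rather than widening both operands
// all the way before adding,
//   %xa:_(<8 x s64>) = G_SEXT %a:_(<8 x s8>)
//   %xb:_(<8 x s64>) = G_SEXT %b:_(<8 x s8>)
//   %s:_(<8 x s64>)  = G_ADD %xa, %xb
// the add is done one step up and only the sum is widened the rest of the way:
//   %ya:_(<8 x s16>) = G_SEXT %a
//   %yb:_(<8 x s16>) = G_SEXT %b
//   %t:_(<8 x s16>)  = G_ADD %ya, %yb
//   %s:_(<8 x s64>)  = G_SEXT %t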
// Simplify a G_UADDO whose operands are known, via G_ASSERT_ZEXT, to fit in
// 8 or 16 bits: the 32-bit add cannot wrap, so the carry can be recomputed
// from the wide sum and the narrow add-with-overflow removed.
  auto &MRI = *B.getMRI();

  LLT WideTy0 = MRI.getType(Op0Wide);
  LLT WideTy1 = MRI.getType(Op1Wide);
  LLT OpTy = MRI.getType(ResVal);

  // Both inputs must be zero-extended narrow (8/16-bit) values.
  if (Op0WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
      Op1WideDef->getOpcode() != TargetOpcode::G_ASSERT_ZEXT ||
      (OpTySize != 8 && OpTySize != 16))
    return false;

  // The overflow flag must have a single use, a conditional branch.
  Register ResStatus = MI.getOperand(1).getReg();
  if (!MRI.hasOneNonDBGUse(ResStatus))
    return false;
  if (CondUser->getOpcode() != TargetOpcode::G_BRCOND)
    return false;

  // Check where the add result is used relative to the branch's blocks.
  if (any_of(MRI.use_nodbg_instructions(ResVal),
                 (I.getParent() == FailMBB || I.getParent() == CurrentMBB);

  B.setInstrAndDebugLoc(*MI.getNextNode());
  MI.eraseFromParent();

  // Replace the G_UADDO with a plain wide add...
  Register AddDst = MRI.cloneVirtualRegister(Op0Wide);
  B.buildInstr(TargetOpcode::G_ADD, {AddDst}, {Op0Wide, Op1Wide});

  // ...and recompute the carry: it is set iff the 32-bit sum reached 1 << 8
  // (or 1 << 16).
  Register CondBit = MRI.cloneVirtualRegister(Op0Wide);
      B.buildConstant(LLT::scalar(32), OpTySize == 8 ? 1 << 8 : 1 << 16));

  // Narrow the result back, rewire the old users, and erase what is now dead.
  B.buildZExtOrTrunc(ResVal, AddDst);
  auto OldR = U.getParent()->getOperand(0).getReg();
  U.getParent()->eraseFromParent();
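// Illustrative sketch (not from this file): for i8 operands already guarded by
// G_ASSERT_ZEXT, the sequence
//   %res:_(s8), %carry:_(s1) = G_UADDO %a:_(s8), %b:_(s8)
//   G_BRCOND %carry, %overflow.bb
// can instead add the zero-extended 32-bit copies and branch on whether the
// sum reached 256 (65536 for i16), avoiding the narrow add-with-overflow.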
class AArch64PreLegalizerCombinerImpl : public Combiner {
  const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig;

  AArch64PreLegalizerCombinerImpl(
      const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,

  static const char *getName() { return "AArch64PreLegalizerCombiner"; }
#define GET_GICOMBINER_CLASS_MEMBERS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CLASS_MEMBERS

#define GET_GICOMBINER_IMPL
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_IMPL

AArch64PreLegalizerCombinerImpl::AArch64PreLegalizerCombinerImpl(
    const AArch64PreLegalizerCombinerImplRuleConfig &RuleConfig,
    : Combiner(MF, CInfo, TPC, &VT, CSEInfo),
      Helper(Observer, B, /*IsPreLegalize=*/true, &VT, MDT, LI),
      RuleConfig(RuleConfig), STI(STI),
#define GET_GICOMBINER_CONSTRUCTOR_INITS
#include "AArch64GenPreLegalizeGICombiner.inc"
#undef GET_GICOMBINER_CONSTRUCTOR_INITS
bool AArch64PreLegalizerCombinerImpl::tryCombineAll(MachineInstr &MI) const {
  // Run the TableGen-generated rules first, then fall back to the hand-written
  // combines below for a few opcodes.
  if (tryCombineAllImpl(MI))
    return true;

  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  case TargetOpcode::G_SHUFFLE_VECTOR:
    return Helper.tryCombineShuffleVector(MI);
  case TargetOpcode::G_UADDO:
    return tryToSimplifyUADDO(MI, B, Helper, Observer);
  case TargetOpcode::G_MEMCPY_INLINE:
    return Helper.tryEmitMemcpyInline(MI);
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMSET: {
    // Without optimisations, cap the inline expansion at 32 bytes; a MaxLen of
    // 0 means no limit.
    unsigned MaxLen = CInfo.EnableOpt ? 0 : 32;
    // A zeroing G_MEMSET may additionally become a G_BZERO (tryEmitBZero)
    // before the generic memcpy-family combine runs.
    if (Opc == TargetOpcode::G_MEMSET)
  AArch64PreLegalizerCombiner();

  StringRef getPassName() const override {
    return "AArch64PreLegalizerCombiner";
  }

  AArch64PreLegalizerCombinerImplRuleConfig RuleConfig;

void AArch64PreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {

AArch64PreLegalizerCombiner::AArch64PreLegalizerCombiner()
  if (!RuleConfig.parseCommandLineOption())
    report_fatal_error("Invalid rule identifier");
bool AArch64PreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {
  auto &TPC = getAnalysis<TargetPassConfig>();

  // Enable CSE.
  auto &Wrapper = getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  auto *CSEInfo = &Wrapper.get(TPC.getCSEConfig());

  const auto *LI = ST.getLegalizerInfo();
  auto *VT = &getAnalysis<GISelValueTrackingAnalysisLegacy>().get(MF);
  auto *MDT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
                     nullptr, EnableOpt, F.hasOptSize(),

  // Run a single pass (no fixed-point iteration) and enable full DCE, since
  // the input IR may still contain dead instructions.
  CInfo.MaxIterations = 1;
  CInfo.ObserverLvl = CombinerInfo::ObserverLevel::SinglePass;
  CInfo.EnableFullDCE = true;
  AArch64PreLegalizerCombinerImpl Impl(MF, CInfo, &TPC, *VT, CSEInfo,
                                       RuleConfig, ST, MDT, LI);
  return Impl.combineMachineInstrs();
}
char AArch64PreLegalizerCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PreLegalizerCombiner, DEBUG_TYPE,
                      "Combine AArch64 machine instrs before legalization",
                      false, false)

FunctionPass *createAArch64PreLegalizerCombiner() {
  return new AArch64PreLegalizerCombiner();
}