#define DEBUG_TYPE "x86-avoid-sfb"
61 "x86-sfb-inspection-limit",
62 cl::desc(
"X86: Number of instructions backward to "
63 "inspect for store forwarding blocks."),
namespace {

using DisplacementSizeMap = std::map<int64_t, unsigned>;
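
// The pass splits vector loads that would stall on smaller, still-in-flight
// stores into the same region (a "store forwarding block") into narrower
// copies that each match one blocking store. DisplacementSizeMap is an
// ordered std::map, so breakBlockedCopies visits blocking stores in
// ascending displacement order.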
class X86AvoidSFBImpl {
public:
  X86AvoidSFBImpl(AliasAnalysis *AA) : AA(AA) {}
  bool runOnMachineFunction(MachineFunction &MF);

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
  AliasAnalysis *AA = nullptr;
  SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2>
      BlockedLoadsStoresPairs;
  SmallVector<MachineInstr *, 2> ForRemoval;
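
  /// Returns couples of Load then Store to memory which look like a memcpy.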
  void findPotentiallylBlockedCopies(MachineFunction &MF);
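  /// Break the memcpy's load and store into smaller copies such that each
  /// load that was blocked by a smaller store can now be forwarded.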
  void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
                          const DisplacementSizeMap &BlockingStoresDispSizeMap);
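  /// Break a copy of size Size into smaller copies.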
  void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm,
                   MachineInstr *StoreInst, int64_t StDispImm,
                   int64_t LMMOffset, int64_t SMMOffset);
  void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                 int64_t LoadDisp, MachineInstr *StoreInst,
                 unsigned NStoreOpcode, int64_t StoreDisp, unsigned Size,
                 int64_t LMMOffset, int64_t SMMOffset);
  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const;
  unsigned getRegSizeInBytes(MachineInstr *Inst);
};

class X86AvoidSFBLegacy : public MachineFunctionPass {
public:
  static char ID;

  X86AvoidSFBLegacy() : MachineFunctionPass(ID) {}
  StringRef getPassName() const override {
    return "X86 Avoid Store Forwarding Blocks";
  }
  bool runOnMachineFunction(MachineFunction &MF) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
  }
};

} // end anonymous namespace
char X86AvoidSFBLegacy::ID = 0;
FunctionPass *llvm::createX86AvoidStoreForwardingBlocksLegacyPass() {
  return new X86AvoidSFBLegacy();
}
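
// The helpers below classify the vector load/store opcodes this pass
// recognizes. Only full-register 128-bit (XMM) and 256-bit (YMM) moves,
// in their SSE, VEX and EVEX encodings, are treated as memcpy-like
// candidates.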
static bool isXMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm ||
         Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm ||
         Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm ||
         Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm ||
         Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm ||
         Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm ||
         Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm ||
         Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm;
}
static bool isYMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm ||
         Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm ||
         Opcode == X86::VMOVDQUYrm || Opcode == X86::VMOVDQAYrm ||
         Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm ||
         Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm ||
         Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm ||
         Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm;
}

static bool isPotentialBlockedMemCpyLd(unsigned Opcode) {
  return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode);
}
static bool isPotentialBlockedMemCpyPair(unsigned LdOpcode, unsigned StOpcode) {
  switch (LdOpcode) {
  case X86::MOVUPSrm:
  case X86::MOVAPSrm:
    return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr;
  case X86::VMOVUPSrm:
  case X86::VMOVAPSrm:
    return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr;
  case X86::VMOVUPDrm:
  case X86::VMOVAPDrm:
    return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr;
  case X86::VMOVDQUrm:
  case X86::VMOVDQArm:
    return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr;
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm:
    return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr;
  case X86::VMOVUPDZ128rm:
  case X86::VMOVAPDZ128rm:
    return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr;
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr;
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQA64Z128rm:
    return StOpcode == X86::VMOVDQU64Z128mr || StOpcode == X86::VMOVDQA64Z128mr;
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA32Z128rm:
    return StOpcode == X86::VMOVDQU32Z128mr || StOpcode == X86::VMOVDQA32Z128mr;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return StOpcode == X86::VMOVDQU64Z256mr || StOpcode == X86::VMOVDQA64Z256mr;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return StOpcode == X86::VMOVDQU32Z256mr || StOpcode == X86::VMOVDQA32Z256mr;
  default:
    return false;
  }
}
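
// A preceding store "blocks" the candidate load if it writes only part of
// the bytes the load reads: the CPU cannot forward the store's data to the
// load, so the load stalls until the store retires. Scalar stores of any
// size can block both XMM and YMM loads; a 128-bit vector store can still
// block a 256-bit load, which is why the vector cases below apply only
// when the load is a YMM load.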
static bool isPotentialBlockingStoreInst(unsigned Opcode, unsigned LoadOpcode) {
  bool PBlock = false;
  PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 ||
            Opcode == X86::MOV32mr || Opcode == X86::MOV32mi ||
            Opcode == X86::MOV16mr || Opcode == X86::MOV16mi ||
            Opcode == X86::MOV8mr || Opcode == X86::MOV8mi;
  if (isYMMLoadOpcode(LoadOpcode))
    PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr ||
              Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr ||
              Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr ||
              Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr ||
              Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr ||
              Opcode == X86::VMOVDQU64Z128mr || Opcode == X86::VMOVDQA64Z128mr ||
              Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr;
  return PBlock;
}
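
// Sizes in bytes of the narrower copies buildCopies can emit; the values
// follow directly from the instruction widths (e.g. MOV64 moves 8 bytes).
static const int MOV128SZ = 16;
static const int MOV64SZ = 8;
static const int MOV32SZ = 4;
static const int MOV16SZ = 2;
static const int MOV8SZ = 1;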
static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) {
  switch (LoadOpcode) {
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return X86::VMOVUPSrm;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return X86::VMOVUPDrm;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return X86::VMOVDQUrm;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return X86::VMOVUPSZ128rm;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return X86::VMOVUPDZ128rm;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return X86::VMOVDQU64Z128rm;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return X86::VMOVDQU32Z128rm;
  default:
    llvm_unreachable("Unexpected Load Opcode");
  }
}
static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
  switch (StoreOpcode) {
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
    return X86::VMOVUPSmr;
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
    return X86::VMOVUPDmr;
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
    return X86::VMOVDQUmr;
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
    return X86::VMOVUPSZ128mr;
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
    return X86::VMOVUPDZ128mr;
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVDQA64Z256mr:
    return X86::VMOVDQU64Z128mr;
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA32Z256mr:
    return X86::VMOVDQU32Z128mr;
  default:
    llvm_unreachable("Unexpected Store Opcode");
  }
}
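
// Address-operand helpers. The memory reference of an X86 MachineInstr
// starts at the offset returned by X86II::getMemoryOperandNo() (adjusted
// by the operand bias) and consists of base, scale, index, displacement
// and segment operands.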
static int getAddrOffset(const MachineInstr *MI) {
  const MCInstrDesc &Descl = MI->getDesc();
  int AddrOffset = X86II::getMemoryOperandNo(Descl.TSFlags);
  assert(AddrOffset != -1 && "Expected Memory Operand");
  AddrOffset += X86II::getOperandBias(Descl);
  return AddrOffset;
}

static MachineOperand &getBaseOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrBaseReg);
}

static MachineOperand &getDispOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrDisp);
}
// Relevant addressing modes contain only a base register and an immediate
// displacement, or a frame index and an immediate displacement.
// TODO: Consider expanding to other addressing modes in the future.
static bool isRelevantAddressingMode(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  const MachineOperand &Base = getBaseOperand(MI);
  const MachineOperand &Disp = getDispOperand(MI);
  const MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt);
  const MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg);
  const MachineOperand &Segment =
      MI->getOperand(AddrOffset + X86::AddrSegmentReg);

  if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI()))
    return false;
  if (!Disp.isImm())
    return false;
  if (Scale.getImm() != 1)
    return false;
  if (!(Index.isReg() && Index.getReg() == X86::NoRegister))
    return false;
  if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister))
    return false;
  return true;
}
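
// Collect potentially blocking stores by walking backwards from the load.
// The scan is capped at x86-sfb-inspection-limit instructions: a store far
// enough ahead of the load has usually retired by the time the load
// executes, so it can no longer cause a store-forwarding stall.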
static SmallVector<MachineInstr *, 2>
findPotentialBlockers(MachineInstr *LoadInst) {
  SmallVector<MachineInstr *, 2> PotentialBlockers;
  unsigned BlockCount = 0;
  const unsigned InspectionLimit = X86AvoidSFBInspectionLimit;
  for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)),
            E = LoadInst->getParent()->rend();
       PBInst != E; ++PBInst) {
    if (PBInst->isMetaInstruction())
      continue;
    BlockCount++;
    if (BlockCount >= InspectionLimit)
      break;
    MachineInstr &MI = *PBInst;
    if (MI.getDesc().isCall())
      return PotentialBlockers;
    PotentialBlockers.push_back(&MI);
  }
  // If we didn't reach the instruction limit, try the predecessor blocks.
  // Ideally we should traverse the predecessor blocks in depth with some
  // coloring algorithm, but for now let's just look at the first-order
  // predecessors.
  if (BlockCount < InspectionLimit) {
    MachineBasicBlock *MBB = LoadInst->getParent();
    int LimitLeft = InspectionLimit - BlockCount;
    for (MachineBasicBlock *PMBB : MBB->predecessors()) {
      int PredCount = 0;
      for (MachineInstr &PBInst : llvm::reverse(*PMBB)) {
        if (PBInst.isMetaInstruction())
          continue;
        PredCount++;
        if (PredCount >= LimitLeft)
          break;
        if (PBInst.getDesc().isCall())
          break;
        PotentialBlockers.push_back(&PBInst);
      }
    }
  }
  return PotentialBlockers;
}
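
// Emit one narrow load/store pair copying Size bytes: a load with the new
// opcode at LoadDisp into a fresh virtual register, then a store of that
// register at StoreDisp. The new memory operands are offset views
// (LMMOffset/SMMOffset) of the original load's and store's operands.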
void X86AvoidSFBImpl::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                                int64_t LoadDisp, MachineInstr *StoreInst,
                                unsigned NStoreOpcode, int64_t StoreDisp,
                                unsigned Size, int64_t LMMOffset,
                                int64_t SMMOffset) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  MachineBasicBlock *MBB = LoadInst->getParent();
  MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
  MachineMemOperand *SMMO = *StoreInst->memoperands_begin();

  Register Reg1 =
      MRI->createVirtualRegister(TII->getRegClass(TII->get(NLoadOpcode), 0));
  MachineInstr *NewLoad =
      BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
              Reg1)
          .add(LoadBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(LoadDisp)
          .addReg(X86::NoRegister)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
  if (LoadBase.isReg())
    getBaseOperand(NewLoad).setIsKill(false);
  LLVM_DEBUG(NewLoad->dump());
  // If the load and store are consecutive, use the load instruction's
  // location to reduce register pressure.
  MachineInstr *StInst = StoreInst;
  auto PrevInstrIt = prev_nodbg(MachineBasicBlock::instr_iterator(StoreInst),
                                MBB->instr_begin());
  if (PrevInstrIt.getNodePtr() == LoadInst)
    StInst = LoadInst;
  MachineInstr *NewStore =
      BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode))
          .add(StoreBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(StoreDisp)
          .addReg(X86::NoRegister)
          .addReg(Reg1)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size));
  if (StoreBase.isReg())
    getBaseOperand(NewStore).setIsKill(false);
  MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
  assert(StoreSrcVReg.isReg() && "Expected virtual register");
  NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
  LLVM_DEBUG(NewStore->dump());
}
void X86AvoidSFBImpl::buildCopies(int Size, MachineInstr *LoadInst,
                                  int64_t LdDispImm, MachineInstr *StoreInst,
                                  int64_t StDispImm, int64_t LMMOffset,
                                  int64_t SMMOffset) {
  int LdDisp = LdDispImm;
  int StDisp = StDispImm;
  while (Size > 0) {
    if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) {
      Size = Size - MOV128SZ;
      buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp,
                StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()),
                StDisp, MOV128SZ, LMMOffset, SMMOffset);
      LdDisp += MOV128SZ;
      StDisp += MOV128SZ;
      LMMOffset += MOV128SZ;
      SMMOffset += MOV128SZ;
      continue;
    }
    if (Size - MOV64SZ >= 0) {
      Size = Size - MOV64SZ;
      buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr, StDisp,
                MOV64SZ, LMMOffset, SMMOffset);
      LdDisp += MOV64SZ;
      StDisp += MOV64SZ;
      LMMOffset += MOV64SZ;
      SMMOffset += MOV64SZ;
      continue;
    }
    if (Size - MOV32SZ >= 0) {
      Size = Size - MOV32SZ;
      buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr, StDisp,
                MOV32SZ, LMMOffset, SMMOffset);
      LdDisp += MOV32SZ;
      StDisp += MOV32SZ;
      LMMOffset += MOV32SZ;
      SMMOffset += MOV32SZ;
      continue;
    }
    if (Size - MOV16SZ >= 0) {
      Size = Size - MOV16SZ;
      buildCopy(LoadInst, X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr, StDisp,
                MOV16SZ, LMMOffset, SMMOffset);
      LdDisp += MOV16SZ;
      StDisp += MOV16SZ;
      LMMOffset += MOV16SZ;
      SMMOffset += MOV16SZ;
      continue;
    }
    if (Size - MOV8SZ >= 0) {
      Size = Size - MOV8SZ;
      buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp,
                MOV8SZ, LMMOffset, SMMOffset);
      LdDisp += MOV8SZ;
      StDisp += MOV8SZ;
      LMMOffset += MOV8SZ;
      SMMOffset += MOV8SZ;
      continue;
    }
  }
}
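
// After splitting, the original load and store are erased, so the kill
// flags of their base registers must migrate to the last of the newly
// created loads and stores that use those registers.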
static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  auto *StorePrevNonDbgInstr =
      prev_nodbg(MachineBasicBlock::instr_iterator(StoreInst),
                 LoadInst->getParent()->instr_begin())
          .getNodePtr();
  if (LoadBase.isReg()) {
    MachineInstr *LastLoad = LoadInst->getPrevNode();
    // If the original load and store to xmm/ymm were consecutive, the
    // partial copies were also created in consecutive order to reduce
    // register pressure, and the last load precedes the last store.
    if (StorePrevNonDbgInstr == LoadInst)
      LastLoad = LoadInst->getPrevNode()->getPrevNode();
    getBaseOperand(LastLoad).setIsKill(LoadBase.isKill());
  }
  if (StoreBase.isReg()) {
    MachineInstr *StInst = StoreInst;
    if (StorePrevNonDbgInstr == LoadInst)
      StInst = LoadInst;
    getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill());
  }
}
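
// Conservatively treat the two memory operands as aliasing whenever either
// one lacks an IR value or a known size; otherwise query AliasAnalysis on
// the overlapping extents.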
bool X86AvoidSFBImpl::alias(const MachineMemOperand &Op1,
                            const MachineMemOperand &Op2) const {
  if (!Op1.getValue() || !Op2.getValue() || !Op1.getSize().hasValue() ||
      !Op2.getSize().hasValue())
    return true;

  int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
  int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
  int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;

  return !AA->isNoAlias(
      MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
      MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo()));
}
void X86AvoidSFBImpl::findPotentiallylBlockedCopies(MachineFunction &MF) {
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
        continue;
      Register DefVR = MI.getOperand(0).getReg();
      if (!MRI->hasOneNonDBGUse(DefVR))
        continue;
      for (MachineOperand &StoreMO :
           llvm::make_early_inc_range(MRI->use_nodbg_operands(DefVR))) {
        MachineInstr &StoreMI = *StoreMO.getParent();
        // Skip cases where the memcpy may overlap.
        if (StoreMI.getParent() == MI.getParent() &&
            isPotentialBlockedMemCpyPair(MI.getOpcode(),
                                         StoreMI.getOpcode()) &&
            isRelevantAddressingMode(&MI) &&
            isRelevantAddressingMode(&StoreMI) && MI.hasOneMemOperand() &&
            StoreMI.hasOneMemOperand()) {
          if (!alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin()))
            BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI));
        }
      }
    }
}
unsigned X86AvoidSFBImpl::getRegSizeInBytes(MachineInstr *LoadInst) {
  const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0);
  return TRI->getRegSizeInBits(*TRC) / 8;
}
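
// Walk the blocking stores in ascending displacement order, copying the
// gap before each blocking range and then the blocking range itself, and
// finish with the tail after the last one. For example, a 16-byte load at
// displacement 0 blocked by a 4-byte store at displacement 4 is split into
// copies of [0,4), [4,8) and [8,16).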
void X86AvoidSFBImpl::breakBlockedCopies(
    MachineInstr *LoadInst, MachineInstr *StoreInst,
    const DisplacementSizeMap &BlockingStoresDispSizeMap) {
  int64_t LdDispImm = getDispOperand(LoadInst).getImm();
  int64_t StDispImm = getDispOperand(StoreInst).getImm();
  int64_t LMMOffset = 0;
  int64_t SMMOffset = 0;

  int64_t LdDisp1 = LdDispImm;
  int64_t LdDisp2 = 0;
  int64_t StDisp1 = StDispImm;
  int64_t StDisp2 = 0;
  unsigned Size1 = 0;
  unsigned Size2 = 0;
  int64_t LdStDelta = StDispImm - LdDispImm;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    LdDisp2 = DispSizePair.first;
    StDisp2 = DispSizePair.first + LdStDelta;
    Size2 = DispSizePair.second;
    // Avoid copying overlapping areas.
    if (LdDisp2 < LdDisp1) {
      int OverlapDelta = LdDisp1 - LdDisp2;
      LdDisp2 += OverlapDelta;
      StDisp2 += OverlapDelta;
      Size2 -= OverlapDelta;
    }
    Size1 = LdDisp2 - LdDisp1;

    // Build a copy for the region up to the current blocking store's
    // displacement.
    buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
                SMMOffset);
    // Build a copy for the current blocking store.
    buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2, LMMOffset + Size1,
                SMMOffset + Size1);
    LdDisp1 = LdDisp2 + Size2;
    StDisp1 = StDisp2 + Size2;
    LMMOffset += Size1 + Size2;
    SMMOffset += Size1 + Size2;
  }
  unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1;
  buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
              LMMOffset);
}
static bool hasSameBaseOpValue(MachineInstr *LoadInst,
                               MachineInstr *StoreInst) {
  const MachineOperand &LoadBase = getBaseOperand(LoadInst);
  const MachineOperand &StoreBase = getBaseOperand(StoreInst);
  if (LoadBase.isReg() != StoreBase.isReg())
    return false;
  if (LoadBase.isReg())
    return LoadBase.getReg() == StoreBase.getReg();
  return LoadBase.getIndex() == StoreBase.getIndex();
}
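
// A store blocks the load when its range lies entirely inside the load's
// range. For a 16-byte load at displacement 0, a 4-byte store at
// displacement 12 blocks it because 12 >= 0 and 12 <= 0 + (16 - 4).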
static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  return ((StoreDispImm >= LoadDispImm) &&
          (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));
}
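
// Record a blocking store at DispImm; if one is already recorded at that
// displacement, keep the smaller size so the copy is split at the finest
// granularity observed.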
static void
updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap,
                                int64_t DispImm, unsigned Size) {
  auto [It, Inserted] = BlockingStoresDispSizeMap.try_emplace(DispImm, Size);
  if (!Inserted && It->second > Size)
    It->second = Size;
}
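
// Drop blocking stores whose range encloses a later, smaller one: the
// ascending-order stack keeps only entries with strictly increasing range
// ends, so for {0: size 8, 4: size 4} the enclosing [0,8) entry is popped
// and only the inner [4,8) range is split out separately.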
static void
removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) {
  if (BlockingStoresDispSizeMap.size() <= 1)
    return;

  SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    int64_t CurrDisp = DispSizePair.first;
    unsigned CurrSize = DispSizePair.second;
    while (DispSizeStack.size()) {
      int64_t PrevDisp = DispSizeStack.back().first;
      unsigned PrevSize = DispSizeStack.back().second;
      if (CurrDisp + CurrSize > PrevDisp + PrevSize)
        break;
      DispSizeStack.pop_back();
    }
    DispSizeStack.push_back(DispSizePair);
  }
  BlockingStoresDispSizeMap.clear();
  for (auto Disp : DispSizeStack)
    BlockingStoresDispSizeMap.insert(Disp);
}
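
// Driver: find load/store pairs that look like a memcpy, scan backwards
// from each load for smaller stores into the loaded range, and if any are
// found split the copy and erase the original wide load and store.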
bool X86AvoidSFBImpl::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86AvoidStoreForwardBlocks ||
      !MF.getSubtarget<X86Subtarget>().is64Bit())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n");
  // Look for a load and then a store to XMM/YMM which look like a memcpy.
  findPotentiallylBlockedCopies(MF);

  for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) {
    MachineInstr *LoadInst = LoadStoreInstPair.first;
    int64_t LdDispImm = getDispOperand(LoadInst).getImm();
    DisplacementSizeMap BlockingStoresDispSizeMap;

    SmallVector<MachineInstr *, 2> PotentialBlockers =
        findPotentialBlockers(LoadInst);
    for (auto *PBInst : PotentialBlockers) {
      if (!isPotentialBlockingStoreInst(PBInst->getOpcode(),
                                        LoadInst->getOpcode()) ||
          !isRelevantAddressingMode(PBInst) || !PBInst->hasOneMemOperand())
        continue;
      int64_t PBstDispImm = getDispOperand(PBInst).getImm();
      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize().getValue();
      // This check doesn't cover all cases, but it will suffice for now.
      // TODO: take branch probability into consideration; if the blocking
      // store is in a rarely reached block, breaking the memcpy could lose
      // performance.
      if (hasSameBaseOpValue(LoadInst, PBInst) &&
          isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm,
                          PBstSize))
        updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm,
                                        PBstSize);
    }

    if (BlockingStoresDispSizeMap.empty())
      continue;

    // We found a store forwarding block; break the memcpy's load and store
    // into smaller copies such that each smaller store that was causing
    // the block is now copied separately.
    MachineInstr *StoreInst = LoadStoreInstPair.second;
    LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
    LLVM_DEBUG(LoadInst->dump());
    LLVM_DEBUG(StoreInst->dump());
    LLVM_DEBUG(dbgs() << "Replaced with:\n");
    removeRedundantBlockingStores(BlockingStoresDispSizeMap);
    breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
    updateKillStatus(LoadInst, StoreInst);
    ForRemoval.push_back(LoadInst);
    ForRemoval.push_back(StoreInst);
    Changed = true;
  }
  for (auto *RemovedInst : ForRemoval) {
    RemovedInst->eraseFromParent();
  }
  ForRemoval.clear();
  BlockedLoadsStoresPairs.clear();
  LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n");

  return Changed;
}
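
// Both pass-manager entry points construct the shared implementation with
// the function's AliasAnalysis result and delegate to it.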
bool X86AvoidSFBLegacy::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  X86AvoidSFBImpl Impl(AA);
  return Impl.runOnMachineFunction(MF);
}
PreservedAnalyses X86AvoidSFBPass::run(MachineFunction &MF,
                                       MachineFunctionAnalysisManager &MFAM) {
  AAResults *AA =
      &MFAM.getResult<FunctionAnalysisManagerMachineFunctionProxy>(MF)
           .getManager()
           .getResult<AAManager>(MF.getFunction());
  X86AvoidSFBImpl Impl(AA);
  bool Changed = Impl.runOnMachineFunction(MF);
  if (!Changed)
    return PreservedAnalyses::all();
  return getMachineFunctionPassPreservedAnalyses();
}