LLVM 22.0.0git
RISCVZilsdOptimizer.cpp
Go to the documentation of this file.
1//===-- RISCVZilsdOptimizer.cpp - RISC-V Zilsd Load/Store Optimizer ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a pass that performs load/store optimizations for the
10// RISC-V Zilsd extension. It combines pairs of 32-bit load/store instructions
11// into single 64-bit LD/SD instructions when possible.
12//
13// The pass runs in two phases:
14// 1. Pre-allocation: Reschedules loads/stores to bring consecutive memory
15// accesses closer together and forms LD/SD pairs with register hints.
16// 2. Post-allocation: Fixes invalid LD/SD instructions if register allocation
17// didn't provide suitable consecutive registers.
18//
19// Note: second phase is integrated into RISCVLoadStoreOptimizer
20//
21//===----------------------------------------------------------------------===//
22
23#include "RISCV.h"
24#include "RISCVInstrInfo.h"
25#include "RISCVRegisterInfo.h"
26#include "RISCVSubtarget.h"
27#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/Statistic.h"
40#include "llvm/Support/Debug.h"
41#include <algorithm>
42
43using namespace llvm;
44
45#define DEBUG_TYPE "riscv-zilsd-opt"
46
47STATISTIC(NumLDFormed, "Number of LD instructions formed");
48STATISTIC(NumSDFormed, "Number of SD instructions formed");
49
50static cl::opt<bool>
51 DisableZilsdOpt("disable-riscv-zilsd-opt", cl::Hidden, cl::init(false),
52 cl::desc("Disable Zilsd load/store optimization"));
53
55 "riscv-zilsd-max-reschedule-distance", cl::Hidden, cl::init(10),
56 cl::desc("Maximum distance for rescheduling load/store instructions"));
57
58namespace {
59
60//===----------------------------------------------------------------------===//
61// Pre-allocation Zilsd optimization pass
62//===----------------------------------------------------------------------===//
/// Pre-allocation pass that pairs adjacent 32-bit LW/SW instructions into
/// 64-bit Zilsd LD/SD pseudos and adds register-allocation hints so the
/// allocator assigns the required even/odd register pair.
63class RISCVPreAllocZilsdOpt : public MachineFunctionPass {
64public:
65 static char ID;
66
67 RISCVPreAllocZilsdOpt() : MachineFunctionPass(ID) {}
68
69 bool runOnMachineFunction(MachineFunction &MF) override;
70
71 StringRef getPassName() const override {
72 return "RISC-V pre-allocation Zilsd load/store optimization";
73 }
74
  // Runs before register allocation, so the machine IR is still in SSA form.
75 MachineFunctionProperties getRequiredProperties() const override {
76 return MachineFunctionProperties().setIsSSA();
77 }
78
  // Requires alias analysis (memory-interference checks in isSafeToMove) and
  // the machine dominator tree; does not alter the CFG.
79 void getAnalysisUsage(AnalysisUsage &AU) const override {
80 AU.addRequired<AAResultsWrapperPass>();
81 AU.addRequired<MachineDominatorTreeWrapperPass>();
82 AU.setPreservesCFG();
84 }
  /// Classifies what kind of value sits in a LW/SW offset operand. Offsets of
  /// different kinds are never comparable, so pairing only happens within one
  /// kind (see getMemoryOpOffset and the per-kind buckets built in
  /// rescheduleLoadStoreInstrs).
85 enum class MemoryOffsetKind {
86 Imm = 0,
87 Global = 1,
88 CPI = 2,
89 BlockAddr = 3,
90 Unknown = 4,
91 };
  // (kind, offset value) for one memory operand.
92 using MemOffset = std::pair<MemoryOffsetKind, int>;
  // (base register id, offset kind) — the bucket key used when grouping
  // loads/stores that may be pairable with each other.
93 using BaseRegInfo = std::pair<unsigned, MemoryOffsetKind>;
94
95private:
96 bool isMemoryOp(const MachineInstr &MI);
97 bool rescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
98 bool canFormLdSdPair(MachineInstr *MI0, MachineInstr *MI1);
99 bool rescheduleOps(MachineBasicBlock *MBB,
100 SmallVectorImpl<MachineInstr *> &MIs, BaseRegInfo Base,
101 bool IsLoad,
102 DenseMap<MachineInstr *, unsigned> &MI2LocMap);
103 bool isSafeToMove(MachineInstr *MI, MachineInstr *Target, bool MoveForward);
104 MemOffset getMemoryOpOffset(const MachineInstr &MI);
105
  // Cached per-function state, set up in runOnMachineFunction.
106 const RISCVSubtarget *STI;
107 const RISCVInstrInfo *TII;
108 const RISCVRegisterInfo *TRI;
109 MachineRegisterInfo *MRI;
110 AliasAnalysis *AA;
111 MachineDominatorTree *DT;
  // Minimum alignment a candidate pair's memory operand must provide
  // (1, 4, or 8 bytes depending on subtarget features).
112 Align RequiredAlign;
113};
114
115} // end anonymous namespace
116
117char RISCVPreAllocZilsdOpt::ID = 0;
118
119INITIALIZE_PASS_BEGIN(RISCVPreAllocZilsdOpt, "riscv-prera-zilsd-opt",
120 "RISC-V pre-allocation Zilsd optimization", false, false)
123INITIALIZE_PASS_END(RISCVPreAllocZilsdOpt, "riscv-prera-zilsd-opt",
124 "RISC-V pre-allocation Zilsd optimization", false, false)
125
126//===----------------------------------------------------------------------===//
127// Pre-allocation pass implementation
128//===----------------------------------------------------------------------===//
129
130bool RISCVPreAllocZilsdOpt::runOnMachineFunction(MachineFunction &MF) {
131
132 if (DisableZilsdOpt || skipFunction(MF.getFunction()))
133 return false;
134
135 STI = &MF.getSubtarget<RISCVSubtarget>();
136
137 // Only run on RV32 with Zilsd extension
138 if (STI->is64Bit() || !STI->hasStdExtZilsd())
139 return false;
140
141 TII = STI->getInstrInfo();
142 TRI = STI->getRegisterInfo();
143 MRI = &MF.getRegInfo();
144 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
145 DT = &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
146
147 // Check alignment: default is 8-byte, but allow 4-byte with tune feature
148 // If unaligned scalar memory is enabled, allow any alignment
149 RequiredAlign = STI->enableUnalignedScalarMem() ? Align(1)
150 : STI->allowZilsd4ByteAlign() ? Align(4)
151 : Align(8);
152 bool Modified = false;
153 for (auto &MBB : MF) {
154 Modified |= rescheduleLoadStoreInstrs(&MBB);
155 }
156
157 return Modified;
158}
159
160RISCVPreAllocZilsdOpt::MemOffset
161RISCVPreAllocZilsdOpt::getMemoryOpOffset(const MachineInstr &MI) {
162 switch (MI.getOpcode()) {
163 case RISCV::LW:
164 case RISCV::SW: {
165 // For LW/SW, the offset is in operand 2
166 const MachineOperand &OffsetOp = MI.getOperand(2);
167
168 // Handle immediate offset
169 if (OffsetOp.isImm())
170 return std::make_pair(MemoryOffsetKind::Imm, OffsetOp.getImm());
171
172 // Handle symbolic operands with MO_LO flag (from MergeBaseOffset)
173 if (OffsetOp.getTargetFlags() & RISCVII::MO_LO) {
174 if (OffsetOp.isGlobal())
175 return std::make_pair(MemoryOffsetKind::Global, OffsetOp.getOffset());
176 if (OffsetOp.isCPI())
177 return std::make_pair(MemoryOffsetKind::CPI, OffsetOp.getOffset());
178 if (OffsetOp.isBlockAddress())
179 return std::make_pair(MemoryOffsetKind::BlockAddr,
180 OffsetOp.getOffset());
181 }
182
183 break;
184 }
185 default:
186 break;
187 }
188
189 return std::make_pair(MemoryOffsetKind::Unknown, 0);
190}
191
192bool RISCVPreAllocZilsdOpt::canFormLdSdPair(MachineInstr *MI0,
193 MachineInstr *MI1) {
194 if (!MI0->hasOneMemOperand() || !MI1->hasOneMemOperand())
195 return false;
196
197 // Get offsets and check they are consecutive
198 int Offset0 = getMemoryOpOffset(*MI0).second;
199 int Offset1 = getMemoryOpOffset(*MI1).second;
200
201 // Offsets must be 4 bytes apart
202 if (Offset1 - Offset0 != 4)
203 return false;
204
205 // We need to guarantee the alignment(base + offset) is legal.
206 const MachineMemOperand *MMO = *MI0->memoperands_begin();
207 if (MMO->getAlign() < RequiredAlign)
208 return false;
209
210 // Check that the two destination/source registers are different for
211 // load/store respectively.
212 Register FirstReg = MI0->getOperand(0).getReg();
213 Register SecondReg = MI1->getOperand(0).getReg();
214 if (FirstReg == SecondReg)
215 return false;
216
217 return true;
218}
219
220bool RISCVPreAllocZilsdOpt::isSafeToMove(MachineInstr *MI, MachineInstr *Target,
221 bool MoveForward) {
222 MachineBasicBlock *MBB = MI->getParent();
223 MachineBasicBlock::iterator Start = MI->getIterator();
224 MachineBasicBlock::iterator End = Target->getIterator();
225
226 if (!MoveForward)
227 std::swap(Start, End);
228
229 // Increment Start to skip the current instruction
230 if (Start != MBB->end())
231 ++Start;
232
233 Register DefReg = MI->getOperand(0).getReg();
234 Register BaseReg = MI->getOperand(1).getReg();
235
236 unsigned ScanCount = 0;
237 for (auto It = Start; It != End; ++It, ++ScanCount) {
238 // Don't move across calls or terminators
239 if (It->isCall() || It->isTerminator()) {
240 LLVM_DEBUG(dbgs() << "Cannot move across call/terminator: " << *It);
241 return false;
242 }
243
244 // Don't move across instructions that modify memory barrier
245 if (It->hasUnmodeledSideEffects()) {
246 LLVM_DEBUG(dbgs() << "Cannot move across instruction with side effects: "
247 << *It);
248 return false;
249 }
250
251 // Check if the base register is modified
252 if (It->modifiesRegister(BaseReg, TRI)) {
253 LLVM_DEBUG(dbgs() << "Base register " << BaseReg
254 << " modified by: " << *It);
255 return false;
256 }
257
258 // For loads, check if the loaded value is used
259 if (MI->mayLoad() &&
260 (It->readsRegister(DefReg, TRI) || It->modifiesRegister(DefReg, TRI))) {
261 LLVM_DEBUG(dbgs() << "Destination register " << DefReg
262 << " used by: " << *It);
263 return false;
264 }
265
266 // For stores, check if the stored register is modified
267 if (MI->mayStore() && It->modifiesRegister(DefReg, TRI)) {
268 LLVM_DEBUG(dbgs() << "Source register " << DefReg
269 << " modified by: " << *It);
270 return false;
271 }
272
273 // Check for memory operation interference
274 if (It->mayLoadOrStore() && It->mayAlias(AA, *MI, /*UseTBAA*/ false)) {
275 LLVM_DEBUG(dbgs() << "Memory operation interference detected\n");
276 return false;
277 }
278 }
279
280 return true;
281}
282
  // Try to merge the LW/SW instructions in MIs — all sharing base register
  // `Base` and the same MemoryOffsetKind — into PseudoLD/PseudoSD pairs.
  // MI2LocMap records each instruction's position in the current scan window
  // and decides which member of a candidate pair is later in program order.
  // Returns true if at least one pair was formed.
283bool RISCVPreAllocZilsdOpt::rescheduleOps(
284 MachineBasicBlock *MBB, SmallVectorImpl<MachineInstr *> &MIs,
285 BaseRegInfo Base, bool IsLoad,
286 DenseMap<MachineInstr *, unsigned> &MI2LocMap) {
  // Sort by offset. At this point all entries are known to share the same
  // base register and MemoryOffsetKind, so comparing the raw offset value
  // alone yields a total order.
289 llvm::sort(MIs.begin(), MIs.end(), [this](MachineInstr *A, MachineInstr *B) {
290 return getMemoryOpOffset(*A).second < getMemoryOpOffset(*B).second;
291 });
292
293 bool Modified = false;
294
295 // Try to pair consecutive operations
296 for (size_t i = 0; i + 1 < MIs.size(); i++) {
297 MachineInstr *MI0 = MIs[i];
298 MachineInstr *MI1 = MIs[i + 1];
299
    // Note: OffsetOp references an operand of MI0; it is consumed by the
    // BuildMI below, before MI0 is erased.
300 Register FirstReg = MI0->getOperand(0).getReg();
301 Register SecondReg = MI1->getOperand(0).getReg();
302 Register BaseReg = MI0->getOperand(1).getReg();
303 const MachineOperand &OffsetOp = MI0->getOperand(2);
304
305 // At this point, MI0 and MI1 are:
306 // 1. both either LW or SW.
307 // 2. guaranteed to have same memory kind.
308 // 3. guaranteed to have same base register.
309 // 4. already be sorted by offset value.
310 // so we don't have to check these in canFormLdSdPair.
311 if (!canFormLdSdPair(MI0, MI1))
312 continue;
313
314 // Use MI2LocMap to determine which instruction appears later in program
315 // order
316 bool MI1IsLater = MI2LocMap[MI1] > MI2LocMap[MI0];
317
318 // For loads: move later instruction up (backwards) to earlier instruction
319 // For stores: move earlier instruction down (forwards) to later instruction
320 MachineInstr *MoveInstr, *TargetInstr;
321 if (IsLoad) {
322 // For loads: move the later instruction to the earlier one
323 MoveInstr = MI1IsLater ? MI1 : MI0;
324 TargetInstr = MI1IsLater ? MI0 : MI1;
325 } else {
326 // For stores: move the earlier instruction to the later one
327 MoveInstr = MI1IsLater ? MI0 : MI1;
328 TargetInstr = MI1IsLater ? MI1 : MI0;
329 }
330
    // Give up when the motion is unsafe or the instructions are further
    // apart than the tunable reschedule-distance limit.
331 unsigned Distance = MI1IsLater ? MI2LocMap[MI1] - MI2LocMap[MI0]
332 : MI2LocMap[MI0] - MI2LocMap[MI1];
333 if (!isSafeToMove(MoveInstr, TargetInstr, !IsLoad) ||
334 Distance > MaxRescheduleDistance)
335 continue;
336
337 // Move the instruction to the target position
    // (InsertPos is captured before the splice so the new paired instruction
    // is built immediately after TargetInstr.)
338 MachineBasicBlock::iterator InsertPos = TargetInstr->getIterator();
339 ++InsertPos;
340
341 // If we need to move an instruction, do it now
342 if (MoveInstr != TargetInstr)
343 MBB->splice(InsertPos, MBB, MoveInstr->getIterator());
344
345 // Create the paired instruction
346 MachineInstrBuilder MIB;
347 DebugLoc DL = MI0->getDebugLoc();
348
349 if (IsLoad) {
350 MIB = BuildMI(*MBB, InsertPos, DL, TII->get(RISCV::PseudoLD_RV32_OPT))
351 .addReg(FirstReg, RegState::Define)
352 .addReg(SecondReg, RegState::Define)
353 .addReg(BaseReg)
354 .add(OffsetOp);
355 ++NumLDFormed;
356 LLVM_DEBUG(dbgs() << "Formed LD: " << *MIB << "\n");
357 } else {
358 MIB = BuildMI(*MBB, InsertPos, DL, TII->get(RISCV::PseudoSD_RV32_OPT))
359 .addReg(FirstReg)
360 .addReg(SecondReg)
361 .addReg(BaseReg)
362 .add(OffsetOp);
363 ++NumSDFormed;
364 LLVM_DEBUG(dbgs() << "Formed SD: " << *MIB << "\n");
365 }
366
367 // Copy memory operands
368 MIB.cloneMergedMemRefs({MI0, MI1});
369
370 // Add register allocation hints for consecutive registers
371 // RISC-V Zilsd requires even/odd register pairs
372 // Only set hints for virtual registers (physical registers already have
373 // encoding)
374 if (FirstReg.isVirtual() && SecondReg.isVirtual()) {
375 // For virtual registers, we can't determine even/odd yet, but we can hint
376 // that they should be allocated as a consecutive pair
377 MRI->setRegAllocationHint(FirstReg, RISCVRI::RegPairEven, SecondReg);
378 MRI->setRegAllocationHint(SecondReg, RISCVRI::RegPairOdd, FirstReg);
379 }
380
381 // Remove the original instructions
    // (Safe: OffsetOp and the MMOs were already consumed above.)
382 MI0->eraseFromParent();
383 MI1->eraseFromParent();
384
385 Modified = true;
386
387 // Skip the next instruction since we've already processed it
388 i++;
389 }
390
391 return Modified;
392}
393
394bool RISCVPreAllocZilsdOpt::isMemoryOp(const MachineInstr &MI) {
395 unsigned Opcode = MI.getOpcode();
396 if (Opcode != RISCV::LW && Opcode != RISCV::SW)
397 return false;
398
399 if (!MI.getOperand(1).isReg())
400 return false;
401
402 // When no memory operands are present, conservatively assume unaligned,
403 // volatile, unfoldable.
404 if (!MI.hasOneMemOperand())
405 return false;
406
407 const MachineMemOperand *MMO = *MI.memoperands_begin();
408
409 if (MMO->isVolatile() || MMO->isAtomic())
410 return false;
411
412 // sw <undef> could probably be eliminated entirely, but for now we just want
413 // to avoid making a mess of it.
414 if (MI.getOperand(0).isReg() && MI.getOperand(0).isUndef())
415 return false;
416
417 // Likewise don't mess with references to undefined addresses.
418 if (MI.getOperand(1).isUndef())
419 return false;
420
421 return true;
422}
423
// Scan one basic block for pairable LW/SW instructions. The block is split
// into windows bounded by calls/terminators (and by repeated base+offset
// pairs); within each window, loads and stores are bucketed by
// (base register, offset kind) and each bucket is handed to rescheduleOps.
// Returns true if any pairing happened.
424bool RISCVPreAllocZilsdOpt::rescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
425 bool Modified = false;
426
427 // Process the basic block in windows delimited by calls, terminators,
428 // or instructions with duplicate base+offset pairs
  // NOTE(review): the declarations of MBBI and E are elided in this view of
  // the file — presumably `MachineBasicBlock::iterator MBBI = MBB->begin(),
  // E = MBB->end();` given the loop below; confirm against the full source.
431
432 while (MBBI != E) {
433 // Map from instruction to its location in the current window
434 DenseMap<MachineInstr *, unsigned> MI2LocMap;
435
436 // Map from base register to list of load/store instructions
437 using Base2InstMap = DenseMap<BaseRegInfo, SmallVector<MachineInstr *, 4>>;
438 using BaseVec = SmallVector<BaseRegInfo, 4>;
439 Base2InstMap Base2LdsMap;
440 Base2InstMap Base2StsMap;
441 BaseVec LdBases;
442 BaseVec StBases;
443
444 unsigned Loc = 0;
445
446 // Build the current window of instructions
447 for (; MBBI != E; ++MBBI) {
448 MachineInstr &MI = *MBBI;
449
450 // Stop at barriers (calls and terminators)
451 if (MI.isCall() || MI.isTerminator()) {
452 // Move past the barrier for next iteration
453 ++MBBI;
454 break;
455 }
456
457 // Track instruction location in window
458 if (!MI.isDebugInstr())
459 MI2LocMap[&MI] = ++Loc;
460
461 MemOffset Offset = getMemoryOpOffset(MI);
462 // Skip non-memory operations or it's not a valid memory offset kind.
463 if (!isMemoryOp(MI) || Offset.first == MemoryOffsetKind::Unknown)
464 continue;
465
466 bool IsLd = (MI.getOpcode() == RISCV::LW);
467 Register Base = MI.getOperand(1).getReg();
468 bool StopHere = false;
469
470 // Lambda to find or add base register entries
    // (Buckets are keyed by base register AND offset kind so that, e.g.,
    // immediate and %lo(global) offsets are never compared to each other.)
471 auto FindBases = [&](Base2InstMap &Base2Ops, BaseVec &Bases) {
472 auto [BI, Inserted] = Base2Ops.try_emplace({Base.id(), Offset.first});
473 if (Inserted) {
474 // First time seeing this base register
475 BI->second.push_back(&MI);
476 Bases.push_back({Base.id(), Offset.first});
477 return;
478 }
479 // Check if we've seen this exact base+offset before
480 if (any_of(BI->second, [&](const MachineInstr *PrevMI) {
481 return Offset == getMemoryOpOffset(*PrevMI);
482 })) {
483 // Found duplicate base+offset - stop here to process current window
484 StopHere = true;
485 } else {
486 BI->second.push_back(&MI);
487 }
488 };
489
490 if (IsLd)
491 FindBases(Base2LdsMap, LdBases);
492 else
493 FindBases(Base2StsMap, StBases);
494
495 if (StopHere) {
496 // Found a duplicate (a base+offset combination that's seen earlier).
497 // Backtrack to process the current window.
    // (MBBI is intentionally NOT advanced here, so the duplicate
    // instruction is re-scanned as the first member of the next window.)
498 --Loc;
499 break;
500 }
501 }
502
503 // Process the current window - reschedule loads
504 for (auto Base : LdBases) {
505 SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base];
506 if (Lds.size() > 1) {
507 Modified |= rescheduleOps(MBB, Lds, Base, true, MI2LocMap);
508 }
509 }
510
511 // Process the current window - reschedule stores
512 for (auto Base : StBases) {
513 SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base];
514 if (Sts.size() > 1) {
515 Modified |= rescheduleOps(MBB, Sts, Base, false, MI2LocMap);
516 }
517 }
518 }
519
520 return Modified;
521}
522
523//===----------------------------------------------------------------------===//
524// Pass creation functions
525//===----------------------------------------------------------------------===//
526
528 return new RISCVPreAllocZilsdOpt();
529}
unsigned const MachineRegisterInfo * MRI
static int getMemoryOpOffset(const MachineInstr &MI)
static bool isMemoryOp(const MachineInstr &MI)
Returns true if instruction is a memory operation that this pass is capable of operating on.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file defines the DenseMap class.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
std::pair< Instruction::BinaryOps, Value * > OffsetOp
Find all possible pairs (BinOp, RHS) that BinOp V, RHS can be simplified.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
if(PassOpts->AAPipeline)
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
static bool isSafeToMove(const MachineInstr &From, const MachineInstr &To)
Check if it's safe to move From down to To, checking that no physical registers are clobbered.
static cl::opt< bool > DisableZilsdOpt("disable-riscv-zilsd-opt", cl::Hidden, cl::init(false), cl::desc("Disable Zilsd load/store optimization"))
static cl::opt< unsigned > MaxRescheduleDistance("riscv-zilsd-max-reschedule-distance", cl::Hidden, cl::init(10), cl::desc("Maximum distance for rescheduling load/store instructions"))
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
AnalysisUsage & addRequired()
LLVM_ABI void setPreservesCFG()
This function should be called by the pass, iff they do not:
Definition Pass.cpp:270
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
Analysis pass which computes a MachineDominatorTree.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
const MachineInstrBuilder & cloneMergedMemRefs(ArrayRef< const MachineInstr * > OtherMIs) const
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
LLVM_ABI Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
Register getReg() const
getReg - Returns the register number.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition Register.h:79
self_iterator getIterator()
Definition ilist_node.h:123
Abstract Attribute helper functions.
Definition Attributor.h:165
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ Define
Register definition.
initializer< Ty > init(const Ty &Val)
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:532
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1732
FunctionPass * createRISCVPreAllocZilsdOptPass()
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
@ Global
Append to llvm.global_dtors.
AAResults AliasAnalysis
Temporary typedef for legacy code that uses a generic AliasAnalysis pointer or reference.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39