//===- CoroElide.cpp - Coroutine Frame Allocation Elision Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroElide.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "coro-elide"

STATISTIC(NumOfCoroElided, "The # of coroutines elided.");

#ifndef NDEBUG
static cl::opt<std::string> CoroElideInfoOutputFilename(
    "coro-elide-info-output-file", cl::value_desc("filename"),
    cl::desc("File to record the coroutines got elided"), cl::Hidden);
#endif

namespace {
// Created on demand if the coro-elide pass has work to do.
class FunctionElideInfo {
public:
  FunctionElideInfo(Function *F) : ContainingFunction(F) {
    this->collectPostSplitCoroIds();
  }

  bool hasCoroIds() const { return !CoroIds.empty(); }

  const SmallVectorImpl<CoroIdInst *> &getCoroIds() const { return CoroIds; }

private:
  Function *ContainingFunction;
  SmallVector<CoroIdInst *, 4> CoroIds;
  // Used in canCoroBeginEscape to distinguish coro.suspend switches.
  SmallPtrSet<const SwitchInst *, 4> CoroSuspendSwitches;

  void collectPostSplitCoroIds();
  friend class CoroIdElider;
};

class CoroIdElider {
public:
  CoroIdElider(CoroIdInst *CoroId, FunctionElideInfo &FEI, AAResults &AA,
               DominatorTree &DT, OptimizationRemarkEmitter &ORE);
  void elideHeapAllocations(uint64_t FrameSize, Align FrameAlign);
  bool lifetimeEligibleForElide() const;
  bool attemptElide();
  bool canCoroBeginEscape(const CoroBeginInst *,
                          const SmallPtrSetImpl<BasicBlock *> &) const;

private:
  CoroIdInst *CoroId;
  FunctionElideInfo &FEI;
  AAResults &AA;
  DominatorTree &DT;
  OptimizationRemarkEmitter &ORE;

  SmallVector<CoroBeginInst *, 1> CoroBegins;
  SmallVector<CoroAllocInst *, 1> CoroAllocs;
  SmallVector<CoroSubFnInst *, 4> ResumeAddr;
  DenseMap<CoroBeginInst *, SmallVector<CoroSubFnInst *, 4>> DestroyAddr;
};
} // end anonymous namespace

// Go through the list of coro.subfn.addr intrinsics and replace them with the
// provided constant.
static void replaceWithConstant(Constant *Value,
                                SmallVectorImpl<CoroSubFnInst *> &Users) {
  if (Users.empty())
    return;

  // See if we need to bitcast the constant to match the type of the intrinsic
  // being replaced. Note: All coro.subfn.addr intrinsics return the same type,
  // so we only need to examine the type of the first one in the list.
  Type *IntrTy = Users.front()->getType();
  Type *ValueTy = Value->getType();
  if (ValueTy != IntrTy) {
    // May need to tweak the function type to match the type expected at the
    // use site.
    assert(ValueTy->isPointerTy() && IntrTy->isPointerTy());
    Value = ConstantExpr::getBitCast(Value, IntrTy);
  }

  // Now the value type matches the type of the intrinsic. Replace them all!
  for (CoroSubFnInst *I : Users)
    replaceAndRecursivelySimplify(I, Value);
}
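
// Illustrative sketch (hypothetical IR, not taken from an actual test case):
// for a post-split coroutine @f with resumer @f.resume, replacing the
// coro.subfn.addr result with the constant and simplifying its uses turns an
// indirect resume through the handle into a direct call:
//   %fn = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 0)  ; 0 = resume slot
//   call fastcc void %fn(ptr %hdl)
// becomes
//   call fastcc void @f.resume(ptr %hdl)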

// See if any operand of the call instruction references the coroutine frame.
static bool operandReferences(CallInst *CI, AllocaInst *Frame, AAResults &AA) {
  for (Value *Op : CI->operand_values())
    if (!AA.isNoAlias(Op, Frame))
      return true;
  return false;
}

// Look for any tail calls referencing the coroutine frame and remove the tail
// attribute from them, since the coroutine frame now resides on the stack and
// a tail call implies that the function does not reference anything on the
// stack. However, if it's a musttail call, we cannot remove the tailcall
// attribute. It's safe to keep it there, as the musttail call is for symmetric
// transfer, and by that point the frame should have been destroyed and hence
// not be interfering with operands.
static void removeTailCallAttribute(AllocaInst *Frame, AAResults &AA) {
  Function &F = *Frame->getFunction();
  for (Instruction &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (Call->isTailCall() && operandReferences(Call, Frame, AA) &&
          !Call->isMustTailCall())
        Call->setTailCall(false);
}
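
// Illustrative sketch (hypothetical IR): once the frame becomes an alloca in
// the caller, a call such as
//   tail call void @use(ptr %frame.gep)
// whose operand may alias the frame must lose its 'tail' marker, because a
// tail call must not access the caller's stack; removeTailCallAttribute
// rewrites it to a plain 'call'.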

// Given a resume function @f.resume(%f.frame* %frame), returns the size
// and expected alignment of %f.frame type.
static std::optional<std::pair<uint64_t, Align>>
getFrameLayout(Function *Resume) {
  // Pull information from the function attributes.
  auto Size = Resume->getParamDereferenceableBytes(0);
  if (!Size)
    return std::nullopt;
  return std::make_pair(Size, Resume->getParamAlign(0).valueOrOne());
}
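
// Illustrative sketch (the attribute names are real, the function and numbers
// are made up): the frame parameter of the resume function carries the layout,
// e.g.
//   define internal fastcc void @f.resume(ptr align 8 dereferenceable(40) %hdl)
// from which getFrameLayout would return {40, Align(8)}. Without the
// 'dereferenceable' attribute the frame size is unknown and elision is skipped.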

// Finds the first non-alloca instruction in the entry block of a function.
static Instruction *getFirstNonAllocaInTheEntryBlock(Function *F) {
  for (Instruction &I : F->getEntryBlock())
    if (!isa<AllocaInst>(&I))
      return &I;
  llvm_unreachable("no terminator in the entry block");
}

#ifndef NDEBUG
static std::unique_ptr<raw_fd_ostream> getOrCreateLogFile() {
  assert(!CoroElideInfoOutputFilename.empty() &&
         "coro-elide-info-output-file shouldn't be empty");
  std::error_code EC;
  auto Result = std::make_unique<raw_fd_ostream>(CoroElideInfoOutputFilename,
                                                 EC, sys::fs::OF_Append);
  if (!EC)
    return Result;
  llvm::errs() << "Error opening coro-elide-info-output-file '"
               << CoroElideInfoOutputFilename << "' for appending!\n";
  return std::make_unique<raw_fd_ostream>(2, false); // stderr.
}
#endif

void FunctionElideInfo::collectPostSplitCoroIds() {
  for (auto &I : instructions(this->ContainingFunction)) {
    if (auto *CII = dyn_cast<CoroIdInst>(&I))
      if (CII->getInfo().isPostSplit())
        // If it is the coroutine itself, don't touch it.
        if (CII->getCoroutine() != CII->getFunction())
          CoroIds.push_back(CII);

    // Consider a case like:
    //   %0 = call i8 @llvm.coro.suspend(...)
    //   switch i8 %0, label %suspend [i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // and collect the SwitchInsts which are used by escape analysis later.
    if (auto *CSI = dyn_cast<CoroSuspendInst>(&I))
      if (CSI->hasOneUse() && isa<SwitchInst>(CSI->use_begin()->getUser())) {
        SwitchInst *SWI = cast<SwitchInst>(CSI->use_begin()->getUser());
        if (SWI->getNumCases() == 2)
          CoroSuspendSwitches.insert(SWI);
      }
  }
}

CoroIdElider::CoroIdElider(CoroIdInst *CoroId, FunctionElideInfo &FEI,
                           AAResults &AA, DominatorTree &DT,
                           OptimizationRemarkEmitter &ORE)
    : CoroId(CoroId), FEI(FEI), AA(AA), DT(DT), ORE(ORE) {
  // Collect all coro.begins and coro.allocs associated with this coro.id.
  for (User *U : CoroId->users()) {
    if (auto *CB = dyn_cast<CoroBeginInst>(U))
      CoroBegins.push_back(CB);
    else if (auto *CA = dyn_cast<CoroAllocInst>(U))
      CoroAllocs.push_back(CA);
  }

  // Collect all coro.subfn.addrs associated with coro.begin.
  // Note, we only devirtualize the calls if their coro.subfn.addr refers to
  // coro.begin directly. If we run into cases where this check is too
  // conservative, we can consider relaxing the check.
  for (CoroBeginInst *CB : CoroBegins) {
    for (User *U : CB->users())
      if (auto *II = dyn_cast<CoroSubFnInst>(U))
        switch (II->getIndex()) {
        case CoroSubFnInst::ResumeIndex:
          ResumeAddr.push_back(II);
          break;
        case CoroSubFnInst::DestroyIndex:
          DestroyAddr[CB].push_back(II);
          break;
        default:
          llvm_unreachable("unexpected coro.subfn.addr constant");
        }
  }
}

// To elide heap allocations we need to suppress code blocks guarded by
// llvm.coro.alloc and llvm.coro.free instructions.
void CoroIdElider::elideHeapAllocations(uint64_t FrameSize, Align FrameAlign) {
  LLVMContext &C = FEI.ContainingFunction->getContext();
  BasicBlock::iterator InsertPt =
      getFirstNonAllocaInTheEntryBlock(FEI.ContainingFunction)->getIterator();

  // Replacing llvm.coro.alloc with false will suppress the dynamic
  // allocation, as the frontend is expected to generate code that looks like:
  //   id = coro.id(...)
  //   mem = coro.alloc(id) ? malloc(coro.size()) : 0;
  //   coro.begin(id, mem)
  auto *False = ConstantInt::getFalse(C);
  for (auto *CA : CoroAllocs) {
    CA->replaceAllUsesWith(False);
    CA->eraseFromParent();
  }

  // FIXME: Design how to transmit alignment information for every alloca that
  // is spilled into the coroutine frame and recreate the alignment information
  // here. Possibly we will need to do a mini SROA here and break the coroutine
  // frame into individual AllocaInsts recreating the original alignment.
  const DataLayout &DL = FEI.ContainingFunction->getParent()->getDataLayout();
  auto *FrameTy = ArrayType::get(Type::getInt8Ty(C), FrameSize);
  auto *Frame = new AllocaInst(FrameTy, DL.getAllocaAddrSpace(), "", InsertPt);
  Frame->setAlignment(FrameAlign);
  auto *FrameVoidPtr =
      new BitCastInst(Frame, PointerType::getUnqual(C), "vFrame", InsertPt);

  for (auto *CB : CoroBegins) {
    CB->replaceAllUsesWith(FrameVoidPtr);
    CB->eraseFromParent();
  }

  // Since the coroutine frame now lives on the stack, any tail call
  // referencing it must be made a non-tail call.
  removeTailCallAttribute(Frame, AA);
}
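
// Illustrative result (hypothetical IR): after elideHeapAllocations, the
// frontend pattern shown above degenerates into a plain stack allocation in
// the caller's entry block:
//   %frame = alloca [40 x i8], align 8   ; FrameSize/FrameAlign from @f.resume
//   ; coro.alloc folded to false, so the malloc/free paths become dead
//   ; every coro.begin use now sees %frame directly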

bool CoroIdElider::canCoroBeginEscape(
    const CoroBeginInst *CB, const SmallPtrSetImpl<BasicBlock *> &TIs) const {
  const auto &It = DestroyAddr.find(CB);
  assert(It != DestroyAddr.end());

  // Limit the number of blocks we visit.
  unsigned Limit = 32 * (1 + It->second.size());

  SmallVector<const BasicBlock *, 32> Worklist;
  Worklist.push_back(CB->getParent());

  SmallPtrSet<const BasicBlock *, 32> Visited;
  // Consider the basic block of each coro.destroy as visited, so that we
  // skip any path passing through coro.destroy.
  for (auto *DA : It->second)
    Visited.insert(DA->getParent());

  SmallPtrSet<const BasicBlock *, 32> EscapingBBs;
  for (auto *U : CB->users()) {
    // Uses from coroutine intrinsics are not a problem.
    if (isa<CoroFreeInst, CoroSubFnInst, CoroSaveInst>(U))
      continue;

    // Conservatively treat every other use as an escaping candidate.
    //
    // Note that the major user of switch ABI coroutines (C++) will store
    // resume.fn, destroy.fn and the index to the coroutine frame immediately,
    // so the parent block of the coro.begin in C++ will always be escaping.
    // Hence we can't get any performance benefit for C++ by improving the
    // precision of this method.
    //
    // The reason we still perform the check is that we want LLVM coroutines
    // in the switch ABI to be as self-contained as possible, instead of a
    // by-product of C++20 coroutines.
    EscapingBBs.insert(cast<Instruction>(U)->getParent());
  }

  bool PotentiallyEscaped = false;

  do {
    const auto *BB = Worklist.pop_back_val();
    if (!Visited.insert(BB).second)
      continue;

    // A path-insensitive marker to test whether the coro.begin escapes.
    // It is intentionally path insensitive, and therefore imprecise, because
    // we don't want the analysis to be too slow.
    PotentiallyEscaped |= EscapingBBs.count(BB);

    if (TIs.count(BB)) {
      if (isa<ReturnInst>(BB->getTerminator()) || PotentiallyEscaped)
        return true;

      // If the function ends with an exceptional terminator, the memory used
      // by the coroutine frame can be released by stack unwinding
      // automatically. So we can consider that the coro.begin doesn't escape
      // if it exits the function through an exceptional terminator.
      continue;
    }

    // Conservatively say that there is potentially a path.
    if (!--Limit)
      return true;

    auto TI = BB->getTerminator();
    // Although the default dest of coro.suspend switches is the suspend
    // block, which means an escape path to a normal terminator, it is
    // reasonable to skip it since the coroutine frame doesn't change outside
    // the coroutine body.
    if (isa<SwitchInst>(TI) &&
        FEI.CoroSuspendSwitches.count(cast<SwitchInst>(TI))) {
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(1));
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(2));
    } else
      Worklist.append(succ_begin(BB), succ_end(BB));

  } while (!Worklist.empty());

  // We have exhausted all possible paths and are certain that coro.begin
  // cannot reach any of the terminators.
  return false;
}
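
// Illustrative escape (hypothetical IR): any use of the handle other than the
// coroutine intrinsics marks its block as escaping, e.g.
//   %hdl = call ptr @llvm.coro.begin(token %id, ptr %mem)
//   store ptr %hdl, ptr @some_global
// Once the handle may be observed elsewhere, a path that reaches a terminator
// without passing through a coro.destroy can no longer be ruled out, and the
// walk above conservatively reports an escape.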

bool CoroIdElider::lifetimeEligibleForElide() const {
  // If there are no CoroAllocs, we cannot suppress allocation, so elision is
  // not possible.
  if (CoroAllocs.empty())
    return false;

  // Check that for every coro.begin there is at least one coro.destroy
  // directly referencing the SSA value of that coro.begin along each
  // non-exceptional path.
  //
  // If the value escaped, then coro.destroy would have been referencing a
  // memory location storing that value and not the virtual register.

  SmallPtrSet<BasicBlock *, 8> Terminators;
  // First gather all of the terminators for the function.
  // Consider the final coro.suspend as the real terminator when the current
  // function is a coroutine.
  for (BasicBlock &B : *FEI.ContainingFunction) {
    auto *TI = B.getTerminator();

    if (TI->getNumSuccessors() != 0 || isa<UnreachableInst>(TI))
      continue;

    Terminators.insert(&B);
  }

  // Filter out the coro.destroys that lie along exceptional paths.
  for (const auto *CB : CoroBegins) {
    auto It = DestroyAddr.find(CB);

    // FIXME: If we have not found any destroys for this coro.begin, we
    // disqualify this elide.
    if (It == DestroyAddr.end())
      return false;

    const auto &CorrespondingDestroyAddrs = It->second;

    // If every terminator is dominated by a coro.destroy, we know the
    // corresponding coro.begin doesn't escape.
    auto DominatesTerminator = [&](auto *TI) {
      return llvm::any_of(CorrespondingDestroyAddrs, [&](auto *Destroy) {
        return DT.dominates(Destroy, TI->getTerminator());
      });
    };

    if (llvm::all_of(Terminators, DominatesTerminator))
      continue;

    // Otherwise, canCoroBeginEscape decides whether there is any path from
    // the coro.begin to the terminators that does not pass through any of the
    // coro.destroys. This is a slower analysis.
    //
    // canCoroBeginEscape is relatively slow, so we avoid running it as much
    // as possible.
    if (canCoroBeginEscape(CB, Terminators))
      return false;
  }

  // We have checked all CoroBegins and their paths to the terminators without
  // finding disqualifying code patterns, so we can elide the heap allocation.
  return true;
}
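
// Illustrative eligible caller (hypothetical IR): the destroy of the handle
// dominates the only return, so the frame's lifetime is provably enclosed in
// the caller and the heap allocation can be replaced by an alloca:
//   %hdl = call ptr @llvm.coro.begin(token %id, ptr %mem)
//   ...
//   %destroy = call ptr @llvm.coro.subfn.addr(ptr %hdl, i8 1)  ; 1 = destroy
//   call fastcc void %destroy(ptr %hdl)
//   ret void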

bool CoroIdElider::attemptElide() {
  // PostSplit coro.id refers to an array of subfunctions in its Info
  // argument.
  ConstantArray *Resumers = CoroId->getInfo().Resumers;
  assert(Resumers && "PostSplit coro.id Info argument must refer to an array "
                     "of coroutine subfunctions");
  auto *ResumeAddrConstant =
      Resumers->getAggregateElement(CoroSubFnInst::ResumeIndex);

  replaceWithConstant(ResumeAddrConstant, ResumeAddr);

  bool EligibleForElide = lifetimeEligibleForElide();

  auto *DestroyAddrConstant = Resumers->getAggregateElement(
      EligibleForElide ? CoroSubFnInst::CleanupIndex
                       : CoroSubFnInst::DestroyIndex);

  for (auto &It : DestroyAddr)
    replaceWithConstant(DestroyAddrConstant, It.second);

  auto FrameSizeAndAlign = getFrameLayout(cast<Function>(ResumeAddrConstant));

  auto CallerFunctionName = FEI.ContainingFunction->getName();
  auto CalleeCoroutineName = CoroId->getCoroutine()->getName();

  if (EligibleForElide && FrameSizeAndAlign) {
    elideHeapAllocations(FrameSizeAndAlign->first, FrameSizeAndAlign->second);
    coro::replaceCoroFree(CoroId, /*Elide=*/true);
    NumOfCoroElided++;

#ifndef NDEBUG
    if (!CoroElideInfoOutputFilename.empty())
      *getOrCreateLogFile() << "Elide " << CalleeCoroutineName << " in "
                            << FEI.ContainingFunction->getName() << "\n";
#endif

    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "CoroElide", CoroId)
             << "'" << ore::NV("callee", CalleeCoroutineName)
             << "' elided in '" << ore::NV("caller", CallerFunctionName)
             << "' (frame_size="
             << ore::NV("frame_size", FrameSizeAndAlign->first) << ", align="
             << ore::NV("align", FrameSizeAndAlign->second.value()) << ")";
    });
  } else {
    ORE.emit([&]() {
      auto Remark = OptimizationRemarkMissed(DEBUG_TYPE, "CoroElide", CoroId)
                    << "'" << ore::NV("callee", CalleeCoroutineName)
                    << "' not elided in '"
                    << ore::NV("caller", CallerFunctionName);

      if (FrameSizeAndAlign)
        return Remark << "' (frame_size="
                      << ore::NV("frame_size", FrameSizeAndAlign->first)
                      << ", align="
                      << ore::NV("align", FrameSizeAndAlign->second.value())
                      << ")";
      else
        return Remark << "' (frame_size=unknown, align=unknown)";
    });
  }

  return true;
}
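
// When optimization remarks for 'coro-elide' are enabled, a successful
// elision is reported roughly as (illustrative, hypothetical names):
//   remark: 'callee_coro' elided in 'caller' (frame_size=40, align=8)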

static bool declaresCoroElideIntrinsics(Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
}

PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &M = *F.getParent();
  if (!declaresCoroElideIntrinsics(M))
    return PreservedAnalyses::all();

  FunctionElideInfo FEI{&F};
  // Elide is not necessary if there's no coro.id within the function.
  if (!FEI.hasCoroIds())
    return PreservedAnalyses::all();

  AAResults &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);

  bool Changed = false;
  for (auto *CII : FEI.getCoroIds()) {
    CoroIdElider CIE(CII, FEI, AA, DT, ORE);
    Changed |= CIE.attemptElide();
  }

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}