Bug Summary

File: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Warning: line 758, column 29
Called C++ object pointer is null
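In short: the report fires on the non-MemorySSA path of MemCpyOptPass::processStore, where the member pointer MD is dereferenced without a check the analyzer can see. A minimal sketch of the flagged shape (simplified, hypothetical names; not the pass's real wiring):

#include <cassert>

// Stand-ins for the real LLVM types; only the control-flow shape matters.
struct MemDepResult {};
struct MemoryDependenceResults {
  MemDepResult getDependency(void *I) { return MemDepResult(); }
};

static bool EnableMemorySSA = true;           // cl::opt, defaults to true
static MemoryDependenceResults *MD = nullptr; // populated only on the non-MSSA path

MemDepResult analyzeLoad(void *LI) {
  if (EnableMemorySSA)
    return MemDepResult();    // MemorySSA-backed query; MD is never touched
  // Nothing on this branch proves MD was ever set, so the analyzer reports
  // "Called C++ object pointer is null" on the call below.
  return MD->getDependency(LI);
}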

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MemCpyOptimizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-07-26-235520-9401-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp

1//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs various transformations related to eliminating memcpy
10// calls, or transforming sets of stores into memset's.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/None.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/iterator_range.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/GlobalsModRef.h"
24#include "llvm/Analysis/Loads.h"
25#include "llvm/Analysis/MemoryDependenceAnalysis.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/Analysis/MemorySSA.h"
28#include "llvm/Analysis/MemorySSAUpdater.h"
29#include "llvm/Analysis/TargetLibraryInfo.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/IR/Argument.h"
32#include "llvm/IR/BasicBlock.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/LLVMContext.h"
47#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/IR/PassManager.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/User.h"
52#include "llvm/IR/Value.h"
53#include "llvm/InitializePasses.h"
54#include "llvm/Pass.h"
55#include "llvm/Support/Casting.h"
56#include "llvm/Support/Debug.h"
57#include "llvm/Support/MathExtras.h"
58#include "llvm/Support/raw_ostream.h"
59#include "llvm/Transforms/Scalar.h"
60#include "llvm/Transforms/Utils/Local.h"
61#include <algorithm>
62#include <cassert>
63#include <cstdint>
64#include <utility>
65
66using namespace llvm;
67
68#define DEBUG_TYPE"memcpyopt" "memcpyopt"
69
70static cl::opt<bool>
71 EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden,
72 cl::desc("Use MemorySSA-backed MemCpyOpt."));
73
74STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
75STATISTIC(NumMemSetInfer, "Number of memsets inferred");
76STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
77STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
78STATISTIC(NumCallSlot, "Number of call slot optimizations performed");
79
80namespace {
81
82/// Represents a range of memset'd bytes with the ByteVal value.
83/// This allows us to analyze stores like:
84/// store 0 -> P+1
85/// store 0 -> P+0
86/// store 0 -> P+3
87/// store 0 -> P+2
88/// which sometimes happens with stores to arrays of structs etc. When we see
89/// the first store, we make a range [1, 2). The second store extends the range
90/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
91/// two ranges into [0, 3) which is memset'able.
92struct MemsetRange {
93 // Start/End - A semi range that describes the span that this range covers.
94 // The range is closed at the start and open at the end: [Start, End).
95 int64_t Start, End;
96
97 /// StartPtr - The getelementptr instruction that points to the start of the
98 /// range.
99 Value *StartPtr;
100
101 /// Alignment - The known alignment of the first store.
102 unsigned Alignment;
103
104 /// TheStores - The actual stores that make up this range.
105 SmallVector<Instruction*, 16> TheStores;
106
107 bool isProfitableToUseMemset(const DataLayout &DL) const;
108};
109
110} // end anonymous namespace
111
112bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
113 // If we found more than 4 stores to merge or 16 bytes, use memset.
114 if (TheStores.size() >= 4 || End-Start >= 16) return true;
115
116 // If there is nothing to merge, don't do anything.
117 if (TheStores.size() < 2) return false;
118
119 // If any of the stores are a memset, then it is always good to extend the
120 // memset.
121 for (Instruction *SI : TheStores)
122 if (!isa<StoreInst>(SI))
123 return true;
124
125 // Assume that the code generator is capable of merging pairs of stores
126 // together if it wants to.
127 if (TheStores.size() == 2) return false;
128
129 // If we have fewer than 8 stores, it can still be worthwhile to do this.
130 // For example, merging 4 i8 stores into an i32 store is useful almost always.
131 // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
132 // memset will be split into 2 32-bit stores anyway) and doing so can
133 // pessimize the llvm optimizer.
134 //
135 // Since we don't have perfect knowledge here, make some assumptions: assume
136 // the maximum GPR width is the same size as the largest legal integer
137 // size. If so, check to see whether we will end up actually reducing the
138 // number of stores used.
139 unsigned Bytes = unsigned(End-Start);
140 unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
141 if (MaxIntSize == 0)
142 MaxIntSize = 1;
143 unsigned NumPointerStores = Bytes / MaxIntSize;
144
145 // Assume the remaining bytes, if any, are done a byte at a time.
146 unsigned NumByteStores = Bytes % MaxIntSize;
147
148 // If we will reduce the # stores (according to this heuristic), do the
149 // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
150 // etc.
151 return TheStores.size() > NumPointerStores+NumByteStores;
152}
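
A quick worked example of the heuristic above (assuming a 64-bit target, i.e. DL.getLargestLegalIntTypeSizeInBits() == 64, so MaxIntSize = 8): three adjacent i8 stores give

Bytes            = 3
NumPointerStores = 3 / 8 = 0
NumByteStores    = 3 % 8 = 3
3 > 0 + 3        -> false, so the stores are left alone

A fourth adjacent store never reaches this arithmetic, because TheStores.size() >= 4 already returned true at the top of the function.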
153
154namespace {
155
156class MemsetRanges {
157 using range_iterator = SmallVectorImpl<MemsetRange>::iterator;
158
159 /// A sorted list of the memset ranges.
160 SmallVector<MemsetRange, 8> Ranges;
161
162 const DataLayout &DL;
163
164public:
165 MemsetRanges(const DataLayout &DL) : DL(DL) {}
166
167 using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;
168
169 const_iterator begin() const { return Ranges.begin(); }
170 const_iterator end() const { return Ranges.end(); }
171 bool empty() const { return Ranges.empty(); }
172
173 void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
174 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
175 addStore(OffsetFromFirst, SI);
176 else
177 addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
178 }
179
180 void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
181 int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
182
183 addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
184 SI->getAlign().value(), SI);
185 }
186
187 void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
188 int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
189 addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
190 }
191
192 void addRange(int64_t Start, int64_t Size, Value *Ptr,
193 unsigned Alignment, Instruction *Inst);
194};
195
196} // end anonymous namespace
197
198/// Add a new store to the MemsetRanges data structure. This adds a
199/// new range for the specified store at the specified offset, merging into
200/// existing ranges as appropriate.
201void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
202 unsigned Alignment, Instruction *Inst) {
203 int64_t End = Start+Size;
204
205 range_iterator I = partition_point(
206 Ranges, [=](const MemsetRange &O) { return O.End < Start; });
207
208 // We now know that I == E, in which case we didn't find anything to merge
209 // with, or that Start <= I->End. If End < I->Start or I == E, then we need
210 // to insert a new range. Handle this now.
211 if (I == Ranges.end() || End < I->Start) {
212 MemsetRange &R = *Ranges.insert(I, MemsetRange());
213 R.Start = Start;
214 R.End = End;
215 R.StartPtr = Ptr;
216 R.Alignment = Alignment;
217 R.TheStores.push_back(Inst);
218 return;
219 }
220
221 // This store overlaps with I, add it.
222 I->TheStores.push_back(Inst);
223
224 // At this point, we may have an interval that completely contains our store.
225 // If so, just add it to the interval and return.
226 if (I->Start <= Start && I->End >= End)
227 return;
228
229 // Now we know that Start <= I->End and End >= I->Start so the range overlaps
230 // but is not entirely contained within the range.
231
232 // See if this store extends the start of the range. In this case, it couldn't
233 // possibly cause it to join the prior range, because otherwise we would have
234 // stopped on *it*.
235 if (Start < I->Start) {
236 I->Start = Start;
237 I->StartPtr = Ptr;
238 I->Alignment = Alignment;
239 }
240
241 // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
242 // is in or right at the end of I), and that End >= I->Start. Extend I out to
243 // End.
244 if (End > I->End) {
245 I->End = End;
246 range_iterator NextI = I;
247 while (++NextI != Ranges.end() && End >= NextI->Start) {
248 // Merge the range in.
249 I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
250 if (NextI->End > I->End)
251 I->End = NextI->End;
252 Ranges.erase(NextI);
253 NextI = I;
254 }
255 }
256}
257
258//===----------------------------------------------------------------------===//
259// MemCpyOptLegacyPass Pass
260//===----------------------------------------------------------------------===//
261
262namespace {
263
264class MemCpyOptLegacyPass : public FunctionPass {
265 MemCpyOptPass Impl;
266
267public:
268 static char ID; // Pass identification, replacement for typeid
269
270 MemCpyOptLegacyPass() : FunctionPass(ID) {
271 initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
272 }
273
274 bool runOnFunction(Function &F) override;
275
276private:
277 // This transformation requires dominator and postdominator info
278 void getAnalysisUsage(AnalysisUsage &AU) const override {
279 AU.setPreservesCFG();
280 AU.addRequired<AssumptionCacheTracker>();
281 AU.addRequired<DominatorTreeWrapperPass>();
282 AU.addPreserved<DominatorTreeWrapperPass>();
283 AU.addPreserved<GlobalsAAWrapperPass>();
284 AU.addRequired<TargetLibraryInfoWrapperPass>();
285 if (!EnableMemorySSA)
286 AU.addRequired<MemoryDependenceWrapperPass>();
287 AU.addPreserved<MemoryDependenceWrapperPass>();
288 AU.addRequired<AAResultsWrapperPass>();
289 AU.addPreserved<AAResultsWrapperPass>();
290 if (EnableMemorySSA)
291 AU.addRequired<MemorySSAWrapperPass>();
292 AU.addPreserved<MemorySSAWrapperPass>();
293 }
294};
295
296} // end anonymous namespace
297
298char MemCpyOptLegacyPass::ID = 0;
299
300/// The public interface to this file...
301FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }
302
303INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
304 false, false)
305INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
306INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
307INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
308INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
309INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
310INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
311INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
312INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
313 false, false)
314
315// Check that V is either not accessible by the caller, or unwinding cannot
316// occur between Start and End.
317static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
318 Instruction *End) {
319 assert(Start->getParent() == End->getParent() && "Must be in same block");
320 if (!Start->getFunction()->doesNotThrow() &&
321 !isa<AllocaInst>(getUnderlyingObject(V))) {
322 for (const Instruction &I :
323 make_range(Start->getIterator(), End->getIterator())) {
324 if (I.mayThrow())
325 return true;
326 }
327 }
328 return false;
329}
330
331void MemCpyOptPass::eraseInstruction(Instruction *I) {
332 if (MSSAU)
333 MSSAU->removeMemoryAccess(I);
334 if (MD)
335 MD->removeInstruction(I);
336 I->eraseFromParent();
337}
338
339// Check for mod or ref of Loc between Start and End, excluding both boundaries.
340// Start and End must be in the same block
341static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
342 const MemoryUseOrDef *Start,
343 const MemoryUseOrDef *End) {
344 assert(Start->getBlock() == End->getBlock() && "Only local supported");
345 for (const MemoryAccess &MA :
346 make_range(++Start->getIterator(), End->getIterator())) {
347 if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
348 Loc)))
349 return true;
350 }
351 return false;
352}
353
354// Check for mod of Loc between Start and End, excluding both boundaries.
355// Start and End can be in different blocks.
356static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
357 const MemoryUseOrDef *Start,
358 const MemoryUseOrDef *End) {
359 // TODO: Only walk until we hit Start.
360 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
361 End->getDefiningAccess(), Loc);
362 return !MSSA->dominates(Clobber, Start);
363}
364
365/// When scanning forward over instructions, we look for some other patterns to
366/// fold away. In particular, this looks for stores to neighboring locations of
367/// memory. If it sees enough consecutive ones, it attempts to merge them
368/// together into a memcpy/memset.
369Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
370 Value *StartPtr,
371 Value *ByteVal) {
372 const DataLayout &DL = StartInst->getModule()->getDataLayout();
373
374 // Okay, so we now have a single store that can be splatable. Scan to find
375 // all subsequent stores of the same value to offset from the same pointer.
376 // Join these together into ranges, so we can decide whether contiguous blocks
377 // are stored.
378 MemsetRanges Ranges(DL);
379
380 BasicBlock::iterator BI(StartInst);
381
382 // Keeps track of the last memory use or def before the insertion point for
383 // the new memset. The new MemoryDef for the inserted memsets will be inserted
384 // after MemInsertPoint. It points to either LastMemDef or to the last user
385 // before the insertion point of the memset, if there are any such users.
386 MemoryUseOrDef *MemInsertPoint = nullptr;
387 // Keeps track of the last MemoryDef between StartInst and the insertion point
388 // for the new memset. This will become the defining access of the inserted
389 // memsets.
390 MemoryDef *LastMemDef = nullptr;
391 for (++BI; !BI->isTerminator(); ++BI) {
392 if (MSSAU) {
393 auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
394 MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
395 if (CurrentAcc) {
396 MemInsertPoint = CurrentAcc;
397 if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
398 LastMemDef = CurrentDef;
399 }
400 }
401
402 // Calls that only access inaccessible memory do not block merging
403 // accessible stores.
404 if (auto *CB = dyn_cast<CallBase>(BI)) {
405 if (CB->onlyAccessesInaccessibleMemory())
406 continue;
407 }
408
409 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
410 // If the instruction is readnone, ignore it, otherwise bail out. We
411 // don't even allow readonly here because we don't want something like:
412 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
413 if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
414 break;
415 continue;
416 }
417
418 if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
419 // If this is a store, see if we can merge it in.
420 if (!NextStore->isSimple()) break;
421
422 Value *StoredVal = NextStore->getValueOperand();
423
424 // Don't convert stores of non-integral pointer types to memsets (which
425 // stores integers).
426 if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
427 break;
428
429 // Check to see if this stored value is of the same byte-splattable value.
430 Value *StoredByte = isBytewiseValue(StoredVal, DL);
431 if (isa<UndefValue>(ByteVal) && StoredByte)
432 ByteVal = StoredByte;
433 if (ByteVal != StoredByte)
434 break;
435
436 // Check to see if this store is to a constant offset from the start ptr.
437 Optional<int64_t> Offset =
438 isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
439 if (!Offset)
440 break;
441
442 Ranges.addStore(*Offset, NextStore);
443 } else {
444 MemSetInst *MSI = cast<MemSetInst>(BI);
445
446 if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
447 !isa<ConstantInt>(MSI->getLength()))
448 break;
449
450 // Check to see if this store is to a constant offset from the start ptr.
451 Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
452 if (!Offset)
453 break;
454
455 Ranges.addMemSet(*Offset, MSI);
456 }
457 }
458
459 // If we have no ranges, then we just had a single store with nothing that
460 // could be merged in. This is a very common case of course.
461 if (Ranges.empty())
462 return nullptr;
463
464 // If we had at least one store that could be merged in, add the starting
465 // store as well. We try to avoid this unless there is at least something
466 // interesting as a small compile-time optimization.
467 Ranges.addInst(0, StartInst);
468
469 // If we create any memsets, we put them right before the first instruction
470 // that isn't part of the memset block. This ensures that the memset is dominated
471 // by any addressing instruction needed by the start of the block.
472 IRBuilder<> Builder(&*BI);
473
474 // Now that we have full information about ranges, loop over the ranges and
475 // emit memset's for anything big enough to be worthwhile.
476 Instruction *AMemSet = nullptr;
477 for (const MemsetRange &Range : Ranges) {
478 if (Range.TheStores.size() == 1) continue;
479
480 // If it is profitable to lower this range to memset, do so now.
481 if (!Range.isProfitableToUseMemset(DL))
482 continue;
483
484 // Otherwise, we do want to transform this! Create a new memset.
485 // Get the starting pointer of the block.
486 StartPtr = Range.StartPtr;
487
488 AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
489 MaybeAlign(Range.Alignment));
490 LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
491 : Range.TheStores) dbgs()
492 << *SI << '\n';
493 dbgs() << "With: " << *AMemSet << '\n');
494 if (!Range.TheStores.empty())
495 AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
496
497 if (MSSAU) {
498 assert(LastMemDef && MemInsertPoint &&
499 "Both LastMemDef and MemInsertPoint need to be set");
500 auto *NewDef =
501 cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
502 ? MSSAU->createMemoryAccessBefore(
503 AMemSet, LastMemDef, MemInsertPoint)
504 : MSSAU->createMemoryAccessAfter(
505 AMemSet, LastMemDef, MemInsertPoint));
506 MSSAU->insertDef(NewDef, /*RenameUses=*/true);
507 LastMemDef = NewDef;
508 MemInsertPoint = NewDef;
509 }
510
511 // Zap all the stores.
512 for (Instruction *SI : Range.TheStores)
513 eraseInstruction(SI);
514
515 ++NumMemSetInfer;
516 }
517
518 return AMemSet;
519}
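
For intuition, this is the source-level effect of a successful merge (a C-level schematic, not the IR the pass actually rewrites):

#include <cstring>

void before(unsigned char *p) {
  // Four adjacent stores of the same splattable byte value...
  p[0] = 0; p[1] = 0; p[2] = 0; p[3] = 0;
}

void after(unsigned char *p) {
  // ...collapse into one memset once the range [0, 4) is complete
  // and isProfitableToUseMemset approves.
  std::memset(p, 0, 4);
}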
520
521// This method tries to lift a store instruction before position P.
522// It will lift the store and its argument, plus anything that
523// may alias with these.
524// The method returns true if it was successful.
525bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
526 // If the store aliases this position, bail out early.
527 MemoryLocation StoreLoc = MemoryLocation::get(SI);
528 if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
529 return false;
530
531 // Keep track of the arguments of all instructions we plan to lift
532 // so we can make sure to lift them as well if appropriate.
533 DenseSet<Instruction*> Args;
534 if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
535 if (Ptr->getParent() == SI->getParent())
536 Args.insert(Ptr);
537
538 // Instructions to lift before P.
539 SmallVector<Instruction *, 8> ToLift{SI};
540
541 // Memory locations of lifted instructions.
542 SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
543
544 // Lifted calls.
545 SmallVector<const CallBase *, 8> Calls;
546
547 const MemoryLocation LoadLoc = MemoryLocation::get(LI);
548
549 for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
550 auto *C = &*I;
551
552 // Make sure hoisting does not perform a store that was not guaranteed to
553 // happen.
554 if (!isGuaranteedToTransferExecutionToSuccessor(C))
555 return false;
556
557 bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));
558
559 bool NeedLift = false;
560 if (Args.erase(C))
561 NeedLift = true;
562 else if (MayAlias) {
563 NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
564 return isModOrRefSet(AA->getModRefInfo(C, ML));
565 });
566
567 if (!NeedLift)
568 NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
569 return isModOrRefSet(AA->getModRefInfo(C, Call));
570 });
571 }
572
573 if (!NeedLift)
574 continue;
575
576 if (MayAlias) {
577 // Since LI is implicitly moved downwards past the lifted instructions,
578 // none of them may modify its source.
579 if (isModSet(AA->getModRefInfo(C, LoadLoc)))
580 return false;
581 else if (const auto *Call = dyn_cast<CallBase>(C)) {
582 // If we can't lift this before P, it's game over.
583 if (isModOrRefSet(AA->getModRefInfo(P, Call)))
584 return false;
585
586 Calls.push_back(Call);
587 } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
588 // If we can't lift this before P, it's game over.
589 auto ML = MemoryLocation::get(C);
590 if (isModOrRefSet(AA->getModRefInfo(P, ML)))
591 return false;
592
593 MemLocs.push_back(ML);
594 } else
595 // We don't know how to lift this instruction.
596 return false;
597 }
598
599 ToLift.push_back(C);
600 for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
601 if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
602 if (A->getParent() == SI->getParent()) {
603 // Cannot hoist user of P above P
604 if(A == P) return false;
605 Args.insert(A);
606 }
607 }
608 }
609
610 // Find MSSA insertion point. Normally P will always have a corresponding
611 // memory access before which we can insert. However, with non-standard AA
612 // pipelines, there may be a mismatch between AA and MSSA, in which case we
613 // will scan for a memory access before P. In either case, we know for sure
614 // that at least the load will have a memory access.
615 // TODO: Simplify this once P will be determined by MSSA, in which case the
616 // discrepancy can no longer occur.
617 MemoryUseOrDef *MemInsertPoint = nullptr;
618 if (MSSAU) {
619 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
620 MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
621 } else {
622 const Instruction *ConstP = P;
623 for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
624 ++LI->getReverseIterator())) {
625 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
626 MemInsertPoint = MA;
627 break;
628 }
629 }
630 }
631 }
632
633 // We made it, we need to lift.
634 for (auto *I : llvm::reverse(ToLift)) {
635 LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
636 I->moveBefore(P);
637 if (MSSAU) {
638 assert(MemInsertPoint && "Must have found insert point");
639 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
640 MSSAU->moveAfter(MA, MemInsertPoint);
641 MemInsertPoint = MA;
642 }
643 }
644 }
645
646 return true;
647}
648
649bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
650 if (!SI->isSimple()) return false;
23. Calling 'StoreInst::isSimple'
27. Returning from 'StoreInst::isSimple'
28. Taking false branch
651
652 // Avoid merging nontemporal stores since the resulting
653 // memcpy/memset would not be able to preserve the nontemporal hint.
654 // In theory we could teach how to propagate the !nontemporal metadata to
655 // memset calls. However, that change would force the backend to
656 // conservatively expand !nontemporal memset calls back to sequences of
657 // store instructions (effectively undoing the merging).
658 if (SI->getMetadata(LLVMContext::MD_nontemporal))
29. Calling 'Instruction::getMetadata'
33. Returning from 'Instruction::getMetadata'
34. Taking false branch
659 return false;
660
661 const DataLayout &DL = SI->getModule()->getDataLayout();
662
663 Value *StoredVal = SI->getValueOperand();
664
665 // Not all the transforms below are correct for non-integral pointers, bail
666 // until we've audited the individual pieces.
667 if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
35. Calling 'DataLayout::isNonIntegralPointerType'
38. Returning from 'DataLayout::isNonIntegralPointerType'
39. Taking false branch
668 return false;
669
670 // Load to store forwarding can be interpreted as memcpy.
671 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
40. Assuming 'StoredVal' is a 'LoadInst'
40.1. 'LI' is non-null
41. Taking true branch
672 if (LI->isSimple() && LI->hasOneUse() &&
42. Calling 'LoadInst::isSimple'
46. Returning from 'LoadInst::isSimple'
47. Calling 'Value::hasOneUse'
53. Returning from 'Value::hasOneUse'
54. Assuming the condition is true
56. Taking true branch
673 LI->getParent() == SI->getParent()) {
55. Assuming the condition is true
674
675 auto *T = LI->getType();
676 if (T->isAggregateType()) {
57. Calling 'Type::isAggregateType'
61. Returning from 'Type::isAggregateType'
62. Taking false branch
677 MemoryLocation LoadLoc = MemoryLocation::get(LI);
678
679 // We use alias analysis to check if an instruction may store to
680 // the memory we load from in between the load and the store. If
681 // such an instruction is found, we try to promote there instead
682 // of at the store position.
683 // TODO: Can use MSSA for this.
684 Instruction *P = SI;
685 for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
686 if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
687 P = &I;
688 break;
689 }
690 }
691
692 // We found an instruction that may write to the loaded memory.
693 // We can try to promote at this position instead of the store
694 // position if nothing aliases the store memory after this and the store
695 // destination is not in the range.
696 if (P && P != SI) {
697 if (!moveUp(SI, P, LI))
698 P = nullptr;
699 }
700
701 // If a valid insertion position is found, then we can promote
702 // the load/store pair to a memcpy.
703 if (P) {
704 // If we load from memory that may alias the memory we store to,
705 // memmove must be used to preserve semantics. If not, memcpy can
706 // be used.
707 bool UseMemMove = false;
708 if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
709 UseMemMove = true;
710
711 uint64_t Size = DL.getTypeStoreSize(T);
712
713 IRBuilder<> Builder(P);
714 Instruction *M;
715 if (UseMemMove)
716 M = Builder.CreateMemMove(
717 SI->getPointerOperand(), SI->getAlign(),
718 LI->getPointerOperand(), LI->getAlign(), Size);
719 else
720 M = Builder.CreateMemCpy(
721 SI->getPointerOperand(), SI->getAlign(),
722 LI->getPointerOperand(), LI->getAlign(), Size);
723
724 LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
725 << *M << "\n");
726
727 if (MSSAU) {
728 auto *LastDef =
729 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
730 auto *NewAccess =
731 MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
732 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
733 }
734
735 eraseInstruction(SI);
736 eraseInstruction(LI);
737 ++NumMemCpyInstr;
738
739 // Make sure we do not invalidate the iterator.
740 BBI = M->getIterator();
741 return true;
742 }
743 }
744
745 // Detect cases where we're performing call slot forwarding, but
746 // happen to be using a load-store pair to implement it, rather than
747 // a memcpy.
748 CallInst *C = nullptr;
749 if (EnableMemorySSA) {
63. Assuming the condition is false
64. Taking false branch
750 if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
751 MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
752 // The load must post-dominate the call. Limit to the same block for now.
753 // TODO: Support non-local call-slot optimization?
754 if (LoadClobber->getBlock() == SI->getParent())
755 C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
756 }
757 } else {
758 MemDepResult ldep = MD->getDependency(LI);
65. Called C++ object pointer is null
759 if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
760 C = dyn_cast<CallInst>(ldep.getInst());
761 }
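
One way to make the pass's invariant explicit (a sketch, assuming MD is always set when EnableMemorySSA is false, as the surrounding code implies; this is not the upstream fix):

} else {
  // Spell out the invariant so the null-dereference report can be
  // discharged: the non-MemorySSA configuration provides MD.
  assert(MD && "MD must be set when EnableMemorySSA is false");
  MemDepResult ldep = MD->getDependency(LI);
  if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
    C = dyn_cast<CallInst>(ldep.getInst());
}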
762
763 if (C) {
764 // Check that nothing touches the dest of the "copy" between
765 // the call and the store.
766 MemoryLocation StoreLoc = MemoryLocation::get(SI);
767 if (EnableMemorySSA) {
768 if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
769 MSSA->getMemoryAccess(SI)))
770 C = nullptr;
771 } else {
772 for (BasicBlock::iterator I = --SI->getIterator(),
773 E = C->getIterator();
774 I != E; --I) {
775 if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
776 C = nullptr;
777 break;
778 }
779 }
780 }
781 }
782
783 if (C) {
784 bool changed = performCallSlotOptzn(
785 LI, SI, SI->getPointerOperand()->stripPointerCasts(),
786 LI->getPointerOperand()->stripPointerCasts(),
787 DL.getTypeStoreSize(SI->getOperand(0)->getType()),
788 commonAlignment(SI->getAlign(), LI->getAlign()), C);
789 if (changed) {
790 eraseInstruction(SI);
791 eraseInstruction(LI);
792 ++NumMemCpyInstr;
793 return true;
794 }
795 }
796 }
797 }
798
799 // There are two cases that are interesting for this code to handle: memcpy
800 // and memset. Right now we only handle memset.
801
802 // Ensure that the value being stored is something that can be memset a
803 // byte at a time, like "0" or "-1" of any width, as well as things like
804 // 0xA0A0A0A0 and 0.0.
805 auto *V = SI->getOperand(0);
806 if (Value *ByteVal = isBytewiseValue(V, DL)) {
807 if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
808 ByteVal)) {
809 BBI = I->getIterator(); // Don't invalidate iterator.
810 return true;
811 }
812
813 // If we have an aggregate, we try to promote it to memset regardless
814 // of opportunity for merging as it can expose optimization opportunities
815 // in subsequent passes.
816 auto *T = V->getType();
817 if (T->isAggregateType()) {
818 uint64_t Size = DL.getTypeStoreSize(T);
819 IRBuilder<> Builder(SI);
820 auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
821 SI->getAlign());
822
823 LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
824
825 if (MSSAU) {
826 assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)));
827 auto *LastDef =
828 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
829 auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
830 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
831 }
832
833 eraseInstruction(SI);
834 NumMemSetInfer++;
835
836 // Make sure we do not invalidate the iterator.
837 BBI = M->getIterator();
838 return true;
839 }
840 }
841
842 return false;
843}
844
845bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
846 // See if there is another memset or store neighboring this memset which
847 // allows us to widen out the memset to do a single larger store.
848 if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
849 if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
850 MSI->getValue())) {
851 BBI = I->getIterator(); // Don't invalidate iterator.
852 return true;
853 }
854 return false;
855}
856
857/// Takes a memcpy and a call that it depends on,
858/// and checks for the possibility of a call slot optimization by having
859/// the call write its result directly into the destination of the memcpy.
860bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
861 Instruction *cpyStore, Value *cpyDest,
862 Value *cpySrc, uint64_t cpyLen,
863 Align cpyAlign, CallInst *C) {
864 // The general transformation to keep in mind is
865 //
866 // call @func(..., src, ...)
867 // memcpy(dest, src, ...)
868 //
869 // ->
870 //
871 // memcpy(dest, src, ...)
872 // call @func(..., dest, ...)
873 //
874 // Since moving the memcpy is technically awkward, we additionally check that
875 // src only holds uninitialized values at the moment of the call, meaning that
876 // the memcpy can be discarded rather than moved.
877
878 // Lifetime marks shouldn't be operated on.
879 if (Function *F = C->getCalledFunction())
880 if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
881 return false;
882
883 // Require that src be an alloca. This simplifies the reasoning considerably.
884 AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
885 if (!srcAlloca)
886 return false;
887
888 ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
889 if (!srcArraySize)
890 return false;
891
892 const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
893 uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
894 srcArraySize->getZExtValue();
895
896 if (cpyLen < srcSize)
897 return false;
898
899 // Check that accessing the first srcSize bytes of dest will not cause a
900 // trap. Otherwise the transform is invalid since it might cause a trap
901 // to occur earlier than it otherwise would.
902 if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
903 DL, C, DT))
904 return false;
905
906 // Make sure that nothing can observe cpyDest being written early. There are
907 // a number of cases to consider:
908 // 1. cpyDest cannot be accessed between C and cpyStore as a precondition of
909 // the transform.
910 // 2. C itself may not access cpyDest (prior to the transform). This is
911 // checked further below.
912 // 3. If cpyDest is accessible to the caller of this function (potentially
913 // captured and not based on an alloca), we need to ensure that we cannot
914 // unwind between C and cpyStore. This is checked here.
915 // 4. If cpyDest is potentially captured, there may be accesses to it from
916 // another thread. In this case, we need to check that cpyStore is
917 // guaranteed to be executed if C is. As it is a non-atomic access, it
918 // renders accesses from other threads undefined.
919 // TODO: This is currently not checked.
920 if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
921 return false;
922
923 // Check that dest points to memory that is at least as aligned as src.
924 Align srcAlign = srcAlloca->getAlign();
925 bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
926 // If dest is not aligned enough and we can't increase its alignment then
927 // bail out.
928 if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
929 return false;
930
931 // Check that src is not accessed except via the call and the memcpy. This
932 // guarantees that it holds only undefined values when passed in (so the final
933 // memcpy can be dropped), that it is not read or written between the call and
934 // the memcpy, and that writing beyond the end of it is undefined.
935 SmallVector<User *, 8> srcUseList(srcAlloca->users());
936 while (!srcUseList.empty()) {
937 User *U = srcUseList.pop_back_val();
938
939 if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
940 append_range(srcUseList, U->users());
941 continue;
942 }
943 if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
944 if (!G->hasAllZeroIndices())
945 return false;
946
947 append_range(srcUseList, U->users());
948 continue;
949 }
950 if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
951 if (IT->isLifetimeStartOrEnd())
952 continue;
953
954 if (U != C && U != cpyLoad)
955 return false;
956 }
957
958 // Check that src isn't captured by the called function since the
959 // transformation can cause aliasing issues in that case.
960 for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
961 if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
962 return false;
963
964 // Since we're changing the parameter to the callsite, we need to make sure
965 // that what would be the new parameter dominates the callsite.
966 if (!DT->dominates(cpyDest, C)) {
967 // Support moving a constant index GEP before the call.
968 auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
969 if (GEP && GEP->hasAllConstantIndices() &&
970 DT->dominates(GEP->getPointerOperand(), C))
971 GEP->moveBefore(C);
972 else
973 return false;
974 }
975
976 // In addition to knowing that the call does not access src in some
977 // unexpected manner, for example via a global, which we deduce from
978 // the use analysis, we also need to know that it does not sneakily
979 // access dest. We rely on AA to figure this out for us.
980 ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
981 // If necessary, perform additional analysis.
982 if (isModOrRefSet(MR))
983 MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
984 if (isModOrRefSet(MR))
985 return false;
986
987 // We can't create address space casts here because we don't know if they're
988 // safe for the target.
989 if (cpySrc->getType()->getPointerAddressSpace() !=
990 cpyDest->getType()->getPointerAddressSpace())
991 return false;
992 for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
993 if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
994 cpySrc->getType()->getPointerAddressSpace() !=
995 C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
996 return false;
997
998 // All the checks have passed, so do the transformation.
999 bool changedArgument = false;
1000 for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
1001 if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
1002 Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
1003 : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
1004 cpyDest->getName(), C);
1005 changedArgument = true;
1006 if (C->getArgOperand(ArgI)->getType() == Dest->getType())
1007 C->setArgOperand(ArgI, Dest);
1008 else
1009 C->setArgOperand(ArgI, CastInst::CreatePointerCast(
1010 Dest, C->getArgOperand(ArgI)->getType(),
1011 Dest->getName(), C));
1012 }
1013
1014 if (!changedArgument)
1015 return false;
1016
1017 // If the destination wasn't sufficiently aligned then increase its alignment.
1018 if (!isDestSufficientlyAligned) {
1019 assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
1020 cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
1021 }
1022
1023 // Drop any cached information about the call, because we may have changed
1024 // its dependence information by changing its parameter.
1025 if (MD)
1026 MD->removeInstruction(C);
1027
1028 // Update AA metadata
1029 // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
1030 // handled here, but combineMetadata doesn't support them yet
1031 unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
1032 LLVMContext::MD_noalias,
1033 LLVMContext::MD_invariant_group,
1034 LLVMContext::MD_access_group};
1035 combineMetadata(C, cpyLoad, KnownIDs, true);
1036
1037 ++NumCallSlot;
1038 return true;
1039}
1040
1041/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
1042/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
1043bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
1044 MemCpyInst *MDep) {
1045 // We can only transform memcpys where the dest of one is the source of the
1046 // other.
1047 if (M->getSource() != MDep->getDest() || MDep->isVolatile())
1048 return false;
1049
1050 // If dep instruction is reading from our current input, then it is a noop
1051 // transfer and substituting the input won't change this instruction. Just
1052 // ignore the input and let someone else zap MDep. This handles cases like:
1053 // memcpy(a <- a)
1054 // memcpy(b <- a)
1055 if (M->getSource() == MDep->getSource())
1056 return false;
1057
1058 // Second, the length of the memcpy's must be the same, or the preceding one
1059 // must be larger than the following one.
1060 if (MDep->getLength() != M->getLength()) {
1061 ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
1062 ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
1063 if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
1064 return false;
1065 }
1066
1067 // Verify that the copied-from memory doesn't change in between the two
1068 // transfers. For example, in:
1069 // memcpy(a <- b)
1070 // *b = 42;
1071 // memcpy(c <- a)
1072 // It would be invalid to transform the second memcpy into memcpy(c <- b).
1073 //
1074 // TODO: If the code between M and MDep is transparent to the destination "c",
1075 // then we could still perform the xform by moving M up to the first memcpy.
1076 if (EnableMemorySSA) {
1077 // TODO: It would be sufficient to check the MDep source up to the memcpy
1078 // size of M, rather than MDep.
1079 if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
1080 MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
1081 return false;
1082 } else {
1083 // NOTE: This is conservative, it will stop on any read from the source loc,
1084 // not just the defining memcpy.
1085 MemDepResult SourceDep =
1086 MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
1087 M->getIterator(), M->getParent());
1088 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1089 return false;
1090 }
1091
1092 // If the dest of the second might alias the source of the first, then the
1093 // source and dest might overlap. We still want to eliminate the intermediate
1094 // value, but we have to generate a memmove instead of memcpy.
1095 bool UseMemMove = false;
1096 if (!AA->isNoAlias(MemoryLocation::getForDest(M),
1097 MemoryLocation::getForSource(MDep)))
1098 UseMemMove = true;
1099
1100 // If all checks passed, then we can transform M.
1101 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
1102 << *MDep << '\n' << *M << '\n');
1103
1104 // TODO: Is this worth it if we're creating a less aligned memcpy? For
1105 // example we could be moving from movaps -> movq on x86.
1106 IRBuilder<> Builder(M);
1107 Instruction *NewM;
1108 if (UseMemMove)
1109 NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
1110 MDep->getRawSource(), MDep->getSourceAlign(),
1111 M->getLength(), M->isVolatile());
1112 else if (isa<MemCpyInlineInst>(M)) {
1113 // llvm.memcpy may be promoted to llvm.memcpy.inline, but the converse is
1114 // never allowed since that would allow the latter to be lowered as a call
1115 // to an external function.
1116 NewM = Builder.CreateMemCpyInline(
1117 M->getRawDest(), M->getDestAlign(), MDep->getRawSource(),
1118 MDep->getSourceAlign(), M->getLength(), M->isVolatile());
1119 } else
1120 NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
1121 MDep->getRawSource(), MDep->getSourceAlign(),
1122 M->getLength(), M->isVolatile());
1123
1124 if (MSSAU) {
1125 assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
1126 auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
1127 auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1128 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1129 }
1130
1131 // Remove the instruction we're replacing.
1132 eraseInstruction(M);
1133 ++NumMemCpyInstr;
1134 return true;
1135}
1136
1137/// We've found that the (upward scanning) memory dependence of \p MemCpy is
1138/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
1139/// weren't copied over by \p MemCpy.
1140///
1141/// In other words, transform:
1142/// \code
1143/// memset(dst, c, dst_size);
1144/// memcpy(dst, src, src_size);
1145/// \endcode
1146/// into:
1147/// \code
1148/// memcpy(dst, src, src_size);
1149/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1150/// \endcode
1151bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1152 MemSetInst *MemSet) {
1153 // We can only transform memset/memcpy with the same destination.
1154 if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest()))
1155 return false;
1156
1157 // Check that src and dst of the memcpy aren't the same. While memcpy
1158 // operands cannot partially overlap, exact equality is allowed.
1159 if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(),
1160 LocationSize::precise(1)),
1161 MemoryLocation(MemCpy->getDest(),
1162 LocationSize::precise(1))))
1163 return false;
1164
1165 if (EnableMemorySSA) {
1166 // We know that dst up to src_size is not written. We now need to make sure
1167 // that dst up to dst_size is not accessed. (If we did not move the memset,
1168 // checking for reads would be sufficient.)
1169 if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
1170 MSSA->getMemoryAccess(MemSet),
1171 MSSA->getMemoryAccess(MemCpy))) {
1172 return false;
1173 }
1174 } else {
1175 // We have already checked that dst up to src_size is not accessed. We
1176 // need to make sure that there are no accesses up to dst_size either.
1177 MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
1178 MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(),
1179 MemCpy->getParent());
1180 if (DstDepInfo.getInst() != MemSet)
1181 return false;
1182 }
1183
1184 // Use the same i8* dest as the memcpy, killing the memset dest if different.
1185 Value *Dest = MemCpy->getRawDest();
1186 Value *DestSize = MemSet->getLength();
1187 Value *SrcSize = MemCpy->getLength();
1188
1189 if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
1190 return false;
1191
1192 // If the sizes are the same, simply drop the memset instead of generating
1193 // a replacement with zero size.
1194 if (DestSize == SrcSize) {
1195 eraseInstruction(MemSet);
1196 return true;
1197 }
1198
1199 // By default, create an unaligned memset.
1200 unsigned Align = 1;
1201 // If Dest is aligned, and SrcSize is constant, use the minimum alignment
1202 // of the sum.
1203 const unsigned DestAlign =
1204 std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
1205 if (DestAlign > 1)
1206 if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1207 Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
1208
1209 IRBuilder<> Builder(MemCpy);
1210
1211 // If the sizes have different types, zext the smaller one.
1212 if (DestSize->getType() != SrcSize->getType()) {
1213 if (DestSize->getType()->getIntegerBitWidth() >
1214 SrcSize->getType()->getIntegerBitWidth())
1215 SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1216 else
1217 DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
1218 }
1219
1220 Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1221 Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1222 Value *MemsetLen = Builder.CreateSelect(
1223 Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
1224 unsigned DestAS = Dest->getType()->getPointerAddressSpace();
1225 Instruction *NewMemSet = Builder.CreateMemSet(
1226 Builder.CreateGEP(Builder.getInt8Ty(),
1227 Builder.CreatePointerCast(Dest,
1228 Builder.getInt8PtrTy(DestAS)),
1229 SrcSize),
1230 MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));
1231
1232 if (MSSAU) {
1233    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
1234           "MemCpy must be a MemoryDef");
1235 // The new memset is inserted after the memcpy, but it is known that its
1236 // defining access is the memset about to be removed which immediately
1237 // precedes the memcpy.
1238 auto *LastDef =
1239 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
1240 auto *NewAccess = MSSAU->createMemoryAccessBefore(
1241 NewMemSet, LastDef->getDefiningAccess(), LastDef);
1242 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1243 }
1244
1245 eraseInstruction(MemSet);
1246 return true;
1247}
1248
1249/// Determine whether the instruction has undefined content for the given Size,
1250/// either because it was freshly alloca'd or started its lifetime.
1251static bool hasUndefContents(Instruction *I, Value *Size) {
1252 if (isa<AllocaInst>(I))
1253 return true;
1254
1255 if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
1256 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1257 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1258 if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1259 if (LTSize->getZExtValue() >= CSize->getZExtValue())
1260 return true;
1261 }
1262
1263 return false;
1264}
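// [Editor's note] Hypothetical IR example of the lifetime.start case above:
//   %a = alloca [16 x i8]
//   %p = bitcast [16 x i8]* %a to i8*
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)
//   ... an 8-byte memcpy whose source is %p ...
// The lifetime.start size (16) covers the copy size (8), so the copied bytes
// are undef, hasUndefContents returns true, and the caller may drop the copy.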
1265
1266static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
1267 MemoryDef *Def, Value *Size) {
1268 if (MSSA->isLiveOnEntryDef(Def))
1269 return isa<AllocaInst>(getUnderlyingObject(V));
1270
1271 if (IntrinsicInst *II =
1272 dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
1273 if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
1274 ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
1275
1276 if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
1277 if (AA->isMustAlias(V, II->getArgOperand(1)) &&
1278 LTSize->getZExtValue() >= CSize->getZExtValue())
1279 return true;
1280 }
1281
1282 // If the lifetime.start covers a whole alloca (as it almost always
1283 // does) and we're querying a pointer based on that alloca, then we know
1284 // the memory is definitely undef, regardless of how exactly we alias.
1285 // The size also doesn't matter, as an out-of-bounds access would be UB.
1286 AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
1287 if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
1288 const DataLayout &DL = Alloca->getModule()->getDataLayout();
1289 if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
1290 if (*AllocaSize == LTSize->getValue() * 8)
1291 return true;
1292 }
1293 }
1294 }
1295
1296 return false;
1297}
1298
1299/// Transform memcpy to memset when its source was just memset.
1300/// In other words, turn:
1301/// \code
1302/// memset(dst1, c, dst1_size);
1303/// memcpy(dst2, dst1, dst2_size);
1304/// \endcode
1305/// into:
1306/// \code
1307/// memset(dst1, c, dst1_size);
1308/// memset(dst2, c, dst2_size);
1309/// \endcode
1310/// When dst2_size <= dst1_size.
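// [Editor's sketch] A standalone C++ illustration (not LLVM code) of this
// transform for the dst2_size <= dst1_size case; all names are hypothetical.
#include <cassert>
#include <cstring>

int main() {
  unsigned char dst1[8], dst2a[4], dst2b[4];
  std::memset(dst1, 0x5C, sizeof(dst1));    // memset(dst1, c, dst1_size)
  std::memcpy(dst2a, dst1, sizeof(dst2a));  // original: memcpy from dst1
  std::memset(dst2b, 0x5C, sizeof(dst2b));  // rewritten: memset directly
  assert(std::memcmp(dst2a, dst2b, sizeof(dst2a)) == 0);
}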
1311bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1312 MemSetInst *MemSet) {
1313  // Make sure we have memcpy(..., memset(...), ...); that is, we are memsetting
1314  // and memcpying from the same address. Otherwise it is hard to reason about.
1315 if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
1316 return false;
1317
1318 Value *MemSetSize = MemSet->getLength();
1319 Value *CopySize = MemCpy->getLength();
1320
1321 if (MemSetSize != CopySize) {
1322 // Make sure the memcpy doesn't read any more than what the memset wrote.
1323 // Don't worry about sizes larger than i64.
1324
1325 // A known memset size is required.
1326 ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
1327 if (!CMemSetSize)
1328 return false;
1329
1330 // A known memcpy size is also required.
1331 ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
1332 if (!CCopySize)
1333 return false;
1334 if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
1335 // If the memcpy is larger than the memset, but the memory was undef prior
1336 // to the memset, we can just ignore the tail. Technically we're only
1337 // interested in the bytes from MemSetSize..CopySize here, but as we can't
1338 // easily represent this location, we use the full 0..CopySize range.
1339 MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
1340 bool CanReduceSize = false;
1341 if (EnableMemorySSA) {
1342 MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
1343 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
1344 MemSetAccess->getDefiningAccess(), MemCpyLoc);
1345 if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1346 if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize))
1347 CanReduceSize = true;
1348 } else {
1349 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1350 MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
1351 if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
1352 CanReduceSize = true;
1353 }
1354
1355 if (!CanReduceSize)
1356 return false;
1357 CopySize = MemSetSize;
1358 }
1359 }
1360
1361 IRBuilder<> Builder(MemCpy);
1362 Instruction *NewM =
1363 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
1364 CopySize, MaybeAlign(MemCpy->getDestAlignment()));
1365 if (MSSAU) {
1366 auto *LastDef =
1367 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
1368 auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1369 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1370 }
1371
1372 return true;
1373}
1374
1375/// Perform simplification of memcpy calls. If we have memcpy A
1376/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1377/// B to be a memcpy from X to Z (or potentially a memmove, depending on
1378/// circumstances). This allows later passes to remove the first memcpy
1379/// altogether.
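// [Editor's sketch] A standalone C++ illustration (not LLVM code) of the
// memcpy-memcpy forwarding described above; X, Y, Z are hypothetical buffers.
#include <cassert>
#include <cstring>

int main() {
  char X[8] = "abcdefg", Y[8], Za[8], Zb[8];
  std::memcpy(Y, X, 8);   // memcpy A: X -> Y
  std::memcpy(Za, Y, 8);  // memcpy B as written: Y -> Z
  std::memcpy(Zb, X, 8);  // memcpy B rewritten:  X -> Z
  assert(std::memcmp(Za, Zb, 8) == 0);  // A becomes dead if Y is otherwise unused
}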
1380bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
1381  // We can only optimize non-volatile memcpy calls.
1382 if (M->isVolatile()) return false;
1383
1384 // If the source and destination of the memcpy are the same, then zap it.
1385 if (M->getSource() == M->getDest()) {
1386 ++BBI;
1387 eraseInstruction(M);
1388 return true;
1389 }
1390
1391 // If copying from a constant, try to turn the memcpy into a memset.
1392 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
1393 if (GV->isConstant() && GV->hasDefinitiveInitializer())
1394 if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
1395 M->getModule()->getDataLayout())) {
1396 IRBuilder<> Builder(M);
1397 Instruction *NewM =
1398 Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
1399 MaybeAlign(M->getDestAlignment()), false);
1400 if (MSSAU) {
1401 auto *LastDef =
1402 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
1403 auto *NewAccess =
1404 MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1405 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1406 }
1407
1408 eraseInstruction(M);
1409 ++NumCpyToSet;
1410 return true;
1411 }
1412
1413 if (EnableMemorySSA) {
1414 MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
1415 MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
1416 MemoryLocation DestLoc = MemoryLocation::getForDest(M);
1417 const MemoryAccess *DestClobber =
1418 MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);
1419
1420 // Try to turn a partially redundant memset + memcpy into
1421 // memcpy + smaller memset. We don't need the memcpy size for this.
1422    // The memcpy must post-dominate the memset, so limit this to the same basic
1423 // block. A non-local generalization is likely not worthwhile.
1424 if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
1425 if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
1426 if (DestClobber->getBlock() == M->getParent())
1427 if (processMemSetMemCpyDependence(M, MDep))
1428 return true;
1429
1430 MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
1431 AnyClobber, MemoryLocation::getForSource(M));
1432
1433 // There are four possible optimizations we can do for memcpy:
1434    //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1435 // b) call-memcpy xform for return slot optimization.
1436 // c) memcpy from freshly alloca'd space or space that has just started
1437 // its lifetime copies undefined data, and we can therefore eliminate
1438 // the memcpy in favor of the data that was already at the destination.
1439 // d) memcpy from a just-memset'd source can be turned into memset.
1440 if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
1441 if (Instruction *MI = MD->getMemoryInst()) {
1442 if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
1443 if (auto *C = dyn_cast<CallInst>(MI)) {
1444            // The memcpy must post-dominate the call. Limit to the same block for
1445 // now. Additionally, we need to ensure that there are no accesses
1446 // to dest between the call and the memcpy. Accesses to src will be
1447 // checked by performCallSlotOptzn().
1448 // TODO: Support non-local call-slot optimization?
1449 if (C->getParent() == M->getParent() &&
1450 !accessedBetween(*AA, DestLoc, MD, MA)) {
1451 // FIXME: Can we pass in either of dest/src alignment here instead
1452 // of conservatively taking the minimum?
1453 Align Alignment = std::min(M->getDestAlign().valueOrOne(),
1454 M->getSourceAlign().valueOrOne());
1455 if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
1456 CopySize->getZExtValue(), Alignment,
1457 C)) {
1458            LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
1459                              << "    call: " << *C << "\n"
1460                              << "    memcpy: " << *M << "\n");
1461 eraseInstruction(M);
1462 ++NumMemCpyInstr;
1463 return true;
1464 }
1465 }
1466 }
1467 }
1468 if (auto *MDep = dyn_cast<MemCpyInst>(MI))
1469 return processMemCpyMemCpyDependence(M, MDep);
1470 if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
1471 if (performMemCpyToMemSetOptzn(M, MDep)) {
1472          LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
1473 eraseInstruction(M);
1474 ++NumCpyToSet;
1475 return true;
1476 }
1477 }
1478 }
1479
1480 if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) {
1481        LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
1482 eraseInstruction(M);
1483 ++NumMemCpyInstr;
1484 return true;
1485 }
1486 }
1487 } else {
1488 MemDepResult DepInfo = MD->getDependency(M);
1489
1490 // Try to turn a partially redundant memset + memcpy into
1491 // memcpy + smaller memset. We don't need the memcpy size for this.
1492 if (DepInfo.isClobber())
1493 if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
1494 if (processMemSetMemCpyDependence(M, MDep))
1495 return true;
1496
1497 // There are four possible optimizations we can do for memcpy:
1498    //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1499 // b) call-memcpy xform for return slot optimization.
1500 // c) memcpy from freshly alloca'd space or space that has just started
1501 // its lifetime copies undefined data, and we can therefore eliminate
1502 // the memcpy in favor of the data that was already at the destination.
1503 // d) memcpy from a just-memset'd source can be turned into memset.
1504 if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
1505 if (DepInfo.isClobber()) {
1506 if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
1507 // FIXME: Can we pass in either of dest/src alignment here instead
1508 // of conservatively taking the minimum?
1509 Align Alignment = std::min(M->getDestAlign().valueOrOne(),
1510 M->getSourceAlign().valueOrOne());
1511 if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
1512 CopySize->getZExtValue(), Alignment, C)) {
1513 eraseInstruction(M);
1514 ++NumMemCpyInstr;
1515 return true;
1516 }
1517 }
1518 }
1519 }
1520
1521 MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
1522 MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1523 SrcLoc, true, M->getIterator(), M->getParent());
1524
1525 if (SrcDepInfo.isClobber()) {
1526 if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
1527 return processMemCpyMemCpyDependence(M, MDep);
1528 } else if (SrcDepInfo.isDef()) {
1529 if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) {
1530 eraseInstruction(M);
1531 ++NumMemCpyInstr;
1532 return true;
1533 }
1534 }
1535
1536 if (SrcDepInfo.isClobber())
1537 if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1538 if (performMemCpyToMemSetOptzn(M, MDep)) {
1539 eraseInstruction(M);
1540 ++NumCpyToSet;
1541 return true;
1542 }
1543 }
1544
1545 return false;
1546}
1547
1548/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1549/// not to alias.
1550bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1551 if (!TLI->has(LibFunc_memmove))
1552 return false;
1553
1554 // See if the pointers alias.
1555 if (!AA->isNoAlias(MemoryLocation::getForDest(M),
1556 MemoryLocation::getForSource(M)))
1557 return false;
1558
1559  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1560                    << "\n");
1561
1562 // If not, then we know we can transform this.
1563 Type *ArgTys[3] = { M->getRawDest()->getType(),
1564 M->getRawSource()->getType(),
1565 M->getLength()->getType() };
1566 M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1567 Intrinsic::memcpy, ArgTys));
1568
1569 // For MemorySSA nothing really changes (except that memcpy may imply stricter
1570 // aliasing guarantees).
1571
1572  // MemDep may have overly conservative information about this instruction;
1573  // just conservatively flush it from the cache.
1574 if (MD)
1575 MD->removeInstruction(M);
1576
1577 ++NumMoveToCpy;
1578 return true;
1579}
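// [Editor's sketch] A standalone C++ check (not LLVM code) that memmove and
// memcpy agree whenever source and destination provably do not overlap.
#include <cassert>
#include <cstring>

int main() {
  const char src[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  char a[8], b[8];           // both disjoint from src
  std::memmove(a, src, 8);   // original call
  std::memcpy(b, src, 8);    // strengthened call
  assert(std::memcmp(a, b, 8) == 0);
}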
1580
1581/// This is called on every byval argument in call sites.
1582bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
1583 const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
1584 // Find out what feeds this byval argument.
1585 Value *ByValArg = CB.getArgOperand(ArgNo);
1586 Type *ByValTy = CB.getParamByValType(ArgNo);
1587 uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
1588 MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
1589 MemCpyInst *MDep = nullptr;
1590 if (EnableMemorySSA) {
1591 MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
1592 if (!CallAccess)
1593 return false;
1594 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
1595 CallAccess->getDefiningAccess(), Loc);
1596 if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1597 MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());
1598 } else {
1599 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1600 Loc, true, CB.getIterator(), CB.getParent());
1601 if (!DepInfo.isClobber())
1602 return false;
1603 MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
1604 }
1605
1606 // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
1607 // a memcpy, see if we can byval from the source of the memcpy instead of the
1608 // result.
1609 if (!MDep || MDep->isVolatile() ||
1610 ByValArg->stripPointerCasts() != MDep->getDest())
1611 return false;
1612
1613  // The length of the memcpy must be larger than or equal to the size of the byval.
1614 ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
1615 if (!C1 || C1->getValue().getZExtValue() < ByValSize)
1616 return false;
1617
1618 // Get the alignment of the byval. If the call doesn't specify the alignment,
1619 // then it is some target specific value that we can't know.
1620 MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
1621 if (!ByValAlign) return false;
1622
1623 // If it is greater than the memcpy, then we check to see if we can force the
1624 // source of the memcpy to the alignment we need. If we fail, we bail out.
1625 MaybeAlign MemDepAlign = MDep->getSourceAlign();
1626 if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
1627 getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
1628 DT) < *ByValAlign)
1629 return false;
1630
1631  // The address space of the memcpy source must match the byval argument.
1632 if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1633 ByValArg->getType()->getPointerAddressSpace())
1634 return false;
1635
1636 // Verify that the copied-from memory doesn't change in between the memcpy and
1637 // the byval call.
1638 // memcpy(a <- b)
1639 // *b = 42;
1640 // foo(*a)
1641 // It would be invalid to transform the second memcpy into foo(*b).
1642 if (EnableMemorySSA) {
1643 if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
1644 MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB)))
1645 return false;
1646 } else {
1647    // NOTE: This is conservative; it will stop on any read from the source loc,
1648 // not just the defining memcpy.
1649 MemDepResult SourceDep = MD->getPointerDependencyFrom(
1650 MemoryLocation::getForSource(MDep), false,
1651 CB.getIterator(), MDep->getParent());
1652 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1653 return false;
1654 }
1655
1656 Value *TmpCast = MDep->getSource();
1657 if (MDep->getSource()->getType() != ByValArg->getType()) {
1658 BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1659 "tmpcast", &CB);
1660 // Set the tmpcast's DebugLoc to MDep's
1661 TmpBitCast->setDebugLoc(MDep->getDebugLoc());
1662 TmpCast = TmpBitCast;
1663 }
1664
1665  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1666                    << "  " << *MDep << "\n"
1667                    << "  " << CB << "\n");
1668
1669 // Otherwise we're good! Update the byval argument.
1670 CB.setArgOperand(ArgNo, TmpCast);
1671 ++NumMemCpyInstr;
1672 return true;
1673}
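// [Editor's sketch] A standalone C++ analogue (not LLVM code) of the byval
// forwarding above; struct S, callee, and all names are hypothetical.
#include <cassert>

struct S { int v[4]; };
static int callee(S s) { return s.v[0] + s.v[3]; }  // takes S by value

int main() {
  S src = {{1, 2, 3, 4}};
  S tmp = src;           // the memcpy feeding the byval argument
  int a = callee(tmp);   // original: byval from the copy
  int b = callee(src);   // transformed: byval directly from the source
  assert(a == b);        // legal because src is not written in between
  return 0;
}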
1674
1675/// Executes one iteration of MemCpyOptPass.
1676bool MemCpyOptPass::iterateOnFunction(Function &F) {
1677 bool MadeChange = false;
1678
1679  // Walk all instructions in the function.
1680 for (BasicBlock &BB : F) {
1681 // Skip unreachable blocks. For example processStore assumes that an
1682 // instruction in a BB can't be dominated by a later instruction in the
1683 // same BB (which is a scenario that can happen for an unreachable BB that
1684 // has itself as a predecessor).
1685 if (!DT->isReachableFromEntry(&BB))
17
Assuming the condition is false
18
Taking false branch
1686 continue;
1687
1688 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
19
Loop condition is true. Entering loop body
1689 // Avoid invalidating the iterator.
1690 Instruction *I = &*BI++;
1691
1692 bool RepeatInstruction = false;
1693
1694      if (StoreInst *SI = dyn_cast<StoreInst>(I))
20
Assuming 'I' is a 'StoreInst'
20.1
'SI' is non-null
21
Taking true branch
1695 MadeChange |= processStore(SI, BI);
22
Calling 'MemCpyOptPass::processStore'
1696 else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1697 RepeatInstruction = processMemSet(M, BI);
1698 else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
1699 RepeatInstruction = processMemCpy(M, BI);
1700 else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
1701 RepeatInstruction = processMemMove(M);
1702 else if (auto *CB = dyn_cast<CallBase>(I)) {
1703 for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
1704 if (CB->isByValArgument(i))
1705 MadeChange |= processByValArgument(*CB, i);
1706 }
1707
1708 // Reprocess the instruction if desired.
1709 if (RepeatInstruction) {
1710 if (BI != BB.begin())
1711 --BI;
1712 MadeChange = true;
1713 }
1714 }
1715 }
1716
1717 return MadeChange;
1718}
1719
1720PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
1721 auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F)
1722 : AM.getCachedResult<MemoryDependenceAnalysis>(F);
1723 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1724 auto *AA = &AM.getResult<AAManager>(F);
1725 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
1726 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1727 auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F)
1728 : AM.getCachedResult<MemorySSAAnalysis>(F);
1729
1730 bool MadeChange =
1731 runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
1732 if (!MadeChange)
1733 return PreservedAnalyses::all();
1734
1735 PreservedAnalyses PA;
1736 PA.preserveSet<CFGAnalyses>();
1737 if (MD)
1738 PA.preserve<MemoryDependenceAnalysis>();
1739 if (MSSA)
1740 PA.preserve<MemorySSAAnalysis>();
1741 return PA;
1742}
1743
1744bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
1745 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
1746 AssumptionCache *AC_, DominatorTree *DT_,
1747 MemorySSA *MSSA_) {
1748 bool MadeChange = false;
1749 MD = MD_;
12
Null pointer value stored to field 'MD'
1750 TLI = TLI_;
1751 AA = AA_;
1752 AC = AC_;
1753 DT = DT_;
1754 MSSA = MSSA_;
1755 MemorySSAUpdater MSSAU_(MSSA_);
1756  MSSAU = MSSA_ ? &MSSAU_ : nullptr;
12.1
'MSSA_' is null
13
'?' condition is false
1757  // If we don't have at least memset and memcpy, there is little point in doing
1758 // anything here. These are required by a freestanding implementation, so if
1759 // even they are disabled, there is no point in trying hard.
1760 if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
14
Taking false branch
1761 return false;
1762
1763 while (true) {
15
Loop condition is true. Entering loop body
1764 if (!iterateOnFunction(F))
16
Calling 'MemCpyOptPass::iterateOnFunction'
1765 break;
1766 MadeChange = true;
1767 }
1768
1769 if (MSSA_ && VerifyMemorySSA)
1770 MSSA_->verifyMemorySSA();
1771
1772 MD = nullptr;
1773 return MadeChange;
1774}
1775
1776/// This is the main transformation entry point for a function.
1777bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1778 if (skipFunction(F))
1
Assuming the condition is false
2
Taking false branch
1779 return false;
1780
1781 auto *MDWP = !EnableMemorySSA
3
Assuming the condition is false
4
'?' condition is false
1782 ? &getAnalysis<MemoryDependenceWrapperPass>()
1783 : getAnalysisIfAvailable<MemoryDependenceWrapperPass>();
1784 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1785 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1786 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1787 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1788 auto *MSSAWP = EnableMemorySSA
5
Assuming the condition is false
6
'?' condition is false
1789 ? &getAnalysis<MemorySSAWrapperPass>()
1790 : getAnalysisIfAvailable<MemorySSAWrapperPass>();
1791
1792 return Impl.runImpl(F, MDWP ? & MDWP->getMemDep() : nullptr, TLI, AA, AC, DT,
7
Assuming 'MDWP' is null
8
'?' condition is false
10
Passing null pointer value via 2nd parameter 'MD_'
11
Calling 'MemCpyOptPass::runImpl'
1793                      MSSAWP ? &MSSAWP->getMSSA() : nullptr);
8.1
'MSSAWP' is null
9
'?' condition is false
1794}

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
61/// An instruction to allocate memory on the stack.
62class AllocaInst : public UnaryInstruction {
63 Type *AllocatedType;
64
65 using AlignmentField = AlignmentBitfieldElementT<0>;
66 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
68 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69 SwiftErrorField>(),
70 "Bitfields must be contiguous");
71
72protected:
73 // Note: Instruction needs to be a friend here to call cloneImpl.
74 friend class Instruction;
75
76 AllocaInst *cloneImpl() const;
77
78public:
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 Instruction *InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace,
87 const Twine &Name, BasicBlock *InsertAtEnd);
88
89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90 const Twine &Name = "", Instruction *InsertBefore = nullptr);
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92 const Twine &Name, BasicBlock *InsertAtEnd);
93
94 /// Return true if there is an allocation size parameter to the allocation
95 /// instruction that is not 1.
96 bool isArrayAllocation() const;
97
98 /// Get the number of elements allocated. For a simple allocation of a single
99 /// element, this will return a constant 1 value.
100 const Value *getArraySize() const { return getOperand(0); }
101 Value *getArraySize() { return getOperand(0); }
102
103 /// Overload to return most specific pointer type.
104 PointerType *getType() const {
105 return cast<PointerType>(Instruction::getType());
106 }
107
108 /// Get allocation size in bits. Returns None if size can't be determined,
109 /// e.g. in case of a VLA.
110 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
111
112 /// Return the type that is being allocated by the instruction.
113 Type *getAllocatedType() const { return AllocatedType; }
114  /// For use only in special circumstances that need to generically
115  /// transform a whole instruction (e.g., IR linking and vectorization).
116 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
117
118 /// Return the alignment of the memory that is being allocated by the
119 /// instruction.
120 Align getAlign() const {
121 return Align(1ULL << getSubclassData<AlignmentField>());
122 }
123
124 void setAlignment(Align Align) {
125 setSubclassData<AlignmentField>(Log2(Align));
126 }
127
128  // FIXME: Remove this once the transition to Align is over.
129 unsigned getAlignment() const { return getAlign().value(); }
130
131 /// Return true if this alloca is in the entry block of the function and is a
132 /// constant size. If so, the code generator will fold it into the
133 /// prolog/epilog code, so it is basically free.
134 bool isStaticAlloca() const;
135
136 /// Return true if this alloca is used as an inalloca argument to a call. Such
137 /// allocas are never considered static even if they are in the entry block.
138 bool isUsedWithInAlloca() const {
139 return getSubclassData<UsedWithInAllocaField>();
140 }
141
142 /// Specify whether this alloca is used to represent the arguments to a call.
143 void setUsedWithInAlloca(bool V) {
144 setSubclassData<UsedWithInAllocaField>(V);
145 }
146
147 /// Return true if this alloca is used as a swifterror argument to a call.
148 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
149 /// Specify whether this alloca is used to represent a swifterror.
150 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
151
152 // Methods for support type inquiry through isa, cast, and dyn_cast:
153 static bool classof(const Instruction *I) {
154 return (I->getOpcode() == Instruction::Alloca);
155 }
156 static bool classof(const Value *V) {
157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
158 }
159
160private:
161 // Shadow Instruction::setInstructionSubclassData with a private forwarding
162 // method so that subclasses cannot accidentally use it.
163 template <typename Bitfield>
164 void setSubclassData(typename Bitfield::Type Value) {
165 Instruction::setSubclassData<Bitfield>(Value);
166 }
167};
168
169//===----------------------------------------------------------------------===//
170// LoadInst Class
171//===----------------------------------------------------------------------===//
172
173/// An instruction for reading from memory. This uses the SubclassData field in
174/// Value to store whether or not the load is volatile.
175class LoadInst : public UnaryInstruction {
176 using VolatileField = BoolBitfieldElementT<0>;
177 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
178 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
179 static_assert(
180 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
181 "Bitfields must be contiguous");
182
183 void AssertOK();
184
185protected:
186 // Note: Instruction needs to be a friend here to call cloneImpl.
187 friend class Instruction;
188
189 LoadInst *cloneImpl() const;
190
191public:
192 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
193 Instruction *InsertBefore);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
196 Instruction *InsertBefore);
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
198 BasicBlock *InsertAtEnd);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
200 Align Align, Instruction *InsertBefore = nullptr);
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
202 Align Align, BasicBlock *InsertAtEnd);
203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
204 Align Align, AtomicOrdering Order,
205 SyncScope::ID SSID = SyncScope::System,
206 Instruction *InsertBefore = nullptr);
207 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
208 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
209 BasicBlock *InsertAtEnd);
210
211 /// Return true if this is a load from a volatile memory location.
212 bool isVolatile() const { return getSubclassData<VolatileField>(); }
213
214 /// Specify whether this is a volatile load or not.
215 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
216
217 /// Return the alignment of the access that is being performed.
218 /// FIXME: Remove this function once transition to Align is over.
219 /// Use getAlign() instead.
220 unsigned getAlignment() const { return getAlign().value(); }
221
222 /// Return the alignment of the access that is being performed.
223 Align getAlign() const {
224 return Align(1ULL << (getSubclassData<AlignmentField>()));
225 }
226
227 void setAlignment(Align Align) {
228 setSubclassData<AlignmentField>(Log2(Align));
229 }
230
231 /// Returns the ordering constraint of this load instruction.
232 AtomicOrdering getOrdering() const {
233 return getSubclassData<OrderingField>();
234 }
235 /// Sets the ordering constraint of this load instruction. May not be Release
236 /// or AcquireRelease.
237 void setOrdering(AtomicOrdering Ordering) {
238 setSubclassData<OrderingField>(Ordering);
239 }
240
241 /// Returns the synchronization scope ID of this load instruction.
242 SyncScope::ID getSyncScopeID() const {
243 return SSID;
244 }
245
246 /// Sets the synchronization scope ID of this load instruction.
247 void setSyncScopeID(SyncScope::ID SSID) {
248 this->SSID = SSID;
249 }
250
251 /// Sets the ordering constraint and the synchronization scope ID of this load
252 /// instruction.
253 void setAtomic(AtomicOrdering Ordering,
254 SyncScope::ID SSID = SyncScope::System) {
255 setOrdering(Ordering);
256 setSyncScopeID(SSID);
257 }
258
259 bool isSimple() const { return !isAtomic() && !isVolatile(); }
43
Assuming the condition is true
44
Assuming the condition is true
45
Returning the value 1, which participates in a condition later
260
261 bool isUnordered() const {
262 return (getOrdering() == AtomicOrdering::NotAtomic ||
263 getOrdering() == AtomicOrdering::Unordered) &&
264 !isVolatile();
265 }
266
267 Value *getPointerOperand() { return getOperand(0); }
268 const Value *getPointerOperand() const { return getOperand(0); }
269 static unsigned getPointerOperandIndex() { return 0U; }
270 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
271
272 /// Returns the address space of the pointer operand.
273 unsigned getPointerAddressSpace() const {
274 return getPointerOperandType()->getPointerAddressSpace();
275 }
276
277 // Methods for support type inquiry through isa, cast, and dyn_cast:
278 static bool classof(const Instruction *I) {
279 return I->getOpcode() == Instruction::Load;
280 }
281 static bool classof(const Value *V) {
282 return isa<Instruction>(V) && classof(cast<Instruction>(V));
283 }
284
285private:
286 // Shadow Instruction::setInstructionSubclassData with a private forwarding
287 // method so that subclasses cannot accidentally use it.
288 template <typename Bitfield>
289 void setSubclassData(typename Bitfield::Type Value) {
290 Instruction::setSubclassData<Bitfield>(Value);
291 }
292
293 /// The synchronization scope ID of this load instruction. Not quite enough
294 /// room in SubClassData for everything, so synchronization scope ID gets its
295 /// own field.
296 SyncScope::ID SSID;
297};
298
299//===----------------------------------------------------------------------===//
300// StoreInst Class
301//===----------------------------------------------------------------------===//
302
303/// An instruction for storing to memory.
304class StoreInst : public Instruction {
305 using VolatileField = BoolBitfieldElementT<0>;
306 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
307 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
308 static_assert(
309 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
310 "Bitfields must be contiguous");
311
312 void AssertOK();
313
314protected:
315 // Note: Instruction needs to be a friend here to call cloneImpl.
316 friend class Instruction;
317
318 StoreInst *cloneImpl() const;
319
320public:
321 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
322 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
326 Instruction *InsertBefore = nullptr);
327 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
328 BasicBlock *InsertAtEnd);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
330 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
331 Instruction *InsertBefore = nullptr);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
334
335 // allocate space for exactly two operands
336 void *operator new(size_t S) { return User::operator new(S, 2); }
337 void operator delete(void *Ptr) { User::operator delete(Ptr); }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
24
Assuming the condition is true
25
Assuming the condition is true
26
Returning the value 1, which participates in a condition later
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t S) { return User::operator new(S, 0); }
466 void operator delete(void *Ptr) { User::operator delete(Ptr); }
467
468 /// Returns the ordering constraint of this fence instruction.
469 AtomicOrdering getOrdering() const {
470 return getSubclassData<OrderingField>();
471 }
472
473 /// Sets the ordering constraint of this fence instruction. May only be
474 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
475 void setOrdering(AtomicOrdering Ordering) {
476 setSubclassData<OrderingField>(Ordering);
477 }
478
479 /// Returns the synchronization scope ID of this fence instruction.
480 SyncScope::ID getSyncScopeID() const {
481 return SSID;
482 }
483
484 /// Sets the synchronization scope ID of this fence instruction.
485 void setSyncScopeID(SyncScope::ID SSID) {
486 this->SSID = SSID;
487 }
488
489 // Methods for support type inquiry through isa, cast, and dyn_cast:
490 static bool classof(const Instruction *I) {
491 return I->getOpcode() == Instruction::Fence;
492 }
493 static bool classof(const Value *V) {
494 return isa<Instruction>(V) && classof(cast<Instruction>(V));
495 }
496
497private:
498 // Shadow Instruction::setInstructionSubclassData with a private forwarding
499 // method so that subclasses cannot accidentally use it.
500 template <typename Bitfield>
501 void setSubclassData(typename Bitfield::Type Value) {
502 Instruction::setSubclassData<Bitfield>(Value);
503 }
504
505 /// The synchronization scope ID of this fence instruction. Not quite enough
506 /// room in SubClassData for everything, so synchronization scope ID gets its
507 /// own field.
508 SyncScope::ID SSID;
509};
510
511//===----------------------------------------------------------------------===//
512// AtomicCmpXchgInst Class
513//===----------------------------------------------------------------------===//
514
515/// An instruction that atomically checks whether a
516/// specified value is in a memory location, and, if it is, stores a new value
517/// there. The value returned by this instruction is a pair containing the
518/// original value as first element, and an i1 indicating success (true) or
519/// failure (false) as second element.
520///
521class AtomicCmpXchgInst : public Instruction {
522 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
523 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
524 SyncScope::ID SSID);
525
526 template <unsigned Offset>
527 using AtomicOrderingBitfieldElement =
528 typename Bitfield::Element<AtomicOrdering, Offset, 3,
529 AtomicOrdering::LAST>;
530
531protected:
532 // Note: Instruction needs to be a friend here to call cloneImpl.
533 friend class Instruction;
534
535 AtomicCmpXchgInst *cloneImpl() const;
536
537public:
538 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
539 AtomicOrdering SuccessOrdering,
540 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
541 Instruction *InsertBefore = nullptr);
542 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
543 AtomicOrdering SuccessOrdering,
544 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
545 BasicBlock *InsertAtEnd);
546
547 // allocate space for exactly three operands
548 void *operator new(size_t S) { return User::operator new(S, 3); }
549 void operator delete(void *Ptr) { User::operator delete(Ptr); }
550
551 using VolatileField = BoolBitfieldElementT<0>;
552 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
553 using SuccessOrderingField =
554 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
555 using FailureOrderingField =
556 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
557 using AlignmentField =
558 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
559 static_assert(
560 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
561 FailureOrderingField, AlignmentField>(),
562 "Bitfields must be contiguous");
563
564  /// Return the alignment of the memory location operated on by this
565  /// cmpxchg instruction.
566 Align getAlign() const {
567 return Align(1ULL << getSubclassData<AlignmentField>());
568 }
569
570 void setAlignment(Align Align) {
571 setSubclassData<AlignmentField>(Log2(Align));
572 }
573
574 /// Return true if this is a cmpxchg from a volatile memory
575 /// location.
576 ///
577 bool isVolatile() const { return getSubclassData<VolatileField>(); }
578
579 /// Specify whether this is a volatile cmpxchg.
580 ///
581 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
582
583 /// Return true if this cmpxchg may spuriously fail.
584 bool isWeak() const { return getSubclassData<WeakField>(); }
585
586 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
587
588 /// Transparently provide more efficient getOperand methods.
589  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
590
591 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
592 return Ordering != AtomicOrdering::NotAtomic &&
593 Ordering != AtomicOrdering::Unordered;
594 }
595
596 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered &&
599 Ordering != AtomicOrdering::AcquireRelease &&
600 Ordering != AtomicOrdering::Release;
601 }
602
603 /// Returns the success ordering constraint of this cmpxchg instruction.
604 AtomicOrdering getSuccessOrdering() const {
605 return getSubclassData<SuccessOrderingField>();
606 }
607
608 /// Sets the success ordering constraint of this cmpxchg instruction.
609 void setSuccessOrdering(AtomicOrdering Ordering) {
610    assert(isValidSuccessOrdering(Ordering) &&
611           "invalid CmpXchg success ordering");
612 setSubclassData<SuccessOrderingField>(Ordering);
613 }
614
615 /// Returns the failure ordering constraint of this cmpxchg instruction.
616 AtomicOrdering getFailureOrdering() const {
617 return getSubclassData<FailureOrderingField>();
618 }
619
620 /// Sets the failure ordering constraint of this cmpxchg instruction.
621 void setFailureOrdering(AtomicOrdering Ordering) {
622    assert(isValidFailureOrdering(Ordering) &&
623           "invalid CmpXchg failure ordering");
624 setSubclassData<FailureOrderingField>(Ordering);
625 }
626
627 /// Returns a single ordering which is at least as strong as both the
628 /// success and failure orderings for this cmpxchg.
629 AtomicOrdering getMergedOrdering() const {
630 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
631 return AtomicOrdering::SequentiallyConsistent;
632 if (getFailureOrdering() == AtomicOrdering::Acquire) {
633 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
634 return AtomicOrdering::Acquire;
635 if (getSuccessOrdering() == AtomicOrdering::Release)
636 return AtomicOrdering::AcquireRelease;
637 }
638 return getSuccessOrdering();
639 }
640
641 /// Returns the synchronization scope ID of this cmpxchg instruction.
642 SyncScope::ID getSyncScopeID() const {
643 return SSID;
644 }
645
646 /// Sets the synchronization scope ID of this cmpxchg instruction.
647 void setSyncScopeID(SyncScope::ID SSID) {
648 this->SSID = SSID;
649 }
650
651 Value *getPointerOperand() { return getOperand(0); }
652 const Value *getPointerOperand() const { return getOperand(0); }
653 static unsigned getPointerOperandIndex() { return 0U; }
654
655 Value *getCompareOperand() { return getOperand(1); }
656 const Value *getCompareOperand() const { return getOperand(1); }
657
658 Value *getNewValOperand() { return getOperand(2); }
659 const Value *getNewValOperand() const { return getOperand(2); }
660
661 /// Returns the address space of the pointer operand.
662 unsigned getPointerAddressSpace() const {
663 return getPointerOperand()->getType()->getPointerAddressSpace();
664 }
665
666 /// Returns the strongest permitted ordering on failure, given the
667 /// desired ordering on success.
668 ///
669 /// If the comparison in a cmpxchg operation fails, there is no atomic store
670 /// so release semantics cannot be provided. So this function drops explicit
671 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
672 /// operation would remain SequentiallyConsistent.
673 static AtomicOrdering
674 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
675 switch (SuccessOrdering) {
676 default:
677 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 677)
;
678 case AtomicOrdering::Release:
679 case AtomicOrdering::Monotonic:
680 return AtomicOrdering::Monotonic;
681 case AtomicOrdering::AcquireRelease:
682 case AtomicOrdering::Acquire:
683 return AtomicOrdering::Acquire;
684 case AtomicOrdering::SequentiallyConsistent:
685 return AtomicOrdering::SequentiallyConsistent;
686 }
687 }
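
A hedged sketch of the mapping above (the helper name is hypothetical): any Release component is dropped on the failure path.

#include "llvm/IR/Instructions.h"
#include <cassert>

void strongestFailureSketch() {
  using llvm::AtomicCmpXchgInst;
  using llvm::AtomicOrdering;
  assert(AtomicCmpXchgInst::getStrongestFailureOrdering(
             AtomicOrdering::AcquireRelease) == AtomicOrdering::Acquire);
  assert(AtomicCmpXchgInst::getStrongestFailureOrdering(
             AtomicOrdering::Release) == AtomicOrdering::Monotonic);
}
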
688
689 // Methods for support type inquiry through isa, cast, and dyn_cast:
690 static bool classof(const Instruction *I) {
691 return I->getOpcode() == Instruction::AtomicCmpXchg;
692 }
693 static bool classof(const Value *V) {
694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
695 }
696
697private:
698 // Shadow Instruction::setInstructionSubclassData with a private forwarding
699 // method so that subclasses cannot accidentally use it.
700 template <typename Bitfield>
701 void setSubclassData(typename Bitfield::Type Value) {
702 Instruction::setSubclassData<Bitfield>(Value);
703 }
704
705 /// The synchronization scope ID of this cmpxchg instruction. Not quite
706 /// enough room in SubClassData for everything, so synchronization scope ID
707 /// gets its own field.
708 SyncScope::ID SSID;
709};
710
711template <>
712struct OperandTraits<AtomicCmpXchgInst> :
713 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
714};
715
716 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
717
718//===----------------------------------------------------------------------===//
719// AtomicRMWInst Class
720//===----------------------------------------------------------------------===//
721
722 /// An instruction that atomically reads a memory location,
723/// combines it with another value, and then stores the result back. Returns
724/// the old value.
725///
726class AtomicRMWInst : public Instruction {
727protected:
728 // Note: Instruction needs to be a friend here to call cloneImpl.
729 friend class Instruction;
730
731 AtomicRMWInst *cloneImpl() const;
732
733public:
734 /// This enumeration lists the possible modifications atomicrmw can make. In
735 /// the descriptions, 'p' is the pointer to the instruction's memory location,
736 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
737 /// instruction. These instructions always return 'old'.
738 enum BinOp : unsigned {
739 /// *p = v
740 Xchg,
741 /// *p = old + v
742 Add,
743 /// *p = old - v
744 Sub,
745 /// *p = old & v
746 And,
747 /// *p = ~(old & v)
748 Nand,
749 /// *p = old | v
750 Or,
751 /// *p = old ^ v
752 Xor,
753 /// *p = old >signed v ? old : v
754 Max,
755 /// *p = old <signed v ? old : v
756 Min,
757 /// *p = old >unsigned v ? old : v
758 UMax,
759 /// *p = old <unsigned v ? old : v
760 UMin,
761
762 /// *p = old + v
763 FAdd,
764
765 /// *p = old - v
766 FSub,
767
768 FIRST_BINOP = Xchg,
769 LAST_BINOP = FSub,
770 BAD_BINOP
771 };
772
773private:
774 template <unsigned Offset>
775 using AtomicOrderingBitfieldElement =
776 typename Bitfield::Element<AtomicOrdering, Offset, 3,
777 AtomicOrdering::LAST>;
778
779 template <unsigned Offset>
780 using BinOpBitfieldElement =
781 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
782
783public:
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 Instruction *InsertBefore = nullptr);
787 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
788 AtomicOrdering Ordering, SyncScope::ID SSID,
789 BasicBlock *InsertAtEnd);
790
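
A usage sketch for the first constructor above; Ptr, Val, and InsertBefore are assumed to exist with compatible types, and the wrapper function is hypothetical.

#include "llvm/IR/Instructions.h"

llvm::AtomicRMWInst *makeAtomicAdd(llvm::Value *Ptr, llvm::Value *Val,
                                   llvm::Instruction *InsertBefore) {
  // Atomically performs *Ptr = *Ptr + Val and yields the old value.
  return new llvm::AtomicRMWInst(llvm::AtomicRMWInst::Add, Ptr, Val,
                                 llvm::Align(4),
                                 llvm::AtomicOrdering::Monotonic,
                                 llvm::SyncScope::System, InsertBefore);
}
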
791 // allocate space for exactly two operands
792 void *operator new(size_t S) { return User::operator new(S, 2); }
793 void operator delete(void *Ptr) { User::operator delete(Ptr); }
794
795 using VolatileField = BoolBitfieldElementT<0>;
796 using AtomicOrderingField =
797 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
798 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
799 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
800 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
801 OperationField, AlignmentField>(),
802 "Bitfields must be contiguous");
803
804 BinOp getOperation() const { return getSubclassData<OperationField>(); }
805
806 static StringRef getOperationName(BinOp Op);
807
808 static bool isFPOperation(BinOp Op) {
809 switch (Op) {
810 case AtomicRMWInst::FAdd:
811 case AtomicRMWInst::FSub:
812 return true;
813 default:
814 return false;
815 }
816 }
817
818 void setOperation(BinOp Operation) {
819 setSubclassData<OperationField>(Operation);
820 }
821
822 /// Return the alignment of the memory location that is being atomically
823 /// accessed by the instruction.
824 Align getAlign() const {
825 return Align(1ULL << getSubclassData<AlignmentField>());
826 }
827
828 void setAlignment(Align Align) {
829 setSubclassData<AlignmentField>(Log2(Align));
830 }
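
Since the alignment lives in a log2 bitfield, only powers of two are representable and the value round-trips exactly. A minimal sketch (RMW assumed valid; helper name hypothetical):

#include "llvm/IR/Instructions.h"
#include <cassert>

void alignmentRoundTripSketch(llvm::AtomicRMWInst *RMW) {
  RMW->setAlignment(llvm::Align(16)); // stored as Log2(16) == 4 in the bitfield
  assert(RMW->getAlign() == llvm::Align(16));
}
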
831
832 /// Return true if this is a RMW on a volatile memory location.
833 ///
834 bool isVolatile() const { return getSubclassData<VolatileField>(); }
835
836 /// Specify whether this is a volatile RMW or not.
837 ///
838 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
839
840 /// Transparently provide more efficient getOperand methods.
841 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
842
843 /// Returns the ordering constraint of this rmw instruction.
844 AtomicOrdering getOrdering() const {
845 return getSubclassData<AtomicOrderingField>();
846 }
847
848 /// Sets the ordering constraint of this rmw instruction.
849 void setOrdering(AtomicOrdering Ordering) {
850 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 851, __extension__ __PRETTY_FUNCTION__))
851 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 851, __extension__ __PRETTY_FUNCTION__))
;
852 setSubclassData<AtomicOrderingField>(Ordering);
853 }
854
855 /// Returns the synchronization scope ID of this rmw instruction.
856 SyncScope::ID getSyncScopeID() const {
857 return SSID;
858 }
859
860 /// Sets the synchronization scope ID of this rmw instruction.
861 void setSyncScopeID(SyncScope::ID SSID) {
862 this->SSID = SSID;
863 }
864
865 Value *getPointerOperand() { return getOperand(0); }
866 const Value *getPointerOperand() const { return getOperand(0); }
867 static unsigned getPointerOperandIndex() { return 0U; }
868
869 Value *getValOperand() { return getOperand(1); }
870 const Value *getValOperand() const { return getOperand(1); }
871
872 /// Returns the address space of the pointer operand.
873 unsigned getPointerAddressSpace() const {
874 return getPointerOperand()->getType()->getPointerAddressSpace();
875 }
876
877 bool isFloatingPointOperation() const {
878 return isFPOperation(getOperation());
879 }
880
881 // Methods for support type inquiry through isa, cast, and dyn_cast:
882 static bool classof(const Instruction *I) {
883 return I->getOpcode() == Instruction::AtomicRMW;
884 }
885 static bool classof(const Value *V) {
886 return isa<Instruction>(V) && classof(cast<Instruction>(V));
887 }
888
889private:
890 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
891 AtomicOrdering Ordering, SyncScope::ID SSID);
892
893 // Shadow Instruction::setInstructionSubclassData with a private forwarding
894 // method so that subclasses cannot accidentally use it.
895 template <typename Bitfield>
896 void setSubclassData(typename Bitfield::Type Value) {
897 Instruction::setSubclassData<Bitfield>(Value);
898 }
899
900 /// The synchronization scope ID of this rmw instruction. Not quite enough
901 /// room in SubClassData for everything, so synchronization scope ID gets its
902 /// own field.
903 SyncScope::ID SSID;
904};
905
906template <>
907struct OperandTraits<AtomicRMWInst>
908 : public FixedNumOperandTraits<AtomicRMWInst,2> {
909};
910
911 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
912
913//===----------------------------------------------------------------------===//
914// GetElementPtrInst Class
915//===----------------------------------------------------------------------===//
916
917// checkGEPType - Simple wrapper function to give a better assertion failure
918// message on bad indexes for a gep instruction.
919//
920inline Type *checkGEPType(Type *Ty) {
921 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 921, __extension__ __PRETTY_FUNCTION__))
;
922 return Ty;
923}
924
925 /// An instruction for type-safe pointer arithmetic to
926/// access elements of arrays and structs
927///
928class GetElementPtrInst : public Instruction {
929 Type *SourceElementType;
930 Type *ResultElementType;
931
932 GetElementPtrInst(const GetElementPtrInst &GEPI);
933
934 /// Constructors - Create a getelementptr instruction with a base pointer and
935 /// a list of indices. The first ctor can optionally insert before an existing
936 /// instruction, the second appends the new instruction to the specified
937 /// BasicBlock.
938 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
939 ArrayRef<Value *> IdxList, unsigned Values,
940 const Twine &NameStr, Instruction *InsertBefore);
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, BasicBlock *InsertAtEnd);
944
945 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
946
947protected:
948 // Note: Instruction needs to be a friend here to call cloneImpl.
949 friend class Instruction;
950
951 GetElementPtrInst *cloneImpl() const;
952
953public:
954 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
955 ArrayRef<Value *> IdxList,
956 const Twine &NameStr = "",
957 Instruction *InsertBefore = nullptr) {
958 unsigned Values = 1 + unsigned(IdxList.size());
959 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 959, __extension__ __PRETTY_FUNCTION__))
;
960 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 961, __extension__ __PRETTY_FUNCTION__))
961 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 961, __extension__ __PRETTY_FUNCTION__))
;
962 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
963 NameStr, InsertBefore);
964 }
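
A hedged usage sketch for Create above: building &A[0][J] for a pointer A whose pointee is an array type. All names (ArrTy, A, Zero, J, InsertPt) are assumptions, not part of this header.

#include "llvm/IR/Instructions.h"

// ArrTy is the pointee array type of A; Zero is an i64 0 constant.
llvm::GetElementPtrInst *elementPtrSketch(llvm::Type *ArrTy, llvm::Value *A,
                                          llvm::Value *Zero, llvm::Value *J,
                                          llvm::Instruction *InsertPt) {
  llvm::Value *Idx[] = {Zero, J}; // computes &A[0][J]
  return llvm::GetElementPtrInst::Create(ArrTy, A, Idx, "elt.ptr", InsertPt);
}
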
965
966 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
967 ArrayRef<Value *> IdxList,
968 const Twine &NameStr,
969 BasicBlock *InsertAtEnd) {
970 unsigned Values = 1 + unsigned(IdxList.size());
971 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 971, __extension__ __PRETTY_FUNCTION__))
;
972 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 973, __extension__ __PRETTY_FUNCTION__))
973 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 973, __extension__ __PRETTY_FUNCTION__))
;
974 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
975 NameStr, InsertAtEnd);
976 }
977
978 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds(
979 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr = "",
980 Instruction *InsertBefore = nullptr),
981 "Use the version with explicit element type instead") {
982 return CreateInBounds(
983 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
984 NameStr, InsertBefore);
985 }
986
987 /// Create an "inbounds" getelementptr. See the documentation for the
988 /// "inbounds" flag in LangRef.html for details.
989 static GetElementPtrInst *
990 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
991 const Twine &NameStr = "",
992 Instruction *InsertBefore = nullptr) {
993 GetElementPtrInst *GEP =
994 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
995 GEP->setIsInBounds(true);
996 return GEP;
997 }
998
999 LLVM_ATTRIBUTE_DEPRECATED(static GetElementPtrInst *CreateInBounds(
1000 Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr,
1001 BasicBlock *InsertAtEnd),
1002 "Use the version with explicit element type instead") {
1003 return CreateInBounds(
1004 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1005 NameStr, InsertAtEnd);
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 GetElementPtrInst *GEP =
1013 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1014 GEP->setIsInBounds(true);
1015 return GEP;
1016 }
1017
1018 /// Transparently provide more efficient getOperand methods.
1019 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1020
1021 Type *getSourceElementType() const { return SourceElementType; }
1022
1023 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1024 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1025
1026 Type *getResultElementType() const {
1027 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1028, __extension__ __PRETTY_FUNCTION__))
1028 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1028, __extension__ __PRETTY_FUNCTION__))
;
1029 return ResultElementType;
1030 }
1031
1032 /// Returns the address space of this instruction's pointer type.
1033 unsigned getAddressSpace() const {
1034 // Note that this is always the same as the pointer operand's address space
1035 // and that is cheaper to compute, so cheat here.
1036 return getPointerAddressSpace();
1037 }
1038
1039 /// Returns the result type of a getelementptr with the given source
1040 /// element type and indexes.
1041 ///
1042 /// Null is returned if the indices are invalid for the specified
1043 /// source element type.
1044 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1045 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1046 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1047
1048 /// Return the type of the element at the given index of an indexable
1049 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1050 ///
1051 /// Returns null if the type can't be indexed, or the given index is not
1052 /// legal for the given type.
1053 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1054 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1055
1056 inline op_iterator idx_begin() { return op_begin()+1; }
1057 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1058 inline op_iterator idx_end() { return op_end(); }
1059 inline const_op_iterator idx_end() const { return op_end(); }
1060
1061 inline iterator_range<op_iterator> indices() {
1062 return make_range(idx_begin(), idx_end());
1063 }
1064
1065 inline iterator_range<const_op_iterator> indices() const {
1066 return make_range(idx_begin(), idx_end());
1067 }
1068
1069 Value *getPointerOperand() {
1070 return getOperand(0);
1071 }
1072 const Value *getPointerOperand() const {
1073 return getOperand(0);
1074 }
1075 static unsigned getPointerOperandIndex() {
1076 return 0U; // get index for modifying correct operand.
1077 }
1078
1079 /// Method to return the pointer operand as a
1080 /// PointerType.
1081 Type *getPointerOperandType() const {
1082 return getPointerOperand()->getType();
1083 }
1084
1085 /// Returns the address space of the pointer operand.
1086 unsigned getPointerAddressSpace() const {
1087 return getPointerOperandType()->getPointerAddressSpace();
1088 }
1089
1090 /// Returns the pointer type returned by the GEP
1091 /// instruction, which may be a vector of pointers.
1092 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1093 ArrayRef<Value *> IdxList) {
1094 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1095 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1096 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1097 Type *PtrTy = OrigPtrTy->isOpaque()
1098 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1099 : PointerType::get(ResultElemTy, AddrSpace);
1100 // Vector GEP
1101 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1102 ElementCount EltCount = PtrVTy->getElementCount();
1103 return VectorType::get(PtrTy, EltCount);
1104 }
1105 for (Value *Index : IdxList)
1106 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1107 ElementCount EltCount = IndexVTy->getElementCount();
1108 return VectorType::get(PtrTy, EltCount);
1109 }
1110 // Scalar GEP
1111 return PtrTy;
1112 }
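
A sketch of the vector rule implemented above (hypothetical helper; indices assumed valid for ElTy): if any index is a vector, the returned type is a vector of pointers with the index's element count.

#include "llvm/IR/Instructions.h"

bool vectorGEPSketch(llvm::Type *ElTy, llvm::Value *Ptr, llvm::Value *VecIdx) {
  llvm::Value *Idx[] = {VecIdx};
  llvm::Type *RetTy =
      llvm::GetElementPtrInst::getGEPReturnType(ElTy, Ptr, Idx);
  return RetTy->isVectorTy(); // true when VecIdx has vector type
}
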
1113
1114 unsigned getNumIndices() const { // Note: always non-negative
1115 return getNumOperands() - 1;
1116 }
1117
1118 bool hasIndices() const {
1119 return getNumOperands() > 1;
1120 }
1121
1122 /// Return true if all of the indices of this GEP are
1123 /// zeros. If so, the result pointer and the first operand have the same
1124 /// value, just potentially different types.
1125 bool hasAllZeroIndices() const;
1126
1127 /// Return true if all of the indices of this GEP are
1128 /// constant integers. If so, the result pointer and the first operand have
1129 /// a constant offset between them.
1130 bool hasAllConstantIndices() const;
1131
1132 /// Set or clear the inbounds flag on this GEP instruction.
1133 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1134 void setIsInBounds(bool b = true);
1135
1136 /// Determine whether the GEP has the inbounds flag.
1137 bool isInBounds() const;
1138
1139 /// Accumulate the constant address offset of this GEP if possible.
1140 ///
1141 /// This routine accepts an APInt into which it will accumulate the constant
1142 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1143 /// all-constant, it returns false and the value of the offset APInt is
1144 /// undefined (it is *not* preserved!). The APInt passed into this routine
1145 /// must be at least as wide as the IntPtr type for the address space of
1146 /// the base GEP pointer.
1147 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1148 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1149 MapVector<Value *, APInt> &VariableOffsets,
1150 APInt &ConstantOffset) const;
1151 // Methods for support type inquiry through isa, cast, and dyn_cast:
1152 static bool classof(const Instruction *I) {
1153 return (I->getOpcode() == Instruction::GetElementPtr);
1154 }
1155 static bool classof(const Value *V) {
1156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1157 }
1158};
1159
1160template <>
1161struct OperandTraits<GetElementPtrInst> :
1162 public VariadicOperandTraits<GetElementPtrInst, 1> {
1163};
1164
1165GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1166 ArrayRef<Value *> IdxList, unsigned Values,
1167 const Twine &NameStr,
1168 Instruction *InsertBefore)
1169 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1170 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1171 Values, InsertBefore),
1172 SourceElementType(PointeeType),
1173 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1174 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1175, __extension__ __PRETTY_FUNCTION__))
1175 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1175, __extension__ __PRETTY_FUNCTION__))
;
1176 init(Ptr, IdxList, NameStr);
1177}
1178
1179GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1180 ArrayRef<Value *> IdxList, unsigned Values,
1181 const Twine &NameStr,
1182 BasicBlock *InsertAtEnd)
1183 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1184 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1185 Values, InsertAtEnd),
1186 SourceElementType(PointeeType),
1187 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1188 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
1189 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1189, __extension__ __PRETTY_FUNCTION__))
;
1190 init(Ptr, IdxList, NameStr);
1191}
1192
1193 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1194
1195//===----------------------------------------------------------------------===//
1196// ICmpInst Class
1197//===----------------------------------------------------------------------===//
1198
1199/// This instruction compares its operands according to the predicate given
1200/// to the constructor. It only operates on integers or pointers. The operands
1201 /// must be of identical types.
1202 /// Represents an integer comparison operator.
1203class ICmpInst: public CmpInst {
1204 void AssertOK() {
1205 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1206, __extension__ __PRETTY_FUNCTION__))
1206 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1206, __extension__ __PRETTY_FUNCTION__))
;
1207 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1208, __extension__ __PRETTY_FUNCTION__))
1208 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1208, __extension__ __PRETTY_FUNCTION__))
;
1209 // Check that the operands are the right type
1210 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1212, __extension__ __PRETTY_FUNCTION__))
1211 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1212, __extension__ __PRETTY_FUNCTION__))
1212 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1212, __extension__ __PRETTY_FUNCTION__))
;
1213 }
1214
1215protected:
1216 // Note: Instruction needs to be a friend here to call cloneImpl.
1217 friend class Instruction;
1218
1219 /// Clone an identical ICmpInst
1220 ICmpInst *cloneImpl() const;
1221
1222public:
1223 /// Constructor with insert-before-instruction semantics.
1224 ICmpInst(
1225 Instruction *InsertBefore, ///< Where to insert
1226 Predicate pred, ///< The predicate to use for the comparison
1227 Value *LHS, ///< The left-hand-side of the expression
1228 Value *RHS, ///< The right-hand-side of the expression
1229 const Twine &NameStr = "" ///< Name of the instruction
1230 ) : CmpInst(makeCmpResultType(LHS->getType()),
1231 Instruction::ICmp, pred, LHS, RHS, NameStr,
1232 InsertBefore) {
1233#ifndef NDEBUG
1234 AssertOK();
1235#endif
1236 }
1237
1238 /// Constructor with insert-at-end semantics.
1239 ICmpInst(
1240 BasicBlock &InsertAtEnd, ///< Block to insert into.
1241 Predicate pred, ///< The predicate to use for the comparison
1242 Value *LHS, ///< The left-hand-side of the expression
1243 Value *RHS, ///< The right-hand-side of the expression
1244 const Twine &NameStr = "" ///< Name of the instruction
1245 ) : CmpInst(makeCmpResultType(LHS->getType()),
1246 Instruction::ICmp, pred, LHS, RHS, NameStr,
1247 &InsertAtEnd) {
1248#ifndef NDEBUG
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// Constructor with no-insertion semantics
1254 ICmpInst(
1255 Predicate pred, ///< The predicate to use for the comparison
1256 Value *LHS, ///< The left-hand-side of the expression
1257 Value *RHS, ///< The right-hand-side of the expression
1258 const Twine &NameStr = "" ///< Name of the instruction
1259 ) : CmpInst(makeCmpResultType(LHS->getType()),
1260 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1261#ifndef NDEBUG
1262 AssertOK();
1263#endif
1264 }
1265
1266 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1267 /// @returns the predicate that would be the result if the operand were
1268 /// regarded as signed.
1269 /// Return the signed version of the predicate
1270 Predicate getSignedPredicate() const {
1271 return getSignedPredicate(getPredicate());
1272 }
1273
1274 /// This is a static version that you can use without an instruction.
1275 /// Return the signed version of the predicate.
1276 static Predicate getSignedPredicate(Predicate pred);
1277
1278 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1279 /// @returns the predicate that would be the result if the operand were
1280 /// regarded as unsigned.
1281 /// Return the unsigned version of the predicate
1282 Predicate getUnsignedPredicate() const {
1283 return getUnsignedPredicate(getPredicate());
1284 }
1285
1286 /// This is a static version that you can use without an instruction.
1287 /// Return the unsigned version of the predicate.
1288 static Predicate getUnsignedPredicate(Predicate pred);
1289
1290 /// Return true if this predicate is either EQ or NE. This also
1291 /// tests for commutativity.
1292 static bool isEquality(Predicate P) {
1293 return P == ICMP_EQ || P == ICMP_NE;
1294 }
1295
1296 /// Return true if this predicate is either EQ or NE. This also
1297 /// tests for commutativity.
1298 bool isEquality() const {
1299 return isEquality(getPredicate());
1300 }
1301
1302 /// @returns true if the predicate of this ICmpInst is commutative
1303 /// Determine if this relation is commutative.
1304 bool isCommutative() const { return isEquality(); }
1305
1306 /// Return true if the predicate is relational (not EQ or NE).
1307 ///
1308 bool isRelational() const {
1309 return !isEquality();
1310 }
1311
1312 /// Return true if the predicate is relational (not EQ or NE).
1313 ///
1314 static bool isRelational(Predicate P) {
1315 return !isEquality(P);
1316 }
1317
1318 /// Return true if the predicate is SGT or UGT.
1319 ///
1320 static bool isGT(Predicate P) {
1321 return P == ICMP_SGT || P == ICMP_UGT;
1322 }
1323
1324 /// Return true if the predicate is SLT or ULT.
1325 ///
1326 static bool isLT(Predicate P) {
1327 return P == ICMP_SLT || P == ICMP_ULT;
1328 }
1329
1330 /// Return true if the predicate is SGE or UGE.
1331 ///
1332 static bool isGE(Predicate P) {
1333 return P == ICMP_SGE || P == ICMP_UGE;
1334 }
1335
1336 /// Return true if the predicate is SLE or ULE.
1337 ///
1338 static bool isLE(Predicate P) {
1339 return P == ICMP_SLE || P == ICMP_ULE;
1340 }
1341
1342 /// Exchange the two operands to this instruction in such a way that it does
1343 /// not modify the semantics of the instruction. The predicate value may be
1344 /// changed to retain the same result if the predicate is order dependent
1345 /// (e.g. ult).
1346 /// Swap operands and adjust predicate.
1347 void swapOperands() {
1348 setPredicate(getSwappedPredicate());
1349 Op<0>().swap(Op<1>());
1350 }
1351
1352 // Methods for support type inquiry through isa, cast, and dyn_cast:
1353 static bool classof(const Instruction *I) {
1354 return I->getOpcode() == Instruction::ICmp;
1355 }
1356 static bool classof(const Value *V) {
1357 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1358 }
1359};
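
A sketch of swapOperands on an order-dependent predicate, using the no-insertion constructor above. X and Y are assumed i32 values; the helper name is hypothetical, and the returned instruction is detached (the caller must insert it).

#include "llvm/IR/Instructions.h"

llvm::ICmpInst *swappedCompareSketch(llvm::Value *X, llvm::Value *Y) {
  auto *Cmp = new llvm::ICmpInst(llvm::ICmpInst::ICMP_ULT, X, Y, "cmp");
  Cmp->swapOperands(); // operands become (Y, X); predicate flips to ICMP_UGT
  return Cmp;          // still computes the original X <u Y result
}
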
1360
1361//===----------------------------------------------------------------------===//
1362// FCmpInst Class
1363//===----------------------------------------------------------------------===//
1364
1365/// This instruction compares its operands according to the predicate given
1366/// to the constructor. It only operates on floating point values or packed
1367 /// vectors of floating point values. The operands must be of identical types.
1368/// Represents a floating point comparison operator.
1369class FCmpInst: public CmpInst {
1370 void AssertOK() {
1371 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1371, __extension__ __PRETTY_FUNCTION__))
;
1372 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1373, __extension__ __PRETTY_FUNCTION__))
1373 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1373, __extension__ __PRETTY_FUNCTION__))
;
1374 // Check that the operands are the right type
1375 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1376, __extension__ __PRETTY_FUNCTION__))
1376 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 1376, __extension__ __PRETTY_FUNCTION__))
;
1377 }
1378
1379protected:
1380 // Note: Instruction needs to be a friend here to call cloneImpl.
1381 friend class Instruction;
1382
1383 /// Clone an identical FCmpInst
1384 FCmpInst *cloneImpl() const;
1385
1386public:
1387 /// Constructor with insert-before-instruction semantics.
1388 FCmpInst(
1389 Instruction *InsertBefore, ///< Where to insert
1390 Predicate pred, ///< The predicate to use for the comparison
1391 Value *LHS, ///< The left-hand-side of the expression
1392 Value *RHS, ///< The right-hand-side of the expression
1393 const Twine &NameStr = "" ///< Name of the instruction
1394 ) : CmpInst(makeCmpResultType(LHS->getType()),
1395 Instruction::FCmp, pred, LHS, RHS, NameStr,
1396 InsertBefore) {
1397 AssertOK();
1398 }
1399
1400 /// Constructor with insert-at-end semantics.
1401 FCmpInst(
1402 BasicBlock &InsertAtEnd, ///< Block to insert into.
1403 Predicate pred, ///< The predicate to use for the comparison
1404 Value *LHS, ///< The left-hand-side of the expression
1405 Value *RHS, ///< The right-hand-side of the expression
1406 const Twine &NameStr = "" ///< Name of the instruction
1407 ) : CmpInst(makeCmpResultType(LHS->getType()),
1408 Instruction::FCmp, pred, LHS, RHS, NameStr,
1409 &InsertAtEnd) {
1410 AssertOK();
1411 }
1412
1413 /// Constructor with no-insertion semantics
1414 FCmpInst(
1415 Predicate Pred, ///< The predicate to use for the comparison
1416 Value *LHS, ///< The left-hand-side of the expression
1417 Value *RHS, ///< The right-hand-side of the expression
1418 const Twine &NameStr = "", ///< Name of the instruction
1419 Instruction *FlagsSource = nullptr
1420 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1421 RHS, NameStr, nullptr, FlagsSource) {
1422 AssertOK();
1423 }
1424
1425 /// @returns true if the predicate of this instruction is EQ or NE.
1426 /// Determine if this is an equality predicate.
1427 static bool isEquality(Predicate Pred) {
1428 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1429 Pred == FCMP_UNE;
1430 }
1431
1432 /// @returns true if the predicate of this instruction is EQ or NE.
1433 /// Determine if this is an equality predicate.
1434 bool isEquality() const { return isEquality(getPredicate()); }
1435
1436 /// @returns true if the predicate of this instruction is commutative.
1437 /// Determine if this is a commutative predicate.
1438 bool isCommutative() const {
1439 return isEquality() ||
1440 getPredicate() == FCMP_FALSE ||
1441 getPredicate() == FCMP_TRUE ||
1442 getPredicate() == FCMP_ORD ||
1443 getPredicate() == FCMP_UNO;
1444 }
1445
1446 /// @returns true if the predicate is relational (not EQ or NE).
1447 /// Determine if this a relational predicate.
1448 bool isRelational() const { return !isEquality(); }
1449
1450 /// Exchange the two operands to this instruction in such a way that it does
1451 /// not modify the semantics of the instruction. The predicate value may be
1452 /// changed to retain the same result if the predicate is order dependent
1453 /// (e.g. olt).
1454 /// Swap operands and adjust predicate.
1455 void swapOperands() {
1456 setPredicate(getSwappedPredicate());
1457 Op<0>().swap(Op<1>());
1458 }
1459
1460 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1461 static bool classof(const Instruction *I) {
1462 return I->getOpcode() == Instruction::FCmp;
1463 }
1464 static bool classof(const Value *V) {
1465 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1466 }
1467};
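
A minimal sketch of the commutativity rule above (hypothetical helper; A and B are assumed floating-point values of the same type). Detached instructions must be heap-allocated and destroyed explicitly.

#include "llvm/IR/Instructions.h"

bool commutativitySketch(llvm::Value *A, llvm::Value *B) {
  llvm::FCmpInst *Eq = new llvm::FCmpInst(llvm::FCmpInst::FCMP_OEQ, A, B);
  llvm::FCmpInst *Lt = new llvm::FCmpInst(llvm::FCmpInst::FCMP_OLT, A, B);
  bool Result = Eq->isCommutative() && !Lt->isCommutative(); // true
  Eq->deleteValue(); // detached instructions are destroyed via deleteValue()
  Lt->deleteValue();
  return Result;
}
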
1468
1469//===----------------------------------------------------------------------===//
1470/// This class represents a function call, abstracting a target
1471 /// machine's calling convention. This class uses the low bit of the SubClassData
1472/// field to indicate whether or not this is a tail call. The rest of the bits
1473/// hold the calling convention of the call.
1474///
1475class CallInst : public CallBase {
1476 CallInst(const CallInst &CI);
1477
1478 /// Construct a CallInst given a range of arguments.
1479 /// Construct a CallInst from a range of arguments.
1480 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1481 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1482 Instruction *InsertBefore);
1483
1484 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1485 const Twine &NameStr, Instruction *InsertBefore)
1486 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1487
1488 /// Construct a CallInst given a range of arguments.
1489 /// Construct a CallInst from a range of arguments.
1490 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1491 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1492 BasicBlock *InsertAtEnd);
1493
1494 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1495 Instruction *InsertBefore);
1496
1497 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1498 BasicBlock *InsertAtEnd);
1499
1500 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1501 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1502 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1503
1504 /// Compute the number of operands to allocate.
1505 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1506 // We need one operand for the called function, plus the input operand
1507 // counts provided.
1508 return 1 + NumArgs + NumBundleInputs;
1509 }
1510
1511protected:
1512 // Note: Instruction needs to be a friend here to call cloneImpl.
1513 friend class Instruction;
1514
1515 CallInst *cloneImpl() const;
1516
1517public:
1518 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1519 Instruction *InsertBefore = nullptr) {
1520 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1521 }
1522
1523 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1524 const Twine &NameStr,
1525 Instruction *InsertBefore = nullptr) {
1526 return new (ComputeNumOperands(Args.size()))
1527 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1528 }
1529
1530 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1531 ArrayRef<OperandBundleDef> Bundles = None,
1532 const Twine &NameStr = "",
1533 Instruction *InsertBefore = nullptr) {
1534 const int NumOperands =
1535 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1536 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1537
1538 return new (NumOperands, DescriptorBytes)
1539 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1540 }
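
A hedged sketch of the bundle-taking overload above; all names are assumptions. With one "deopt" bundle carrying one input, the call ends up with 1 (callee) + Args.size() + 1 (bundle input) operands, matching ComputeNumOperands.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include <vector>

llvm::CallInst *deoptCallSketch(llvm::FunctionType *FTy, llvm::Value *Callee,
                                llvm::ArrayRef<llvm::Value *> Args,
                                llvm::Value *State,
                                llvm::Instruction *InsertPt) {
  // One "deopt" bundle with a single input value.
  llvm::OperandBundleDef Deopt("deopt", std::vector<llvm::Value *>{State});
  return llvm::CallInst::Create(FTy, Callee, Args, {Deopt}, "call", InsertPt);
}
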
1541
1542 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1543 BasicBlock *InsertAtEnd) {
1544 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1545 }
1546
1547 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1548 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1549 return new (ComputeNumOperands(Args.size()))
1550 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1551 }
1552
1553 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1554 ArrayRef<OperandBundleDef> Bundles,
1555 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1556 const int NumOperands =
1557 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1558 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1559
1560 return new (NumOperands, DescriptorBytes)
1561 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1562 }
1563
1564 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1565 Instruction *InsertBefore = nullptr) {
1566 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1567 InsertBefore);
1568 }
1569
1570 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1571 ArrayRef<OperandBundleDef> Bundles = None,
1572 const Twine &NameStr = "",
1573 Instruction *InsertBefore = nullptr) {
1574 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1575 NameStr, InsertBefore);
1576 }
1577
1578 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1579 const Twine &NameStr,
1580 Instruction *InsertBefore = nullptr) {
1581 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1582 InsertBefore);
1583 }
1584
1585 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1586 BasicBlock *InsertAtEnd) {
1587 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1588 InsertAtEnd);
1589 }
1590
1591 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1592 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1593 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1594 InsertAtEnd);
1595 }
1596
1597 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1598 ArrayRef<OperandBundleDef> Bundles,
1599 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1600 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1601 NameStr, InsertAtEnd);
1602 }
1603
1604 /// Create a clone of \p CI with a different set of operand bundles and
1605 /// insert it before \p InsertPt.
1606 ///
1607 /// The returned call instruction is identical to \p CI in every way except that
1608 /// the operand bundles for the new instruction are set to the operand bundles
1609 /// in \p Bundles.
1610 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1611 Instruction *InsertPt = nullptr);
1612
1613 /// Generate the IR for a call to malloc:
1614 /// 1. Compute the malloc call's argument as the specified type's size,
1615 /// possibly multiplied by the array size if the array size is not
1616 /// constant 1.
1617 /// 2. Call malloc with that argument.
1618 /// 3. Bitcast the result of the malloc call to the specified type.
1619 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1620 Type *AllocTy, Value *AllocSize,
1621 Value *ArraySize = nullptr,
1622 Function *MallocF = nullptr,
1623 const Twine &Name = "");
1624 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1625 Type *AllocTy, Value *AllocSize,
1626 Value *ArraySize = nullptr,
1627 Function *MallocF = nullptr,
1628 const Twine &Name = "");
1629 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1630 Type *AllocTy, Value *AllocSize,
1631 Value *ArraySize = nullptr,
1632 ArrayRef<OperandBundleDef> Bundles = None,
1633 Function *MallocF = nullptr,
1634 const Twine &Name = "");
1635 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1636 Type *AllocTy, Value *AllocSize,
1637 Value *ArraySize = nullptr,
1638 ArrayRef<OperandBundleDef> Bundles = None,
1639 Function *MallocF = nullptr,
1640 const Twine &Name = "");
1641 /// Generate the IR for a call to the builtin free function.
1642 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1643 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1644 static Instruction *CreateFree(Value *Source,
1645 ArrayRef<OperandBundleDef> Bundles,
1646 Instruction *InsertBefore);
1647 static Instruction *CreateFree(Value *Source,
1648 ArrayRef<OperandBundleDef> Bundles,
1649 BasicBlock *InsertAtEnd);
1650
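
For illustration (hypothetical helper; DL and InsertPt assumed to come from the module being edited), a sketch of the malloc helper above following steps 1-3:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

llvm::Instruction *mallocSketch(const llvm::DataLayout &DL,
                                llvm::Type *PayloadTy,
                                llvm::Instruction *InsertPt) {
  llvm::Type *IntPtrTy = DL.getIntPtrType(PayloadTy->getContext());
  llvm::Value *Size = llvm::ConstantExpr::getSizeOf(PayloadTy);
  // The returned instruction is already bitcast to PayloadTy* (step 3 above).
  return llvm::CallInst::CreateMalloc(InsertPt, IntPtrTy, PayloadTy, Size,
                                      /*ArraySize=*/nullptr,
                                      /*MallocF=*/nullptr, "payload");
}
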
1651 // Note that 'musttail' implies 'tail'.
1652 enum TailCallKind : unsigned {
1653 TCK_None = 0,
1654 TCK_Tail = 1,
1655 TCK_MustTail = 2,
1656 TCK_NoTail = 3,
1657 TCK_LAST = TCK_NoTail
1658 };
1659
1660 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1661 static_assert(
1662 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1663 "Bitfields must be contiguous");
1664
1665 TailCallKind getTailCallKind() const {
1666 return getSubclassData<TailCallKindField>();
1667 }
1668
1669 bool isTailCall() const {
1670 TailCallKind Kind = getTailCallKind();
1671 return Kind == TCK_Tail || Kind == TCK_MustTail;
1672 }
1673
1674 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1675
1676 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1677
1678 void setTailCallKind(TailCallKind TCK) {
1679 setSubclassData<TailCallKindField>(TCK);
1680 }
1681
1682 void setTailCall(bool IsTc = true) {
1683 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1684 }
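
A short sketch of the tail-call accessors (assuming `CI` is an existing
CallInst *):

    CI->setTailCallKind(CallInst::TCK_MustTail);
    assert(CI->isTailCall());      // musttail implies tail, per the note above
    assert(CI->isMustTailCall());
    assert(!CI->isNoTailCall());
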
1685
1686 /// Return true if the call can return twice
1687 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1688 void setCanReturnTwice() {
1689 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1690 }
1691
1692 // Methods for support type inquiry through isa, cast, and dyn_cast:
1693 static bool classof(const Instruction *I) {
1694 return I->getOpcode() == Instruction::Call;
1695 }
1696 static bool classof(const Value *V) {
1697 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1698 }
1699
1700 /// Updates profile metadata by scaling it by \p S / \p T.
1701 void updateProfWeight(uint64_t S, uint64_t T);
1702
1703private:
1704 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1705 // method so that subclasses cannot accidentally use it.
1706 template <typename Bitfield>
1707 void setSubclassData(typename Bitfield::Type Value) {
1708 Instruction::setSubclassData<Bitfield>(Value);
1709 }
1710};
1711
1712CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1713 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1714 BasicBlock *InsertAtEnd)
1715 : CallBase(Ty->getReturnType(), Instruction::Call,
1716 OperandTraits<CallBase>::op_end(this) -
1717 (Args.size() + CountBundleInputs(Bundles) + 1),
1718 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1719 InsertAtEnd) {
1720 init(Ty, Func, Args, Bundles, NameStr);
1721}
1722
1723CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1724 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1725 Instruction *InsertBefore)
1726 : CallBase(Ty->getReturnType(), Instruction::Call,
1727 OperandTraits<CallBase>::op_end(this) -
1728 (Args.size() + CountBundleInputs(Bundles) + 1),
1729 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1730 InsertBefore) {
1731 init(Ty, Func, Args, Bundles, NameStr);
1732}
1733
1734//===----------------------------------------------------------------------===//
1735// SelectInst Class
1736//===----------------------------------------------------------------------===//
1737
1738/// This class represents the LLVM 'select' instruction.
1739///
1740class SelectInst : public Instruction {
1741 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1742 Instruction *InsertBefore)
1743 : Instruction(S1->getType(), Instruction::Select,
1744 &Op<0>(), 3, InsertBefore) {
1745 init(C, S1, S2);
1746 setName(NameStr);
1747 }
1748
1749 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1750 BasicBlock *InsertAtEnd)
1751 : Instruction(S1->getType(), Instruction::Select,
1752 &Op<0>(), 3, InsertAtEnd) {
1753 init(C, S1, S2);
1754 setName(NameStr);
1755 }
1756
1757 void init(Value *C, Value *S1, Value *S2) {
1758 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1759 Op<0>() = C;
1760 Op<1>() = S1;
1761 Op<2>() = S2;
1762 }
1763
1764protected:
1765 // Note: Instruction needs to be a friend here to call cloneImpl.
1766 friend class Instruction;
1767
1768 SelectInst *cloneImpl() const;
1769
1770public:
1771 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1772 const Twine &NameStr = "",
1773 Instruction *InsertBefore = nullptr,
1774 Instruction *MDFrom = nullptr) {
1775 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1776 if (MDFrom)
1777 Sel->copyMetadata(*MDFrom);
1778 return Sel;
1779 }
1780
1781 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1782 const Twine &NameStr,
1783 BasicBlock *InsertAtEnd) {
1784 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1785 }
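
Usage sketch (illustrative; `Cmp`, `A`, `B`, and `InsertPt` are assumed
values of suitable types, with `Cmp` an i1 condition):

    // %max = select i1 %cmp, i32 %a, i32 %b
    assert(!SelectInst::areInvalidOperands(Cmp, A, B)); // null means valid
    SelectInst *Max = SelectInst::Create(Cmp, A, B, "max", InsertPt);
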
1786
1787 const Value *getCondition() const { return Op<0>(); }
1788 const Value *getTrueValue() const { return Op<1>(); }
1789 const Value *getFalseValue() const { return Op<2>(); }
1790 Value *getCondition() { return Op<0>(); }
1791 Value *getTrueValue() { return Op<1>(); }
1792 Value *getFalseValue() { return Op<2>(); }
1793
1794 void setCondition(Value *V) { Op<0>() = V; }
1795 void setTrueValue(Value *V) { Op<1>() = V; }
1796 void setFalseValue(Value *V) { Op<2>() = V; }
1797
1798 /// Swap the true and false values of the select instruction.
1799 /// This doesn't swap prof metadata.
1800 void swapValues() { Op<1>().swap(Op<2>()); }
1801
1802 /// Return a string if the specified operands are invalid
1803 /// for a select operation, otherwise return null.
1804 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1805
1806 /// Transparently provide more efficient getOperand methods.
1807 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1808
1809 OtherOps getOpcode() const {
1810 return static_cast<OtherOps>(Instruction::getOpcode());
1811 }
1812
1813 // Methods for support type inquiry through isa, cast, and dyn_cast:
1814 static bool classof(const Instruction *I) {
1815 return I->getOpcode() == Instruction::Select;
1816 }
1817 static bool classof(const Value *V) {
1818 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1819 }
1820};
1821
1822template <>
1823struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1824};
1825
1826DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1827
1828//===----------------------------------------------------------------------===//
1829// VAArgInst Class
1830//===----------------------------------------------------------------------===//
1831
1832/// This class represents the va_arg llvm instruction, which returns
1833/// an argument of the specified type given a va_list and increments that list
1834///
1835class VAArgInst : public UnaryInstruction {
1836protected:
1837 // Note: Instruction needs to be a friend here to call cloneImpl.
1838 friend class Instruction;
1839
1840 VAArgInst *cloneImpl() const;
1841
1842public:
1843 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1844 Instruction *InsertBefore = nullptr)
1845 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1846 setName(NameStr);
1847 }
1848
1849 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1850 BasicBlock *InsertAtEnd)
1851 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1852 setName(NameStr);
1853 }
1854
1855 Value *getPointerOperand() { return getOperand(0); }
1856 const Value *getPointerOperand() const { return getOperand(0); }
1857 static unsigned getPointerOperandIndex() { return 0U; }
1858
1859 // Methods for support type inquiry through isa, cast, and dyn_cast:
1860 static bool classof(const Instruction *I) {
1861 return I->getOpcode() == VAArg;
1862 }
1863 static bool classof(const Value *V) {
1864 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1865 }
1866};
1867
1868//===----------------------------------------------------------------------===//
1869// ExtractElementInst Class
1870//===----------------------------------------------------------------------===//
1871
1872/// This instruction extracts a single (scalar)
1873/// element from a VectorType value
1874///
1875class ExtractElementInst : public Instruction {
1876 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1877 Instruction *InsertBefore = nullptr);
1878 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1879 BasicBlock *InsertAtEnd);
1880
1881protected:
1882 // Note: Instruction needs to be a friend here to call cloneImpl.
1883 friend class Instruction;
1884
1885 ExtractElementInst *cloneImpl() const;
1886
1887public:
1888 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1889 const Twine &NameStr = "",
1890 Instruction *InsertBefore = nullptr) {
1891 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1892 }
1893
1894 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1895 const Twine &NameStr,
1896 BasicBlock *InsertAtEnd) {
1897 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1898 }
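
Usage sketch (illustrative; `Vec` is an assumed <4 x i32> value, `Ctx` its
LLVMContext, `InsertPt` an insertion point):

    // %e = extractelement <4 x i32> %vec, i32 1
    Value *One = ConstantInt::get(Type::getInt32Ty(Ctx), 1);
    assert(ExtractElementInst::isValidOperands(Vec, One));
    ExtractElementInst *E = ExtractElementInst::Create(Vec, One, "e", InsertPt);
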
1899
1900 /// Return true if an extractelement instruction can be
1901 /// formed with the specified operands.
1902 static bool isValidOperands(const Value *Vec, const Value *Idx);
1903
1904 Value *getVectorOperand() { return Op<0>(); }
1905 Value *getIndexOperand() { return Op<1>(); }
1906 const Value *getVectorOperand() const { return Op<0>(); }
1907 const Value *getIndexOperand() const { return Op<1>(); }
1908
1909 VectorType *getVectorOperandType() const {
1910 return cast<VectorType>(getVectorOperand()->getType());
1911 }
1912
1913 /// Transparently provide more efficient getOperand methods.
1914 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1915
1916 // Methods for support type inquiry through isa, cast, and dyn_cast:
1917 static bool classof(const Instruction *I) {
1918 return I->getOpcode() == Instruction::ExtractElement;
1919 }
1920 static bool classof(const Value *V) {
1921 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1922 }
1923};
1924
1925template <>
1926struct OperandTraits<ExtractElementInst> :
1927 public FixedNumOperandTraits<ExtractElementInst, 2> {
1928};
1929
1930DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1931
1932//===----------------------------------------------------------------------===//
1933// InsertElementInst Class
1934//===----------------------------------------------------------------------===//
1935
1936/// This instruction inserts a single (scalar)
1937/// element into a VectorType value
1938///
1939class InsertElementInst : public Instruction {
1940 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1941 const Twine &NameStr = "",
1942 Instruction *InsertBefore = nullptr);
1943 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1944 BasicBlock *InsertAtEnd);
1945
1946protected:
1947 // Note: Instruction needs to be a friend here to call cloneImpl.
1948 friend class Instruction;
1949
1950 InsertElementInst *cloneImpl() const;
1951
1952public:
1953 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1954 const Twine &NameStr = "",
1955 Instruction *InsertBefore = nullptr) {
1956 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1957 }
1958
1959 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1960 const Twine &NameStr,
1961 BasicBlock *InsertAtEnd) {
1962 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1963 }
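
Usage sketch (illustrative; `Vec`, `Val`, `Ctx`, and `InsertPt` are assumed):

    // %v2 = insertelement <4 x i32> %vec, i32 %val, i32 0
    Value *Zero = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
    InsertElementInst *V2 =
        InsertElementInst::Create(Vec, Val, Zero, "v2", InsertPt);
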
1964
1965 /// Return true if an insertelement instruction can be
1966 /// formed with the specified operands.
1967 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1968 const Value *Idx);
1969
1970 /// Overload to return most specific vector type.
1971 ///
1972 VectorType *getType() const {
1973 return cast<VectorType>(Instruction::getType());
1974 }
1975
1976 /// Transparently provide more efficient getOperand methods.
1977 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1978
1979 // Methods for support type inquiry through isa, cast, and dyn_cast:
1980 static bool classof(const Instruction *I) {
1981 return I->getOpcode() == Instruction::InsertElement;
1982 }
1983 static bool classof(const Value *V) {
1984 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1985 }
1986};
1987
1988template <>
1989struct OperandTraits<InsertElementInst> :
1990 public FixedNumOperandTraits<InsertElementInst, 3> {
1991};
1992
1993DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1994
1995//===----------------------------------------------------------------------===//
1996// ShuffleVectorInst Class
1997//===----------------------------------------------------------------------===//
1998
1999constexpr int UndefMaskElem = -1;
2000
2001/// This instruction constructs a fixed permutation of two
2002/// input vectors.
2003///
2004/// For each element of the result vector, the shuffle mask selects an element
2005/// from one of the input vectors to copy to the result. Non-negative elements
2006/// in the mask represent an index into the concatenated pair of input vectors.
2007/// UndefMaskElem (-1) specifies that the result element is undefined.
2008///
2009/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2010/// requirement may be relaxed in the future.
2011class ShuffleVectorInst : public Instruction {
2012 SmallVector<int, 4> ShuffleMask;
2013 Constant *ShuffleMaskForBitcode;
2014
2015protected:
2016 // Note: Instruction needs to be a friend here to call cloneImpl.
2017 friend class Instruction;
2018
2019 ShuffleVectorInst *cloneImpl() const;
2020
2021public:
2022 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2023 const Twine &NameStr = "",
2024 Instruction *InsertBefor = nullptr);
2025 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2026 const Twine &NameStr, BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2028 const Twine &NameStr = "",
2029 Instruction *InsertBefor = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
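
Usage sketch (illustrative; `Vec` is an assumed fixed <4 x i32> value and
`InsertPt` an insertion point; the unused second operand is passed as undef):

    // Reverse the lanes: mask elements 0..3 pick from V1, 4..7 from V2.
    int RevMask[] = {3, 2, 1, 0};
    auto *Rev = new ShuffleVectorInst(Vec, UndefValue::get(Vec->getType()),
                                      RevMask, "rev", InsertPt);
    assert(Rev->isReverse());
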
2032
2033 void *operator new(size_t S) { return User::operator new(S, 2); }
2034 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2035
2036 /// Swap the operands and adjust the mask to preserve the semantics
2037 /// of the instruction.
2038 void commute();
2039
2040 /// Return true if a shufflevector instruction can be
2041 /// formed with the specified operands.
2042 static bool isValidOperands(const Value *V1, const Value *V2,
2043 const Value *Mask);
2044 static bool isValidOperands(const Value *V1, const Value *V2,
2045 ArrayRef<int> Mask);
2046
2047 /// Overload to return most specific vector type.
2048 ///
2049 VectorType *getType() const {
2050 return cast<VectorType>(Instruction::getType());
2051 }
2052
2053 /// Transparently provide more efficient getOperand methods.
2054 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2055
2056 /// Return the shuffle mask value of this instruction for the given element
2057 /// index. Return UndefMaskElem if the element is undef.
2058 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059
2060 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2061 /// elements of the mask are returned as UndefMaskElem.
2062 static void getShuffleMask(const Constant *Mask,
2063 SmallVectorImpl<int> &Result);
2064
2065 /// Return the mask for this instruction as a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2068 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2069 }
2070
2071 /// Return the mask for this instruction, for use in bitcode.
2072 ///
2073 /// TODO: This is temporary until we decide a new bitcode encoding for
2074 /// shufflevector.
2075 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2076
2077 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2078 Type *ResultTy);
2079
2080 void setShuffleMask(ArrayRef<int> Mask);
2081
2082 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083
2084 /// Return true if this shuffle returns a vector with a different number of
2085 /// elements than its source vectors.
2086 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088 bool changesLength() const {
2089 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090 ->getElementCount()
2091 .getKnownMinValue();
2092 unsigned NumMaskElts = ShuffleMask.size();
2093 return NumSourceElts != NumMaskElts;
2094 }
2095
2096 /// Return true if this shuffle returns a vector with a greater number of
2097 /// elements than its source vectors.
2098 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099 bool increasesLength() const {
2100 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101 ->getElementCount()
2102 .getKnownMinValue();
2103 unsigned NumMaskElts = ShuffleMask.size();
2104 return NumSourceElts < NumMaskElts;
2105 }
2106
2107 /// Return true if this shuffle mask chooses elements from exactly one source
2108 /// vector.
2109 /// Example: <7,5,undef,7>
2110 /// This assumes that vector operands are the same length as the mask.
2111 static bool isSingleSourceMask(ArrayRef<int> Mask);
2112 static bool isSingleSourceMask(const Constant *Mask) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isSingleSourceMask(MaskAsInts);
2117 }
2118
2119 /// Return true if this shuffle chooses elements from exactly one source
2120 /// vector without changing the length of that vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isSingleSource() const {
2124 return !changesLength() && isSingleSourceMask(ShuffleMask);
2125 }
2126
2127 /// Return true if this shuffle mask chooses elements from exactly one source
2128 /// vector without lane crossings. A shuffle using this mask is not
2129 /// necessarily a no-op because it may change the number of elements from its
2130 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131 /// Example: <undef,undef,2,3>
2132 static bool isIdentityMask(ArrayRef<int> Mask);
2133 static bool isIdentityMask(const Constant *Mask) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isIdentityMask(MaskAsInts);
2138 }
2139
2140 /// Return true if this shuffle chooses elements from exactly one source
2141 /// vector without lane crossings and does not change the number of elements
2142 /// from its input vectors.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144 bool isIdentity() const {
2145 return !changesLength() && isIdentityMask(ShuffleMask);
2146 }
2147
2148 /// Return true if this shuffle lengthens exactly one source vector with
2149 /// undefs in the high elements.
2150 bool isIdentityWithPadding() const;
2151
2152 /// Return true if this shuffle extracts the first N elements of exactly one
2153 /// source vector.
2154 bool isIdentityWithExtract() const;
2155
2156 /// Return true if this shuffle concatenates its 2 source vectors. This
2157 /// returns false if either input is undefined. In that case, the shuffle
2158 /// is better classified as an identity with padding operation.
2159 bool isConcat() const;
2160
2161 /// Return true if this shuffle mask chooses elements from its source vectors
2162 /// without lane crossings. A shuffle using this mask would be
2163 /// equivalent to a vector select with a constant condition operand.
2164 /// Example: <4,1,6,undef>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// This assumes that vector operands are the same length as the mask
2168 /// (a length-changing shuffle can never be equivalent to a vector select).
2169 static bool isSelectMask(ArrayRef<int> Mask);
2170 static bool isSelectMask(const Constant *Mask) {
2171 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2172 SmallVector<int, 16> MaskAsInts;
2173 getShuffleMask(Mask, MaskAsInts);
2174 return isSelectMask(MaskAsInts);
2175 }
2176
2177 /// Return true if this shuffle chooses elements from its source vectors
2178 /// without lane crossings and all operands have the same number of elements.
2179 /// In other words, this shuffle is equivalent to a vector select with a
2180 /// constant condition operand.
2181 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182 /// This returns false if the mask does not choose from both input vectors.
2183 /// In that case, the shuffle is better classified as an identity shuffle.
2184 /// TODO: Optionally allow length-changing shuffles.
2185 bool isSelect() const {
2186 return !changesLength() && isSelectMask(ShuffleMask);
2187 }
2188
2189 /// Return true if this shuffle mask swaps the order of elements from exactly
2190 /// one source vector.
2191 /// Example: <7,6,undef,4>
2192 /// This assumes that vector operands are the same length as the mask.
2193 static bool isReverseMask(ArrayRef<int> Mask);
2194 static bool isReverseMask(const Constant *Mask) {
2195 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2196 SmallVector<int, 16> MaskAsInts;
2197 getShuffleMask(Mask, MaskAsInts);
2198 return isReverseMask(MaskAsInts);
2199 }
2200
2201 /// Return true if this shuffle swaps the order of elements from exactly
2202 /// one source vector.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204 /// TODO: Optionally allow length-changing shuffles.
2205 bool isReverse() const {
2206 return !changesLength() && isReverseMask(ShuffleMask);
2207 }
2208
2209 /// Return true if this shuffle mask chooses all elements with the same value
2210 /// as the first element of exactly one source vector.
2211 /// Example: <4,undef,undef,4>
2212 /// This assumes that vector operands are the same length as the mask.
2213 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214 static bool isZeroEltSplatMask(const Constant *Mask) {
2215 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2216 SmallVector<int, 16> MaskAsInts;
2217 getShuffleMask(Mask, MaskAsInts);
2218 return isZeroEltSplatMask(MaskAsInts);
2219 }
2220
2221 /// Return true if all elements of this shuffle are the same value as the
2222 /// first element of exactly one source vector without changing the length
2223 /// of that vector.
2224 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225 /// TODO: Optionally allow length-changing shuffles.
2226 /// TODO: Optionally allow splats from other elements.
2227 bool isZeroEltSplat() const {
2228 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229 }
2230
2231 /// Return true if this shuffle mask is a transpose mask.
2232 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233 /// even- or odd-numbered vector elements from two n-dimensional source
2234 /// vectors and write each result into consecutive elements of an
2235 /// n-dimensional destination vector. Two shuffles are necessary to complete
2236 /// the transpose, one for the even elements and another for the odd elements.
2237 /// This description closely follows how the TRN1 and TRN2 AArch64
2238 /// instructions operate.
2239 ///
2240 /// For example, a simple 2x2 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b >
2244 /// m1 = < c, d >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249 ///
2250 /// For matrices having greater than n columns, the resulting nx2 transposed
2251 /// matrix is stored in two result vectors such that one vector contains
2252 /// interleaved elements from all the even-numbered rows and the other vector
2253 /// contains interleaved elements from all the odd-numbered rows. For example,
2254 /// a 2x4 matrix can be transposed with:
2255 ///
2256 /// ; Original matrix
2257 /// m0 = < a, b, c, d >
2258 /// m1 = < e, f, g, h >
2259 ///
2260 /// ; Transposed matrix
2261 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263 static bool isTransposeMask(ArrayRef<int> Mask);
2264 static bool isTransposeMask(const Constant *Mask) {
2265 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isTransposeMask(MaskAsInts);
2269 }
2270
2271 /// Return true if this shuffle transposes the elements of its inputs without
2272 /// changing the length of the vectors. This operation may also be known as a
2273 /// merge or interleave. See the description for isTransposeMask() for the
2274 /// exact specification.
2275 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276 bool isTranspose() const {
2277 return !changesLength() && isTransposeMask(ShuffleMask);
2278 }
2279
2280 /// Return true if this shuffle mask is an extract subvector mask.
2281 /// A valid extract subvector mask returns a smaller vector from a single
2282 /// source operand. The base extraction index is returned as well.
2283 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284 int &Index);
2285 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286 int &Index) {
2287 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2288 // Not possible to express a shuffle mask for a scalable vector for this
2289 // case.
2290 if (isa<ScalableVectorType>(Mask->getType()))
2291 return false;
2292 SmallVector<int, 16> MaskAsInts;
2293 getShuffleMask(Mask, MaskAsInts);
2294 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295 }
2296
2297 /// Return true if this shuffle mask is an extract subvector mask.
2298 bool isExtractSubvectorMask(int &Index) const {
2299 // Not possible to express a shuffle mask for a scalable vector for this
2300 // case.
2301 if (isa<ScalableVectorType>(getType()))
2302 return false;
2303
2304 int NumSrcElts =
2305 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2306 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2307 }
2308
2309 /// Change values in a shuffle permute mask assuming the two vector operands
2310 /// of length InVecNumElts have swapped position.
2311 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2312 unsigned InVecNumElts) {
2313 for (int &Idx : Mask) {
2314 if (Idx == -1)
2315 continue;
2316 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2317 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2318 "shufflevector mask index out of range");
2319 }
2320 }
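
Worked example: with two 4-element operands (InVecNumElts == 4), swapping the
operands turns mask <0,5,2,7> into <4,1,6,3>; indices below InVecNumElts gain
InVecNumElts, the rest lose it, and -1 (undef) entries are left untouched:

    SmallVector<int, 4> M = {0, 5, 2, 7};
    ShuffleVectorInst::commuteShuffleMask(M, /*InVecNumElts=*/4);
    // M is now {4, 1, 6, 3}.
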
2321
2322 // Methods for support type inquiry through isa, cast, and dyn_cast:
2323 static bool classof(const Instruction *I) {
2324 return I->getOpcode() == Instruction::ShuffleVector;
2325 }
2326 static bool classof(const Value *V) {
2327 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2328 }
2329};
2330
2331template <>
2332struct OperandTraits<ShuffleVectorInst>
2333 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2334
2335DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2336
2337//===----------------------------------------------------------------------===//
2338// ExtractValueInst Class
2339//===----------------------------------------------------------------------===//
2340
2341/// This instruction extracts a struct member or array
2342/// element value from an aggregate value.
2343///
2344class ExtractValueInst : public UnaryInstruction {
2345 SmallVector<unsigned, 4> Indices;
2346
2347 ExtractValueInst(const ExtractValueInst &EVI);
2348
2349 /// Constructors - Create an extractvalue instruction with a base aggregate
2350 /// value and a list of indices. The first ctor can optionally insert before
2351 /// an existing instruction, the second appends the new instruction to the
2352 /// specified BasicBlock.
2353 inline ExtractValueInst(Value *Agg,
2354 ArrayRef<unsigned> Idxs,
2355 const Twine &NameStr,
2356 Instruction *InsertBefore);
2357 inline ExtractValueInst(Value *Agg,
2358 ArrayRef<unsigned> Idxs,
2359 const Twine &NameStr, BasicBlock *InsertAtEnd);
2360
2361 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2362
2363protected:
2364 // Note: Instruction needs to be a friend here to call cloneImpl.
2365 friend class Instruction;
2366
2367 ExtractValueInst *cloneImpl() const;
2368
2369public:
2370 static ExtractValueInst *Create(Value *Agg,
2371 ArrayRef<unsigned> Idxs,
2372 const Twine &NameStr = "",
2373 Instruction *InsertBefore = nullptr) {
2374 return new
2375 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2376 }
2377
2378 static ExtractValueInst *Create(Value *Agg,
2379 ArrayRef<unsigned> Idxs,
2380 const Twine &NameStr,
2381 BasicBlock *InsertAtEnd) {
2382 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2383 }
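
Usage sketch (illustrative; `Agg` is an assumed value of type
{i32, {i8, i64}}, `Ctx` its LLVMContext, `InsertPt` an insertion point):

    // %f = extractvalue {i32, {i8, i64}} %agg, 1, 0   ; the inner i8
    unsigned Idxs[] = {1, 0};
    assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
           Type::getInt8Ty(Ctx));          // see getIndexedType below
    ExtractValueInst *F = ExtractValueInst::Create(Agg, Idxs, "f", InsertPt);
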
2384
2385 /// Returns the type of the element that would be extracted
2386 /// with an extractvalue instruction with the specified parameters.
2387 ///
2388 /// Null is returned if the indices are invalid for the specified type.
2389 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2390
2391 using idx_iterator = const unsigned*;
2392
2393 inline idx_iterator idx_begin() const { return Indices.begin(); }
2394 inline idx_iterator idx_end() const { return Indices.end(); }
2395 inline iterator_range<idx_iterator> indices() const {
2396 return make_range(idx_begin(), idx_end());
2397 }
2398
2399 Value *getAggregateOperand() {
2400 return getOperand(0);
2401 }
2402 const Value *getAggregateOperand() const {
2403 return getOperand(0);
2404 }
2405 static unsigned getAggregateOperandIndex() {
2406 return 0U; // get index for modifying correct operand
2407 }
2408
2409 ArrayRef<unsigned> getIndices() const {
2410 return Indices;
2411 }
2412
2413 unsigned getNumIndices() const {
2414 return (unsigned)Indices.size();
2415 }
2416
2417 bool hasIndices() const {
2418 return true;
2419 }
2420
2421 // Methods for support type inquiry through isa, cast, and dyn_cast:
2422 static bool classof(const Instruction *I) {
2423 return I->getOpcode() == Instruction::ExtractValue;
2424 }
2425 static bool classof(const Value *V) {
2426 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2427 }
2428};
2429
2430ExtractValueInst::ExtractValueInst(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr,
2433 Instruction *InsertBefore)
2434 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2435 ExtractValue, Agg, InsertBefore) {
2436 init(Idxs, NameStr);
2437}
2438
2439ExtractValueInst::ExtractValueInst(Value *Agg,
2440 ArrayRef<unsigned> Idxs,
2441 const Twine &NameStr,
2442 BasicBlock *InsertAtEnd)
2443 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2444 ExtractValue, Agg, InsertAtEnd) {
2445 init(Idxs, NameStr);
2446}
2447
2448//===----------------------------------------------------------------------===//
2449// InsertValueInst Class
2450//===----------------------------------------------------------------------===//
2451
2452 /// This instruction inserts a struct field or array element
2453/// value into an aggregate value.
2454///
2455class InsertValueInst : public Instruction {
2456 SmallVector<unsigned, 4> Indices;
2457
2458 InsertValueInst(const InsertValueInst &IVI);
2459
2460 /// Constructors - Create an insertvalue instruction with a base aggregate
2461 /// value, a value to insert, and a list of indices. The first ctor can
2462 /// optionally insert before an existing instruction, the second appends
2463 /// the new instruction to the specified BasicBlock.
2464 inline InsertValueInst(Value *Agg, Value *Val,
2465 ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 Instruction *InsertBefore);
2468 inline InsertValueInst(Value *Agg, Value *Val,
2469 ArrayRef<unsigned> Idxs,
2470 const Twine &NameStr, BasicBlock *InsertAtEnd);
2471
2472 /// Constructors - These two constructors are convenience methods because one
2473 /// and two index insertvalue instructions are so common.
2474 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2475 const Twine &NameStr = "",
2476 Instruction *InsertBefore = nullptr);
2477 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2478 BasicBlock *InsertAtEnd);
2479
2480 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2481 const Twine &NameStr);
2482
2483protected:
2484 // Note: Instruction needs to be a friend here to call cloneImpl.
2485 friend class Instruction;
2486
2487 InsertValueInst *cloneImpl() const;
2488
2489public:
2490 // allocate space for exactly two operands
2491 void *operator new(size_t S) { return User::operator new(S, 2); }
2492 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2493
2494 static InsertValueInst *Create(Value *Agg, Value *Val,
2495 ArrayRef<unsigned> Idxs,
2496 const Twine &NameStr = "",
2497 Instruction *InsertBefore = nullptr) {
2498 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2499 }
2500
2501 static InsertValueInst *Create(Value *Agg, Value *Val,
2502 ArrayRef<unsigned> Idxs,
2503 const Twine &NameStr,
2504 BasicBlock *InsertAtEnd) {
2505 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2506 }
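
Usage sketch (illustrative; `Agg` is an assumed {i32, float} value, `X` a
float, `InsertPt` an insertion point):

    // %agg2 = insertvalue {i32, float} %agg, float %x, 1
    unsigned Idx[] = {1};
    InsertValueInst *Agg2 =
        InsertValueInst::Create(Agg, X, Idx, "agg2", InsertPt);
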
2507
2508 /// Transparently provide more efficient getOperand methods.
2509 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2510
2511 using idx_iterator = const unsigned*;
2512
2513 inline idx_iterator idx_begin() const { return Indices.begin(); }
2514 inline idx_iterator idx_end() const { return Indices.end(); }
2515 inline iterator_range<idx_iterator> indices() const {
2516 return make_range(idx_begin(), idx_end());
2517 }
2518
2519 Value *getAggregateOperand() {
2520 return getOperand(0);
2521 }
2522 const Value *getAggregateOperand() const {
2523 return getOperand(0);
2524 }
2525 static unsigned getAggregateOperandIndex() {
2526 return 0U; // get index for modifying correct operand
2527 }
2528
2529 Value *getInsertedValueOperand() {
2530 return getOperand(1);
2531 }
2532 const Value *getInsertedValueOperand() const {
2533 return getOperand(1);
2534 }
2535 static unsigned getInsertedValueOperandIndex() {
2536 return 1U; // get index for modifying correct operand
2537 }
2538
2539 ArrayRef<unsigned> getIndices() const {
2540 return Indices;
2541 }
2542
2543 unsigned getNumIndices() const {
2544 return (unsigned)Indices.size();
2545 }
2546
2547 bool hasIndices() const {
2548 return true;
2549 }
2550
2551 // Methods for support type inquiry through isa, cast, and dyn_cast:
2552 static bool classof(const Instruction *I) {
2553 return I->getOpcode() == Instruction::InsertValue;
2554 }
2555 static bool classof(const Value *V) {
2556 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2557 }
2558};
2559
2560template <>
2561struct OperandTraits<InsertValueInst> :
2562 public FixedNumOperandTraits<InsertValueInst, 2> {
2563};
2564
2565InsertValueInst::InsertValueInst(Value *Agg,
2566 Value *Val,
2567 ArrayRef<unsigned> Idxs,
2568 const Twine &NameStr,
2569 Instruction *InsertBefore)
2570 : Instruction(Agg->getType(), InsertValue,
2571 OperandTraits<InsertValueInst>::op_begin(this),
2572 2, InsertBefore) {
2573 init(Agg, Val, Idxs, NameStr);
2574}
2575
2576InsertValueInst::InsertValueInst(Value *Agg,
2577 Value *Val,
2578 ArrayRef<unsigned> Idxs,
2579 const Twine &NameStr,
2580 BasicBlock *InsertAtEnd)
2581 : Instruction(Agg->getType(), InsertValue,
2582 OperandTraits<InsertValueInst>::op_begin(this),
2583 2, InsertAtEnd) {
2584 init(Agg, Val, Idxs, NameStr);
2585}
2586
2587DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2588
2589//===----------------------------------------------------------------------===//
2590// PHINode Class
2591//===----------------------------------------------------------------------===//
2592
2593// PHINode - The PHINode class is used to represent the magical mystical PHI
2594// node, that can not exist in nature, but can be synthesized in a computer
2595// scientist's overactive imagination.
2596//
2597class PHINode : public Instruction {
2598 /// The number of operands actually allocated. NumOperands is
2599 /// the number actually in use.
2600 unsigned ReservedSpace;
2601
2602 PHINode(const PHINode &PN);
2603
2604 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2605 const Twine &NameStr = "",
2606 Instruction *InsertBefore = nullptr)
2607 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2608 ReservedSpace(NumReservedValues) {
2609 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2610 setName(NameStr);
2611 allocHungoffUses(ReservedSpace);
2612 }
2613
2614 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2615 BasicBlock *InsertAtEnd)
2616 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2617 ReservedSpace(NumReservedValues) {
2618 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2619 setName(NameStr);
2620 allocHungoffUses(ReservedSpace);
2621 }
2622
2623protected:
2624 // Note: Instruction needs to be a friend here to call cloneImpl.
2625 friend class Instruction;
2626
2627 PHINode *cloneImpl() const;
2628
2629 // allocHungoffUses - this is more complicated than the generic
2630 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2631 // values and pointers to the incoming blocks, all in one allocation.
2632 void allocHungoffUses(unsigned N) {
2633 User::allocHungoffUses(N, /* IsPhi */ true);
2634 }
2635
2636public:
2637 /// Constructors - NumReservedValues is a hint for the number of incoming
2638 /// edges that this phi node will have (use 0 if you really have no idea).
2639 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2640 const Twine &NameStr = "",
2641 Instruction *InsertBefore = nullptr) {
2642 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2643 }
2644
2645 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2646 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2647 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2648 }
2649
2650 /// Provide fast operand accessors
2651 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2652
2653 // Block iterator interface. This provides access to the list of incoming
2654 // basic blocks, which parallels the list of incoming values.
2655
2656 using block_iterator = BasicBlock **;
2657 using const_block_iterator = BasicBlock * const *;
2658
2659 block_iterator block_begin() {
2660 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2661 }
2662
2663 const_block_iterator block_begin() const {
2664 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2665 }
2666
2667 block_iterator block_end() {
2668 return block_begin() + getNumOperands();
2669 }
2670
2671 const_block_iterator block_end() const {
2672 return block_begin() + getNumOperands();
2673 }
2674
2675 iterator_range<block_iterator> blocks() {
2676 return make_range(block_begin(), block_end());
2677 }
2678
2679 iterator_range<const_block_iterator> blocks() const {
2680 return make_range(block_begin(), block_end());
2681 }
2682
2683 op_range incoming_values() { return operands(); }
2684
2685 const_op_range incoming_values() const { return operands(); }
2686
2687 /// Return the number of incoming edges
2688 ///
2689 unsigned getNumIncomingValues() const { return getNumOperands(); }
2690
2691 /// Return incoming value number x
2692 ///
2693 Value *getIncomingValue(unsigned i) const {
2694 return getOperand(i);
2695 }
2696 void setIncomingValue(unsigned i, Value *V) {
2697 assert(V && "PHI node got a null value!");
2698 assert(getType() == V->getType() &&
2699 "All operands to PHI node must be the same type as the PHI node!");
2700 setOperand(i, V);
2701 }
2702
2703 static unsigned getOperandNumForIncomingValue(unsigned i) {
2704 return i;
2705 }
2706
2707 static unsigned getIncomingValueNumForOperand(unsigned i) {
2708 return i;
2709 }
2710
2711 /// Return incoming basic block number @p i.
2712 ///
2713 BasicBlock *getIncomingBlock(unsigned i) const {
2714 return block_begin()[i];
2715 }
2716
2717 /// Return incoming basic block corresponding
2718 /// to an operand of the PHI.
2719 ///
2720 BasicBlock *getIncomingBlock(const Use &U) const {
2721    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2722 return getIncomingBlock(unsigned(&U - op_begin()));
2723 }
2724
2725 /// Return incoming basic block corresponding
2726 /// to value use iterator.
2727 ///
2728 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2729 return getIncomingBlock(I.getUse());
2730 }
2731
2732 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2733    assert(BB && "PHI node got a null basic block!");
2734 block_begin()[i] = BB;
2735 }
2736
2737  /// Replace every incoming basic block \p Old with basic block \p New.
2738 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2739    assert(New && Old && "PHI node got a null basic block!");
2740 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2741 if (getIncomingBlock(Op) == Old)
2742 setIncomingBlock(Op, New);
2743 }
2744
2745 /// Add an incoming value to the end of the PHI list
2746 ///
2747 void addIncoming(Value *V, BasicBlock *BB) {
2748 if (getNumOperands() == ReservedSpace)
2749 growOperands(); // Get more space!
2750 // Initialize some new operands.
2751 setNumHungOffUseOperands(getNumOperands() + 1);
2752 setIncomingValue(getNumOperands() - 1, V);
2753 setIncomingBlock(getNumOperands() - 1, BB);
2754 }
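  // A minimal construction sketch (illustrative only), assuming hypothetical
  // values V0/V1, predecessor blocks BB0/BB1, and an insertion point InsertPt;
  // NumReservedValues is just an allocation hint:
  // \code
  //   PHINode *PN = PHINode::Create(V0->getType(), /*NumReservedValues=*/2,
  //                                 "merge", InsertPt);
  //   PN->addIncoming(V0, BB0);
  //   PN->addIncoming(V1, BB1);
  // \endcode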
2755
2756 /// Remove an incoming value. This is useful if a
2757 /// predecessor basic block is deleted. The value removed is returned.
2758 ///
2759 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2760 /// is true), the PHI node is destroyed and any uses of it are replaced with
2761 /// dummy values. The only time there should be zero incoming values to a PHI
2762 /// node is when the block is dead, so this strategy is sound.
2763 ///
2764 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2765
2766 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2767 int Idx = getBasicBlockIndex(BB);
2768    assert(Idx >= 0 && "Invalid basic block argument to remove!");
2769 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2770 }
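  // Typical use when deleting a predecessor block (illustrative only), with a
  // hypothetical PHINode *PN and BasicBlock *DeadPred; note the PHI itself may
  // be destroyed here if this was its last incoming value:
  // \code
  //   PN->removeIncomingValue(DeadPred, /*DeletePHIIfEmpty=*/true);
  // \endcode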
2771
2772 /// Return the first index of the specified basic
2773 /// block in the value list for this PHI. Returns -1 if no instance.
2774 ///
2775 int getBasicBlockIndex(const BasicBlock *BB) const {
2776 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2777 if (block_begin()[i] == BB)
2778 return i;
2779 return -1;
2780 }
2781
2782 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2783 int Idx = getBasicBlockIndex(BB);
2784    assert(Idx >= 0 && "Invalid basic block argument!");
2785 return getIncomingValue(Idx);
2786 }
2787
2788  /// Set every incoming value for block \p BB to \p V.
2789 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2790    assert(BB && "PHI node got a null basic block!");
2791 bool Found = false;
2792 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2793 if (getIncomingBlock(Op) == BB) {
2794 Found = true;
2795 setIncomingValue(Op, V);
2796 }
2797 (void)Found;
2798    assert(Found && "Invalid basic block argument to set!");
2799 }
2800
2801 /// If the specified PHI node always merges together the
2802 /// same value, return the value, otherwise return null.
2803 Value *hasConstantValue() const;
2804
2805 /// Whether the specified PHI node always merges
2806 /// together the same value, assuming undefs are equal to a unique
2807 /// non-undef value.
2808 bool hasConstantOrUndefValue() const;
2809
2810  /// Return true if this PHI node is complete, i.e., every predecessor of its
2811  /// parent block has an incoming value in this PHI; otherwise return false.
2812 bool isComplete() const {
2813 return llvm::all_of(predecessors(getParent()),
2814 [this](const BasicBlock *Pred) {
2815 return getBasicBlockIndex(Pred) >= 0;
2816 });
2817 }
2818
2819 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2820 static bool classof(const Instruction *I) {
2821 return I->getOpcode() == Instruction::PHI;
2822 }
2823 static bool classof(const Value *V) {
2824 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2825 }
2826
2827private:
2828 void growOperands();
2829};
2830
2831template <>
2832struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2833};
2834
2835DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2836
2837//===----------------------------------------------------------------------===//
2838// LandingPadInst Class
2839//===----------------------------------------------------------------------===//
2840
2841//===---------------------------------------------------------------------------
2842/// The landingpad instruction holds all of the information
2843/// necessary to generate correct exception handling. It cannot be moved
2844/// from the top of a landing pad block, which itself is
2845/// accessible only from the 'unwind' edge of an invoke. This uses the
2846/// SubclassData field in Value to store whether or not the landingpad is a
2847/// cleanup.
2848///
2849class LandingPadInst : public Instruction {
2850 using CleanupField = BoolBitfieldElementT<0>;
2851
2852 /// The number of operands actually allocated. NumOperands is
2853 /// the number actually in use.
2854 unsigned ReservedSpace;
2855
2856 LandingPadInst(const LandingPadInst &LP);
2857
2858public:
2859 enum ClauseType { Catch, Filter };
2860
2861private:
2862 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2863 const Twine &NameStr, Instruction *InsertBefore);
2864 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2865 const Twine &NameStr, BasicBlock *InsertAtEnd);
2866
2867 // Allocate space for exactly zero operands.
2868 void *operator new(size_t S) { return User::operator new(S); }
2869
2870 void growOperands(unsigned Size);
2871 void init(unsigned NumReservedValues, const Twine &NameStr);
2872
2873protected:
2874 // Note: Instruction needs to be a friend here to call cloneImpl.
2875 friend class Instruction;
2876
2877 LandingPadInst *cloneImpl() const;
2878
2879public:
2880 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2881
2882 /// Constructors - NumReservedClauses is a hint for the number of incoming
2883 /// clauses that this landingpad will have (use 0 if you really have no idea).
2884 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2885 const Twine &NameStr = "",
2886 Instruction *InsertBefore = nullptr);
2887 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2888 const Twine &NameStr, BasicBlock *InsertAtEnd);
2889
2890 /// Provide fast operand accessors
2891  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2892
2893 /// Return 'true' if this landingpad instruction is a
2894 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2895 /// doesn't catch the exception.
2896 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2897
2898 /// Indicate that this landingpad instruction is a cleanup.
2899 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2900
2901 /// Add a catch or filter clause to the landing pad.
2902 void addClause(Constant *ClauseVal);
2903
2904 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2905 /// determine what type of clause this is.
2906 Constant *getClause(unsigned Idx) const {
2907 return cast<Constant>(getOperandList()[Idx]);
2908 }
2909
2910  /// Return 'true' if the clause at index Idx is a catch clause.
2911 bool isCatch(unsigned Idx) const {
2912 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2913 }
2914
2915  /// Return 'true' if the clause at index Idx is a filter clause.
2916 bool isFilter(unsigned Idx) const {
2917 return isa<ArrayType>(getOperandList()[Idx]->getType());
2918 }
2919
2920 /// Get the number of clauses for this landing pad.
2921 unsigned getNumClauses() const { return getNumOperands(); }
2922
2923 /// Grow the size of the operand list to accommodate the new
2924 /// number of clauses.
2925 void reserveClauses(unsigned Size) { growOperands(Size); }
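  // A construction sketch (illustrative only), assuming a hypothetical
  // exception-pointer/selector type LPadTy, a non-array type-info constant
  // TypeInfo, and an insertion point InsertPt:
  // \code
  //   LandingPadInst *LP = LandingPadInst::Create(
  //       LPadTy, /*NumReservedClauses=*/1, "lpad", InsertPt);
  //   LP->addClause(TypeInfo); // non-array type => catch clause
  //   assert(LP->isCatch(0));
  //   LP->setCleanup(true);    // also run cleanups while unwinding
  // \endcode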
2926
2927 // Methods for support type inquiry through isa, cast, and dyn_cast:
2928 static bool classof(const Instruction *I) {
2929 return I->getOpcode() == Instruction::LandingPad;
2930 }
2931 static bool classof(const Value *V) {
2932 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2933 }
2934};
2935
2936template <>
2937struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2938};
2939
2940DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2941
2942//===----------------------------------------------------------------------===//
2943// ReturnInst Class
2944//===----------------------------------------------------------------------===//
2945
2946//===---------------------------------------------------------------------------
2947/// Return a value (possibly void), from a function. Execution
2948/// does not continue in this function any longer.
2949///
2950class ReturnInst : public Instruction {
2951 ReturnInst(const ReturnInst &RI);
2952
2953private:
2954 // ReturnInst constructors:
2955 // ReturnInst() - 'ret void' instruction
2956 // ReturnInst( null) - 'ret void' instruction
2957 // ReturnInst(Value* X) - 'ret X' instruction
2958 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2959 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2960 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2961 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2962 //
2963 // NOTE: If the Value* passed is of type void then the constructor behaves as
2964 // if it was passed NULL.
2965 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2966 Instruction *InsertBefore = nullptr);
2967 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2968 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2969
2970protected:
2971 // Note: Instruction needs to be a friend here to call cloneImpl.
2972 friend class Instruction;
2973
2974 ReturnInst *cloneImpl() const;
2975
2976public:
2977 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2978 Instruction *InsertBefore = nullptr) {
2979 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2980 }
2981
2982 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2983 BasicBlock *InsertAtEnd) {
2984 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2985 }
2986
2987 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2988 return new(0) ReturnInst(C, InsertAtEnd);
2989 }
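  // Sketch of the two forms (illustrative only), assuming a hypothetical
  // LLVMContext &Ctx, return value RetVal, and block BB to terminate; the
  // placement-new argument above allocates an operand slot only when a value
  // is actually returned:
  // \code
  //   ReturnInst::Create(Ctx, /*retVal=*/nullptr, BB); // emits 'ret void'
  //   ReturnInst::Create(Ctx, RetVal, BB);             // or: 'ret RetVal'
  // \endcode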
2990
2991 /// Provide fast operand accessors
2992  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2993
2994 /// Convenience accessor. Returns null if there is no return value.
2995 Value *getReturnValue() const {
2996 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2997 }
2998
2999 unsigned getNumSuccessors() const { return 0; }
3000
3001 // Methods for support type inquiry through isa, cast, and dyn_cast:
3002 static bool classof(const Instruction *I) {
3003 return (I->getOpcode() == Instruction::Ret);
3004 }
3005 static bool classof(const Value *V) {
3006 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3007 }
3008
3009private:
3010 BasicBlock *getSuccessor(unsigned idx) const {
3011    llvm_unreachable("ReturnInst has no successors!");
3012 }
3013
3014 void setSuccessor(unsigned idx, BasicBlock *B) {
3015    llvm_unreachable("ReturnInst has no successors!");
3016 }
3017};
3018
3019template <>
3020struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3021};
3022
3023DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3024
3025//===----------------------------------------------------------------------===//
3026// BranchInst Class
3027//===----------------------------------------------------------------------===//
3028
3029//===---------------------------------------------------------------------------
3030/// Conditional or Unconditional Branch instruction.
3031///
3032class BranchInst : public Instruction {
3033 /// Ops list - Branches are strange. The operands are ordered:
3034 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3035  /// they don't have to check whether the branch is conditional. These are
3036  /// mostly accessed relative to op_end().
3037 BranchInst(const BranchInst &BI);
3038 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3039 // BranchInst(BB *B) - 'br B'
3040 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3041 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3042 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3043 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3044 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3045 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3046 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3047 Instruction *InsertBefore = nullptr);
3048 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3049 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3050 BasicBlock *InsertAtEnd);
3051
3052 void AssertOK();
3053
3054protected:
3055 // Note: Instruction needs to be a friend here to call cloneImpl.
3056 friend class Instruction;
3057
3058 BranchInst *cloneImpl() const;
3059
3060public:
3061 /// Iterator type that casts an operand to a basic block.
3062 ///
3063 /// This only makes sense because the successors are stored as adjacent
3064 /// operands for branch instructions.
3065 struct succ_op_iterator
3066 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3067 std::random_access_iterator_tag, BasicBlock *,
3068 ptrdiff_t, BasicBlock *, BasicBlock *> {
3069 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3070
3071 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3072 BasicBlock *operator->() const { return operator*(); }
3073 };
3074
3075 /// The const version of `succ_op_iterator`.
3076 struct const_succ_op_iterator
3077 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3078 std::random_access_iterator_tag,
3079 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3080 const BasicBlock *> {
3081 explicit const_succ_op_iterator(const_value_op_iterator I)
3082 : iterator_adaptor_base(I) {}
3083
3084 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3085 const BasicBlock *operator->() const { return operator*(); }
3086 };
3087
3088 static BranchInst *Create(BasicBlock *IfTrue,
3089 Instruction *InsertBefore = nullptr) {
3090 return new(1) BranchInst(IfTrue, InsertBefore);
3091 }
3092
3093 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3094 Value *Cond, Instruction *InsertBefore = nullptr) {
3095 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3096 }
3097
3098 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3099 return new(1) BranchInst(IfTrue, InsertAtEnd);
3100 }
3101
3102 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3103 Value *Cond, BasicBlock *InsertAtEnd) {
3104 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3105 }
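  // Sketch of the operand layout in action (illustrative only), assuming
  // hypothetical blocks Then/Else/BB and an i1 condition Cond; successor 0 is
  // always the destination taken when the condition is true:
  // \code
  //   BranchInst *BI = BranchInst::Create(Then, Else, Cond, BB);
  //   assert(BI->isConditional());
  //   assert(BI->getSuccessor(0) == Then && BI->getSuccessor(1) == Else);
  // \endcode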
3106
3107 /// Transparently provide more efficient getOperand methods.
3108  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3109
3110 bool isUnconditional() const { return getNumOperands() == 1; }
3111 bool isConditional() const { return getNumOperands() == 3; }
3112
3113 Value *getCondition() const {
3114    assert(isConditional() && "Cannot get condition of an uncond branch!");
3115 return Op<-3>();
3116 }
3117
3118 void setCondition(Value *V) {
3119    assert(isConditional() && "Cannot set condition of unconditional branch!");
3120 Op<-3>() = V;
3121 }
3122
3123 unsigned getNumSuccessors() const { return 1+isConditional(); }
3124
3125 BasicBlock *getSuccessor(unsigned i) const {
3126    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3127 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3128 }
3129
3130 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3131    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3132 *(&Op<-1>() - idx) = NewSucc;
3133 }
3134
3135 /// Swap the successors of this branch instruction.
3136 ///
3137 /// Swaps the successors of the branch instruction. This also swaps any
3138 /// branch weight metadata associated with the instruction so that it
3139 /// continues to map correctly to each operand.
3140 void swapSuccessors();
3141
3142 iterator_range<succ_op_iterator> successors() {
3143 return make_range(
3144 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3145 succ_op_iterator(value_op_end()));
3146 }
3147
3148 iterator_range<const_succ_op_iterator> successors() const {
3149 return make_range(const_succ_op_iterator(
3150 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3151 const_succ_op_iterator(value_op_end()));
3152 }
3153
3154 // Methods for support type inquiry through isa, cast, and dyn_cast:
3155 static bool classof(const Instruction *I) {
3156 return (I->getOpcode() == Instruction::Br);
3157 }
3158 static bool classof(const Value *V) {
3159 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3160 }
3161};
3162
3163template <>
3164struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3165};
3166
3167DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3168
3169//===----------------------------------------------------------------------===//
3170// SwitchInst Class
3171//===----------------------------------------------------------------------===//
3172
3173//===---------------------------------------------------------------------------
3174/// Multiway switch
3175///
3176class SwitchInst : public Instruction {
3177 unsigned ReservedSpace;
3178
3179 // Operand[0] = Value to switch on
3180 // Operand[1] = Default basic block destination
3181 // Operand[2n ] = Value to match
3182 // Operand[2n+1] = BasicBlock to go to on match
3183 SwitchInst(const SwitchInst &SI);
3184
3185 /// Create a new switch instruction, specifying a value to switch on and a
3186 /// default destination. The number of additional cases can be specified here
3187 /// to make memory allocation more efficient. This constructor can also
3188 /// auto-insert before another instruction.
3189 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3190 Instruction *InsertBefore);
3191
3192 /// Create a new switch instruction, specifying a value to switch on and a
3193 /// default destination. The number of additional cases can be specified here
3194 /// to make memory allocation more efficient. This constructor also
3195 /// auto-inserts at the end of the specified BasicBlock.
3196 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3197 BasicBlock *InsertAtEnd);
3198
3199 // allocate space for exactly zero operands
3200 void *operator new(size_t S) { return User::operator new(S); }
3201
3202 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3203 void growOperands();
3204
3205protected:
3206 // Note: Instruction needs to be a friend here to call cloneImpl.
3207 friend class Instruction;
3208
3209 SwitchInst *cloneImpl() const;
3210
3211public:
3212 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3213
3214  // -2, i.e. ~0U - 1: the pseudo-index used for the default case.
3215 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3216
3217 template <typename CaseHandleT> class CaseIteratorImpl;
3218
3219 /// A handle to a particular switch case. It exposes a convenient interface
3220 /// to both the case value and the successor block.
3221 ///
3222 /// We define this as a template and instantiate it to form both a const and
3223 /// non-const handle.
3224 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3225 class CaseHandleImpl {
3226 // Directly befriend both const and non-const iterators.
3227 friend class SwitchInst::CaseIteratorImpl<
3228 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3229
3230 protected:
3231 // Expose the switch type we're parameterized with to the iterator.
3232 using SwitchInstType = SwitchInstT;
3233
3234 SwitchInstT *SI;
3235 ptrdiff_t Index;
3236
3237 CaseHandleImpl() = default;
3238 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3239
3240 public:
3241 /// Resolves case value for current case.
3242 ConstantIntT *getCaseValue() const {
3243      assert((unsigned)Index < SI->getNumCases() &&
3244             "Index out the number of cases.");
3245 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3246 }
3247
3248 /// Resolves successor for current case.
3249 BasicBlockT *getCaseSuccessor() const {
3250      assert(((unsigned)Index < SI->getNumCases() ||
3251              (unsigned)Index == DefaultPseudoIndex) &&
3252             "Index out the number of cases.");
3253 return SI->getSuccessor(getSuccessorIndex());
3254 }
3255
3256 /// Returns number of current case.
3257 unsigned getCaseIndex() const { return Index; }
3258
3259 /// Returns successor index for current case successor.
3260 unsigned getSuccessorIndex() const {
3261      assert(((unsigned)Index == DefaultPseudoIndex ||
3262              (unsigned)Index < SI->getNumCases()) &&
3263             "Index out the number of cases.");
3264 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3265 }
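    // E.g., in a switch with two cases, the default case maps to successor
    // index 0, case 0 to successor index 1, and case 1 to successor index 2.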
3266
3267 bool operator==(const CaseHandleImpl &RHS) const {
3268      assert(SI == RHS.SI && "Incompatible operators.");
3269 return Index == RHS.Index;
3270 }
3271 };
3272
3273 using ConstCaseHandle =
3274 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3275
3276 class CaseHandle
3277 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3278 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3279
3280 public:
3281 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3282
3283 /// Sets the new value for current case.
3284 void setValue(ConstantInt *V) {
3285      assert((unsigned)Index < SI->getNumCases() &&
3286             "Index out the number of cases.");
3287 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3288 }
3289
3290 /// Sets the new successor for current case.
3291 void setSuccessor(BasicBlock *S) {
3292 SI->setSuccessor(getSuccessorIndex(), S);
3293 }
3294 };
3295
3296 template <typename CaseHandleT>
3297 class CaseIteratorImpl
3298 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3299 std::random_access_iterator_tag,
3300 CaseHandleT> {
3301 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3302
3303 CaseHandleT Case;
3304
3305 public:
3306 /// Default constructed iterator is in an invalid state until assigned to
3307 /// a case for a particular switch.
3308 CaseIteratorImpl() = default;
3309
3310 /// Initializes case iterator for given SwitchInst and for given
3311 /// case number.
3312 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3313
3314 /// Initializes case iterator for given SwitchInst and for given
3315 /// successor index.
3316 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3317 unsigned SuccessorIndex) {
3318      assert(SuccessorIndex < SI->getNumSuccessors() &&
3319             "Successor index # out of range!");
3320 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3321 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3322 }
3323
3324 /// Support converting to the const variant. This will be a no-op for const
3325 /// variant.
3326 operator CaseIteratorImpl<ConstCaseHandle>() const {
3327 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3328 }
3329
3330 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3331 // Check index correctness after addition.
3332 // Note: Index == getNumCases() means end().
3333      assert(Case.Index + N >= 0 &&
3334             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3335             "Case.Index out the number of cases.");
3336 Case.Index += N;
3337 return *this;
3338 }
3339 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3340 // Check index correctness after subtraction.
3341 // Note: Case.Index == getNumCases() means end().
3342      assert(Case.Index - N >= 0 &&
3343             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3344             "Case.Index out the number of cases.");
3345 Case.Index -= N;
3346 return *this;
3347 }
3348 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3349      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3350 return Case.Index - RHS.Case.Index;
3351 }
3352 bool operator==(const CaseIteratorImpl &RHS) const {
3353 return Case == RHS.Case;
3354 }
3355 bool operator<(const CaseIteratorImpl &RHS) const {
3356      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3357 return Case.Index < RHS.Case.Index;
3358 }
3359 CaseHandleT &operator*() { return Case; }
3360 const CaseHandleT &operator*() const { return Case; }
3361 };
3362
3363 using CaseIt = CaseIteratorImpl<CaseHandle>;
3364 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3365
3366 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3367 unsigned NumCases,
3368 Instruction *InsertBefore = nullptr) {
3369 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3370 }
3371
3372 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3373 unsigned NumCases, BasicBlock *InsertAtEnd) {
3374 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3375 }
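  // A construction sketch (illustrative only), assuming a hypothetical i32
  // condition Cond, blocks DefaultBB/CaseBB/BB, and LLVMContext &Ctx; each
  // added case appends one (value, successor) operand pair:
  // \code
  //   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
  //   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 42), CaseBB);
  // \endcode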
3376
3377 /// Provide fast operand accessors
3378  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3379
3380 // Accessor Methods for Switch stmt
3381 Value *getCondition() const { return getOperand(0); }
3382 void setCondition(Value *V) { setOperand(0, V); }
3383
3384 BasicBlock *getDefaultDest() const {
3385 return cast<BasicBlock>(getOperand(1));
3386 }
3387
3388 void setDefaultDest(BasicBlock *DefaultCase) {
3389 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3390 }
3391
3392 /// Return the number of 'cases' in this switch instruction, excluding the
3393 /// default case.
3394 unsigned getNumCases() const {
3395 return getNumOperands()/2 - 1;
3396 }
3397
3398 /// Returns a read/write iterator that points to the first case in the
3399 /// SwitchInst.
3400 CaseIt case_begin() {
3401 return CaseIt(this, 0);
3402 }
3403
3404 /// Returns a read-only iterator that points to the first case in the
3405 /// SwitchInst.
3406 ConstCaseIt case_begin() const {
3407 return ConstCaseIt(this, 0);
3408 }
3409
3410 /// Returns a read/write iterator that points one past the last in the
3411 /// SwitchInst.
3412 CaseIt case_end() {
3413 return CaseIt(this, getNumCases());
3414 }
3415
3416 /// Returns a read-only iterator that points one past the last in the
3417 /// SwitchInst.
3418 ConstCaseIt case_end() const {
3419 return ConstCaseIt(this, getNumCases());
3420 }
3421
3422 /// Iteration adapter for range-for loops.
3423 iterator_range<CaseIt> cases() {
3424 return make_range(case_begin(), case_end());
3425 }
3426
3427 /// Constant iteration adapter for range-for loops.
3428 iterator_range<ConstCaseIt> cases() const {
3429 return make_range(case_begin(), case_end());
3430 }
3431
3432 /// Returns an iterator that points to the default case.
3433  /// Note: this iterator can only resolve the successor; attempting to
3434  /// resolve the case value triggers an assertion.
3435  /// Also note that incrementing or decrementing this iterator triggers an
3436  /// assertion and makes the iterator invalid.
3437 CaseIt case_default() {
3438 return CaseIt(this, DefaultPseudoIndex);
3439 }
3440 ConstCaseIt case_default() const {
3441 return ConstCaseIt(this, DefaultPseudoIndex);
3442 }
3443
3444  /// Search all of the case values for the specified constant. If it is
3445  /// explicitly handled, return its case iterator; otherwise return the
3446  /// default case iterator to indicate that it is handled by the default
3447  /// handler.
3448 CaseIt findCaseValue(const ConstantInt *C) {
3449 CaseIt I = llvm::find_if(
3450 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3451 if (I != case_end())
3452 return I;
3453
3454 return case_default();
3455 }
3456 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3457 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3458 return Case.getCaseValue() == C;
3459 });
3460 if (I != case_end())
3461 return I;
3462
3463 return case_default();
3464 }
3465
3466 /// Finds the unique case value for a given successor. Returns null if the
3467 /// successor is not found, not unique, or is the default case.
3468 ConstantInt *findCaseDest(BasicBlock *BB) {
3469 if (BB == getDefaultDest())
3470 return nullptr;
3471
3472 ConstantInt *CI = nullptr;
3473 for (auto Case : cases()) {
3474 if (Case.getCaseSuccessor() != BB)
3475 continue;
3476
3477 if (CI)
3478 return nullptr; // Multiple cases lead to BB.
3479
3480 CI = Case.getCaseValue();
3481 }
3482
3483 return CI;
3484 }
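  // Lookup sketch for both query directions (illustrative only), reusing the
  // hypothetical SI, CaseBB, and Ctx from the sketch above:
  // \code
  //   auto It = SI->findCaseValue(ConstantInt::get(Type::getInt32Ty(Ctx), 42));
  //   BasicBlock *Dest = It->getCaseSuccessor();  // default dest if unhandled
  //   ConstantInt *CI = SI->findCaseDest(CaseBB); // null if absent/not unique
  // \endcode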
3485
3486 /// Add an entry to the switch instruction.
3487 /// Note:
3488  /// This action invalidates case_end(). The old case_end() iterator will
3489  /// point to the added case.
3490 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3491
3492 /// This method removes the specified case and its successor from the switch
3493 /// instruction. Note that this operation may reorder the remaining cases at
3494 /// index idx and above.
3495 /// Note:
3496 /// This action invalidates iterators for all cases following the one removed,
3497 /// including the case_end() iterator. It returns an iterator for the next
3498 /// case.
3499 CaseIt removeCase(CaseIt I);
3500
3501 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3502 BasicBlock *getSuccessor(unsigned idx) const {
3503    assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
3504 return cast<BasicBlock>(getOperand(idx*2+1));
3505 }
3506 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3507    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3508 setOperand(idx * 2 + 1, NewSucc);
3509 }
3510
3511 // Methods for support type inquiry through isa, cast, and dyn_cast:
3512 static bool classof(const Instruction *I) {
3513 return I->getOpcode() == Instruction::Switch;
3514 }
3515 static bool classof(const Value *V) {
3516 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3517 }
3518};
3519
3520/// A wrapper class to simplify modification of SwitchInst cases along with
3521/// their prof branch_weights metadata.
3522class SwitchInstProfUpdateWrapper {
3523 SwitchInst &SI;
3524 Optional<SmallVector<uint32_t, 8> > Weights = None;
3525 bool Changed = false;
3526
3527protected:
3528 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3529
3530 MDNode *buildProfBranchWeightsMD();
3531
3532 void init();
3533
3534public:
3535 using CaseWeightOpt = Optional<uint32_t>;
3536 SwitchInst *operator->() { return &SI; }
3537 SwitchInst &operator*() { return SI; }
3538 operator SwitchInst *() { return &SI; }
3539
3540 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3541
3542 ~SwitchInstProfUpdateWrapper() {
3543 if (Changed)
3544 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3545 }
3546
3547  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3548  /// the corresponding branch weight.
3549 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3550
3551 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3552 /// specified branch weight for the added case.
3553 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3554
3555  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and
3556  /// mark this object so that it does not touch the underlying SwitchInst in
3557  /// its destructor.
3557 SymbolTableList<Instruction>::iterator eraseFromParent();
3558
3559 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3560 CaseWeightOpt getSuccessorWeight(unsigned idx);
3561
3562 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3563};
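// A usage sketch (illustrative only), assuming a hypothetical SwitchInst *SI
// that carries prof branch_weights metadata; the wrapper updates the weights
// alongside the cases and writes them back in its destructor:
// \code
// {
//   SwitchInstProfUpdateWrapper SIW(*SI);
//   SIW.removeCase(SIW->case_begin()); // drops the matching weight as well
// }
// \endcode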
3564
3565template <>
3566struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3567};
3568
3569DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3570
3571//===----------------------------------------------------------------------===//
3572// IndirectBrInst Class
3573//===----------------------------------------------------------------------===//
3574
3575//===---------------------------------------------------------------------------
3576/// Indirect Branch Instruction.
3577///
3578class IndirectBrInst : public Instruction {
3579 unsigned ReservedSpace;
3580
3581 // Operand[0] = Address to jump to
3582 // Operand[n+1] = n-th destination
3583 IndirectBrInst(const IndirectBrInst &IBI);
3584
3585 /// Create a new indirectbr instruction, specifying an
3586 /// Address to jump to. The number of expected destinations can be specified
3587 /// here to make memory allocation more efficient. This constructor can also
3588 /// autoinsert before another instruction.
3589 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3590
3591 /// Create a new indirectbr instruction, specifying an
3592 /// Address to jump to. The number of expected destinations can be specified
3593 /// here to make memory allocation more efficient. This constructor also
3594 /// autoinserts at the end of the specified BasicBlock.
3595 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3596
3597 // allocate space for exactly zero operands
3598 void *operator new(size_t S) { return User::operator new(S); }
3599
3600 void init(Value *Address, unsigned NumDests);
3601 void growOperands();
3602
3603protected:
3604 // Note: Instruction needs to be a friend here to call cloneImpl.
3605 friend class Instruction;
3606
3607 IndirectBrInst *cloneImpl() const;
3608
3609public:
3610 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3611
3612 /// Iterator type that casts an operand to a basic block.
3613 ///
3614 /// This only makes sense because the successors are stored as adjacent
3615 /// operands for indirectbr instructions.
3616 struct succ_op_iterator
3617 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3618 std::random_access_iterator_tag, BasicBlock *,
3619 ptrdiff_t, BasicBlock *, BasicBlock *> {
3620 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3621
3622 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3623 BasicBlock *operator->() const { return operator*(); }
3624 };
3625
3626 /// The const version of `succ_op_iterator`.
3627 struct const_succ_op_iterator
3628 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3629 std::random_access_iterator_tag,
3630 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3631 const BasicBlock *> {
3632 explicit const_succ_op_iterator(const_value_op_iterator I)
3633 : iterator_adaptor_base(I) {}
3634
3635 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3636 const BasicBlock *operator->() const { return operator*(); }
3637 };
3638
3639 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3640 Instruction *InsertBefore = nullptr) {
3641 return new IndirectBrInst(Address, NumDests, InsertBefore);
3642 }
3643
3644 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3645 BasicBlock *InsertAtEnd) {
3646 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3647 }
3648
3649 /// Provide fast operand accessors.
3650 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3651
3652 // Accessor Methods for IndirectBrInst instruction.
3653 Value *getAddress() { return getOperand(0); }
3654 const Value *getAddress() const { return getOperand(0); }
3655 void setAddress(Value *V) { setOperand(0, V); }
3656
3657 /// Return the number of possible destinations in this
3658 /// indirectbr instruction.
3659 unsigned getNumDestinations() const { return getNumOperands()-1; }
3660
3661 /// Return the specified destination.
3662 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3663 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3664
3665 /// Add a destination.
3666 ///
3667 void addDestination(BasicBlock *Dest);
3668
3669 /// This method removes the specified successor from the
3670 /// indirectbr instruction.
3671 void removeDestination(unsigned i);
3672
3673 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3674 BasicBlock *getSuccessor(unsigned i) const {
3675 return cast<BasicBlock>(getOperand(i+1));
3676 }
3677 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3678 setOperand(i + 1, NewSucc);
3679 }
3680
3681 iterator_range<succ_op_iterator> successors() {
3682 return make_range(succ_op_iterator(std::next(value_op_begin())),
3683 succ_op_iterator(value_op_end()));
3684 }
3685
3686 iterator_range<const_succ_op_iterator> successors() const {
3687 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3688 const_succ_op_iterator(value_op_end()));
3689 }
3690
3691 // Methods for support type inquiry through isa, cast, and dyn_cast:
3692 static bool classof(const Instruction *I) {
3693 return I->getOpcode() == Instruction::IndirectBr;
3694 }
3695 static bool classof(const Value *V) {
3696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3697 }
3698};
3699
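// Illustrative sketch (not part of the original header): building an
// indirectbr. The jump address is typically a BlockAddress constant and
// every possible destination must be listed explicitly; `BB1`, `BB2` and
// `InsertBefore` are placeholders.
//
//   Value *Addr = BlockAddress::get(BB1);
//   IndirectBrInst *IBI =
//       IndirectBrInst::Create(Addr, /*NumDests=*/2, InsertBefore);
//   IBI->addDestination(BB1);
//   IBI->addDestination(BB2);
//   for (BasicBlock *Succ : IBI->successors())
//     (void)Succ; // successors() skips operand 0, the address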
3700template <>
3701struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3702};
3703
3704DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3705
3706//===----------------------------------------------------------------------===//
3707// InvokeInst Class
3708//===----------------------------------------------------------------------===//
3709
3710/// Invoke instruction. The SubclassData field is used to hold the
3711/// calling convention of the call.
3712///
3713class InvokeInst : public CallBase {
3714 /// The number of operands for this call beyond the called function,
3715 /// arguments, and operand bundles.
3716 static constexpr int NumExtraOperands = 2;
3717
3718 /// The index from the end of the operand array to the normal destination.
3719 static constexpr int NormalDestOpEndIdx = -3;
3720
3721 /// The index from the end of the operand array to the unwind destination.
3722 static constexpr int UnwindDestOpEndIdx = -2;
3723
3724 InvokeInst(const InvokeInst &BI);
3725
3726 /// Construct an InvokeInst given a range of arguments.
3727 ///
3728 /// Construct an InvokeInst from a range of arguments
3729 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3730 BasicBlock *IfException, ArrayRef<Value *> Args,
3731 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3732 const Twine &NameStr, Instruction *InsertBefore);
3733
3734 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3735 BasicBlock *IfException, ArrayRef<Value *> Args,
3736 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3737 const Twine &NameStr, BasicBlock *InsertAtEnd);
3738
3739 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3740 BasicBlock *IfException, ArrayRef<Value *> Args,
3741 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3742
3743 /// Compute the number of operands to allocate.
3744 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3745 // We need one operand for the called function, plus our extra operands and
3746 // the input operand counts provided.
3747 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3748 }
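  // Illustrative note (added by the editor): for an invoke with 2 arguments
  // and no bundles this yields 1 + 2 + 2 = 5 co-allocated operands,
  //   [arg0, arg1, normal dest, unwind dest, callee]
  // so the destinations sit at Op<-3>/Op<-2> and the callee at Op<-1>,
  // matching NormalDestOpEndIdx and UnwindDestOpEndIdx above.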
3749
3750protected:
3751 // Note: Instruction needs to be a friend here to call cloneImpl.
3752 friend class Instruction;
3753
3754 InvokeInst *cloneImpl() const;
3755
3756public:
3757 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3758 BasicBlock *IfException, ArrayRef<Value *> Args,
3759 const Twine &NameStr,
3760 Instruction *InsertBefore = nullptr) {
3761 int NumOperands = ComputeNumOperands(Args.size());
3762 return new (NumOperands)
3763 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3764 NameStr, InsertBefore);
3765 }
3766
3767 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3768 BasicBlock *IfException, ArrayRef<Value *> Args,
3769 ArrayRef<OperandBundleDef> Bundles = None,
3770 const Twine &NameStr = "",
3771 Instruction *InsertBefore = nullptr) {
3772 int NumOperands =
3773 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3774 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3775
3776 return new (NumOperands, DescriptorBytes)
3777 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3778 NameStr, InsertBefore);
3779 }
3780
3781 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3782 BasicBlock *IfException, ArrayRef<Value *> Args,
3783 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3784 int NumOperands = ComputeNumOperands(Args.size());
3785 return new (NumOperands)
3786 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3787 NameStr, InsertAtEnd);
3788 }
3789
3790 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3791 BasicBlock *IfException, ArrayRef<Value *> Args,
3792 ArrayRef<OperandBundleDef> Bundles,
3793 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3794 int NumOperands =
3795 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3796 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3797
3798 return new (NumOperands, DescriptorBytes)
3799 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3800 NameStr, InsertAtEnd);
3801 }
3802
3803 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3804 BasicBlock *IfException, ArrayRef<Value *> Args,
3805 const Twine &NameStr,
3806 Instruction *InsertBefore = nullptr) {
3807 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3808 IfException, Args, None, NameStr, InsertBefore);
3809 }
3810
3811 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3812 BasicBlock *IfException, ArrayRef<Value *> Args,
3813 ArrayRef<OperandBundleDef> Bundles = None,
3814 const Twine &NameStr = "",
3815 Instruction *InsertBefore = nullptr) {
3816 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3817 IfException, Args, Bundles, NameStr, InsertBefore);
3818 }
3819
3820 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3821 BasicBlock *IfException, ArrayRef<Value *> Args,
3822 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3823 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3824 IfException, Args, NameStr, InsertAtEnd);
3825 }
3826
3827 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3828 BasicBlock *IfException, ArrayRef<Value *> Args,
3829 ArrayRef<OperandBundleDef> Bundles,
3830 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3831 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3832 IfException, Args, Bundles, NameStr, InsertAtEnd);
3833 }
3834
3835 /// Create a clone of \p II with a different set of operand bundles and
3836 /// insert it before \p InsertPt.
3837 ///
3838 /// The returned invoke instruction is identical to \p II in every way except
3839 /// that the operand bundles for the new instruction are set to the operand
3840 /// bundles in \p Bundles.
3841 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3842 Instruction *InsertPt = nullptr);
3843
3844 // get*Dest - Return the destination basic blocks...
3845 BasicBlock *getNormalDest() const {
3846 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3847 }
3848 BasicBlock *getUnwindDest() const {
3849 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3850 }
3851 void setNormalDest(BasicBlock *B) {
3852 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3853 }
3854 void setUnwindDest(BasicBlock *B) {
3855 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3856 }
3857
3858 /// Get the landingpad instruction from the landing pad
3859 /// block (the unwind destination).
3860 LandingPadInst *getLandingPadInst() const;
3861
3862 BasicBlock *getSuccessor(unsigned i) const {
3863 assert(i < 2 && "Successor # out of range for invoke!");
3864 return i == 0 ? getNormalDest() : getUnwindDest();
3865 }
3866
3867 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3868 assert(i < 2 && "Successor # out of range for invoke!");
3869 if (i == 0)
3870 setNormalDest(NewSucc);
3871 else
3872 setUnwindDest(NewSucc);
3873 }
3874
3875 unsigned getNumSuccessors() const { return 2; }
3876
3877 // Methods for support type inquiry through isa, cast, and dyn_cast:
3878 static bool classof(const Instruction *I) {
3879 return (I->getOpcode() == Instruction::Invoke);
3880 }
3881 static bool classof(const Value *V) {
3882 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3883 }
3884
3885private:
3886 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3887 // method so that subclasses cannot accidentally use it.
3888 template <typename Bitfield>
3889 void setSubclassData(typename Bitfield::Type Value) {
3890 Instruction::setSubclassData<Bitfield>(Value);
3891 }
3892};
3893
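// Illustrative sketch (not part of the original header): creating an invoke
// of a possibly-throwing function. `Ctx`, `M`, `Arg0`, `NormalBB`, `LPadBB`
// and `InsertBefore` are placeholders; `LPadBB` must start with a
// landingpad (or be an EH pad) for the IR to verify.
//
//   FunctionCallee Callee = M.getOrInsertFunction(
//       "may_throw", Type::getVoidTy(Ctx), Type::getInt32Ty(Ctx));
//   InvokeInst *II = InvokeInst::Create(Callee, NormalBB, LPadBB,
//                                       {Arg0}, "", InsertBefore);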
3894InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3895 BasicBlock *IfException, ArrayRef<Value *> Args,
3896 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3897 const Twine &NameStr, Instruction *InsertBefore)
3898 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3899 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3900 InsertBefore) {
3901 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3902}
3903
3904InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3905 BasicBlock *IfException, ArrayRef<Value *> Args,
3906 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3907 const Twine &NameStr, BasicBlock *InsertAtEnd)
3908 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3909 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3910 InsertAtEnd) {
3911 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3912}
3913
3914//===----------------------------------------------------------------------===//
3915// CallBrInst Class
3916//===----------------------------------------------------------------------===//
3917
3918/// CallBr instruction, tracking function calls that may not return control but
3919/// instead transfer it to a third location. The SubclassData field is used to
3920/// hold the calling convention of the call.
3921///
3922class CallBrInst : public CallBase {
3923
3924 unsigned NumIndirectDests;
3925
3926 CallBrInst(const CallBrInst &BI);
3927
3928 /// Construct a CallBrInst given a range of arguments.
3929 ///
3930 /// Construct a CallBrInst from a range of arguments
3931 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3932 ArrayRef<BasicBlock *> IndirectDests,
3933 ArrayRef<Value *> Args,
3934 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3935 const Twine &NameStr, Instruction *InsertBefore);
3936
3937 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3938 ArrayRef<BasicBlock *> IndirectDests,
3939 ArrayRef<Value *> Args,
3940 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3941 const Twine &NameStr, BasicBlock *InsertAtEnd);
3942
3943 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3944 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3945 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3946
3947 /// If the indirect destinations change, scan and update the argument list.
3948 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
3949
3950 /// Compute the number of operands to allocate.
3951 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
3952 int NumBundleInputs = 0) {
3953 // We need one operand for the called function, plus our extra operands and
3954 // the input operand counts provided.
3955 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
3956 }
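  // Illustrative note (added by the editor): a callbr with 1 argument, 2
  // indirect dests and no bundles yields 2 + 2 + 1 = 5 operands,
  //   [arg0, default dest, indirect dest 0, indirect dest 1, callee]
  // which is why getDefaultDest() and getIndirectDest() below index
  // backwards from the callee operand at Op<-1>.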
3957
3958protected:
3959 // Note: Instruction needs to be a friend here to call cloneImpl.
3960 friend class Instruction;
3961
3962 CallBrInst *cloneImpl() const;
3963
3964public:
3965 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3966 BasicBlock *DefaultDest,
3967 ArrayRef<BasicBlock *> IndirectDests,
3968 ArrayRef<Value *> Args, const Twine &NameStr,
3969 Instruction *InsertBefore = nullptr) {
3970 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3971 return new (NumOperands)
3972 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
3973 NumOperands, NameStr, InsertBefore);
3974 }
3975
3976 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3977 BasicBlock *DefaultDest,
3978 ArrayRef<BasicBlock *> IndirectDests,
3979 ArrayRef<Value *> Args,
3980 ArrayRef<OperandBundleDef> Bundles = None,
3981 const Twine &NameStr = "",
3982 Instruction *InsertBefore = nullptr) {
3983 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
3984 CountBundleInputs(Bundles));
3985 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3986
3987 return new (NumOperands, DescriptorBytes)
3988 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
3989 NumOperands, NameStr, InsertBefore);
3990 }
3991
3992 static CallBrInst *Create(FunctionType *Ty, Value *Func,
3993 BasicBlock *DefaultDest,
3994 ArrayRef<BasicBlock *> IndirectDests,
3995 ArrayRef<Value *> Args, const Twine &NameStr,
3996 BasicBlock *InsertAtEnd) {
3997 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
3998 return new (NumOperands)
3999 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4000 NumOperands, NameStr, InsertAtEnd);
4001 }
4002
4003 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4004 BasicBlock *DefaultDest,
4005 ArrayRef<BasicBlock *> IndirectDests,
4006 ArrayRef<Value *> Args,
4007 ArrayRef<OperandBundleDef> Bundles,
4008 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4009 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4010 CountBundleInputs(Bundles));
4011 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4012
4013 return new (NumOperands, DescriptorBytes)
4014 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4015 NumOperands, NameStr, InsertAtEnd);
4016 }
4017
4018 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4019 ArrayRef<BasicBlock *> IndirectDests,
4020 ArrayRef<Value *> Args, const Twine &NameStr,
4021 Instruction *InsertBefore = nullptr) {
4022 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4023 IndirectDests, Args, NameStr, InsertBefore);
4024 }
4025
4026 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4027 ArrayRef<BasicBlock *> IndirectDests,
4028 ArrayRef<Value *> Args,
4029 ArrayRef<OperandBundleDef> Bundles = None,
4030 const Twine &NameStr = "",
4031 Instruction *InsertBefore = nullptr) {
4032 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4033 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4034 }
4035
4036 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4037 ArrayRef<BasicBlock *> IndirectDests,
4038 ArrayRef<Value *> Args, const Twine &NameStr,
4039 BasicBlock *InsertAtEnd) {
4040 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4041 IndirectDests, Args, NameStr, InsertAtEnd);
4042 }
4043
4044 static CallBrInst *Create(FunctionCallee Func,
4045 BasicBlock *DefaultDest,
4046 ArrayRef<BasicBlock *> IndirectDests,
4047 ArrayRef<Value *> Args,
4048 ArrayRef<OperandBundleDef> Bundles,
4049 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4050 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4051 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4052 }
4053
4054 /// Create a clone of \p CBI with a different set of operand bundles and
4055 /// insert it before \p InsertPt.
4056 ///
4057 /// The returned callbr instruction is identical to \p CBI in every way
4058 /// except that the operand bundles for the new instruction are set to the
4059 /// operand bundles in \p Bundles.
4060 static CallBrInst *Create(CallBrInst *CBI,
4061 ArrayRef<OperandBundleDef> Bundles,
4062 Instruction *InsertPt = nullptr);
4063
4064 /// Return the number of callbr indirect dest labels.
4065 ///
4066 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4067
4068 /// getIndirectDestLabel - Return the i-th indirect dest label.
4069 ///
4070 Value *getIndirectDestLabel(unsigned i) const {
4071 assert(i < getNumIndirectDests() && "Out of bounds!");
4072 return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() +
4073 1);
4074 }
4075
4076 Value *getIndirectDestLabelUse(unsigned i) const {
4077 assert(i < getNumIndirectDests() && "Out of bounds!");
4078 return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() +
4079 1);
4080 }
4081
4082 // Return the destination basic blocks...
4083 BasicBlock *getDefaultDest() const {
4084 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4085 }
4086 BasicBlock *getIndirectDest(unsigned i) const {
4087 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4088 }
4089 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4090 SmallVector<BasicBlock *, 16> IndirectDests;
4091 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4092 IndirectDests.push_back(getIndirectDest(i));
4093 return IndirectDests;
4094 }
4095 void setDefaultDest(BasicBlock *B) {
4096 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4097 }
4098 void setIndirectDest(unsigned i, BasicBlock *B) {
4099 updateArgBlockAddresses(i, B);
4100 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4101 }
4102
4103 BasicBlock *getSuccessor(unsigned i) const {
4104 assert(i < getNumSuccessors() + 1 &&
4105        "Successor # out of range for callbr!");
4106 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4107 }
4108
4109 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4110 assert(i < getNumIndirectDests() + 1 &&
4111        "Successor # out of range for callbr!");
4112 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4113 }
4114
4115 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4116
4117 // Methods for support type inquiry through isa, cast, and dyn_cast:
4118 static bool classof(const Instruction *I) {
4119 return (I->getOpcode() == Instruction::CallBr);
4120 }
4121 static bool classof(const Value *V) {
4122 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4123 }
4124
4125private:
4126 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4127 // method so that subclasses cannot accidentally use it.
4128 template <typename Bitfield>
4129 void setSubclassData(typename Bitfield::Type Value) {
4130 Instruction::setSubclassData<Bitfield>(Value);
4131 }
4132};
4133
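// Illustrative sketch (not part of the original header): callbr is the IR
// counterpart of GNU C "asm goto"; control continues at the default
// destination or at one of the listed indirect labels. `AsmTy`,
// `InlineAsmVal`, `FallthroughBB`, `LabelBB` and `InsertBefore` are
// placeholders.
//
//   CallBrInst *CBI = CallBrInst::Create(
//       AsmTy, InlineAsmVal, /*DefaultDest=*/FallthroughBB,
//       /*IndirectDests=*/{LabelBB}, /*Args=*/{}, "", InsertBefore);
//   unsigned N = CBI->getNumSuccessors(); // 1 default + 1 indirect == 2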
4134CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4135 ArrayRef<BasicBlock *> IndirectDests,
4136 ArrayRef<Value *> Args,
4137 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4138 const Twine &NameStr, Instruction *InsertBefore)
4139 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4140 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4141 InsertBefore) {
4142 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4143}
4144
4145CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4146 ArrayRef<BasicBlock *> IndirectDests,
4147 ArrayRef<Value *> Args,
4148 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4149 const Twine &NameStr, BasicBlock *InsertAtEnd)
4150 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4151 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4152 InsertAtEnd) {
4153 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4154}
4155
4156//===----------------------------------------------------------------------===//
4157// ResumeInst Class
4158//===----------------------------------------------------------------------===//
4159
4160//===---------------------------------------------------------------------------
4161/// Resume the propagation of an exception.
4162///
4163class ResumeInst : public Instruction {
4164 ResumeInst(const ResumeInst &RI);
4165
4166 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4167 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4168
4169protected:
4170 // Note: Instruction needs to be a friend here to call cloneImpl.
4171 friend class Instruction;
4172
4173 ResumeInst *cloneImpl() const;
4174
4175public:
4176 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4177 return new(1) ResumeInst(Exn, InsertBefore);
4178 }
4179
4180 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4181 return new(1) ResumeInst(Exn, InsertAtEnd);
4182 }
4183
4184 /// Provide fast operand accessors
4185 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4186
4187 /// Convenience accessor.
4188 Value *getValue() const { return Op<0>(); }
4189
4190 unsigned getNumSuccessors() const { return 0; }
4191
4192 // Methods for support type inquiry through isa, cast, and dyn_cast:
4193 static bool classof(const Instruction *I) {
4194 return I->getOpcode() == Instruction::Resume;
4195 }
4196 static bool classof(const Value *V) {
4197 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4198 }
4199
4200private:
4201 BasicBlock *getSuccessor(unsigned idx) const {
4202 llvm_unreachable("ResumeInst has no successors!");
4203 }
4204
4205 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4206 llvm_unreachable("ResumeInst has no successors!");
4207 }
4208};
4209
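// Illustrative sketch (not part of the original header): resume re-raises
// an in-flight exception; its single operand is normally the aggregate
// value produced by a landingpad. `LPad` and `UnwindBB` are placeholders.
//
//   ResumeInst *RI = ResumeInst::Create(LPad, UnwindBB); // terminator, 0 successors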
4210template <>
4211struct OperandTraits<ResumeInst> :
4212 public FixedNumOperandTraits<ResumeInst, 1> {
4213};
4214
4215DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4216
4217//===----------------------------------------------------------------------===//
4218// CatchSwitchInst Class
4219//===----------------------------------------------------------------------===//
4220class CatchSwitchInst : public Instruction {
4221 using UnwindDestField = BoolBitfieldElementT<0>;
4222
4223 /// The number of operands actually allocated. NumOperands is
4224 /// the number actually in use.
4225 unsigned ReservedSpace;
4226
4227 // Operand[0] = Outer scope
4228 // Operand[1] = Unwind block destination
4229 // Operand[n] = BasicBlock to go to on match
4230 CatchSwitchInst(const CatchSwitchInst &CSI);
4231
4232 /// Create a new catchswitch instruction, specifying an outer scope
4233 /// (parent pad) and an optional unwind destination. The number of handlers
4234 /// can be specified here to make memory allocation more efficient.
4235 /// This constructor can also autoinsert before another instruction.
4236 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4237 unsigned NumHandlers, const Twine &NameStr,
4238 Instruction *InsertBefore);
4239
4240 /// Create a new catchswitch instruction, specifying an outer scope
4241 /// (parent pad) and an optional unwind destination. The number of handlers
4242 /// can be specified here to make memory allocation more efficient.
4243 /// This constructor also autoinserts at the end of the specified BasicBlock.
4244 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4245 unsigned NumHandlers, const Twine &NameStr,
4246 BasicBlock *InsertAtEnd);
4247
4248 // allocate space for exactly zero operands
4249 void *operator new(size_t S) { return User::operator new(S); }
4250
4251 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4252 void growOperands(unsigned Size);
4253
4254protected:
4255 // Note: Instruction needs to be a friend here to call cloneImpl.
4256 friend class Instruction;
4257
4258 CatchSwitchInst *cloneImpl() const;
4259
4260public:
4261 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4262
4263 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4264 unsigned NumHandlers,
4265 const Twine &NameStr = "",
4266 Instruction *InsertBefore = nullptr) {
4267 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4268 InsertBefore);
4269 }
4270
4271 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4272 unsigned NumHandlers, const Twine &NameStr,
4273 BasicBlock *InsertAtEnd) {
4274 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4275 InsertAtEnd);
4276 }
4277
4278 /// Provide fast operand accessors
4279 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4280
4281 // Accessor Methods for CatchSwitch stmt
4282 Value *getParentPad() const { return getOperand(0); }
4283 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4284
4285 // Accessor Methods for CatchSwitch stmt
4286 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4287 bool unwindsToCaller() const { return !hasUnwindDest(); }
4288 BasicBlock *getUnwindDest() const {
4289 if (hasUnwindDest())
4290 return cast<BasicBlock>(getOperand(1));
4291 return nullptr;
4292 }
4293 void setUnwindDest(BasicBlock *UnwindDest) {
4294 assert(UnwindDest);
4295 assert(hasUnwindDest());
4296 setOperand(1, UnwindDest);
4297 }
4298
4299 /// Return the number of 'handlers' in this catchswitch
4300 /// instruction; the unwind destination is not counted as a handler.
4301 unsigned getNumHandlers() const {
4302 if (hasUnwindDest())
4303 return getNumOperands() - 2;
4304 return getNumOperands() - 1;
4305 }
4306
4307private:
4308 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4309 static const BasicBlock *handler_helper(const Value *V) {
4310 return cast<BasicBlock>(V);
4311 }
4312
4313public:
4314 using DerefFnTy = BasicBlock *(*)(Value *);
4315 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4316 using handler_range = iterator_range<handler_iterator>;
4317 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4318 using const_handler_iterator =
4319 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4320 using const_handler_range = iterator_range<const_handler_iterator>;
4321
4322 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4323 handler_iterator handler_begin() {
4324 op_iterator It = op_begin() + 1;
4325 if (hasUnwindDest())
4326 ++It;
4327 return handler_iterator(It, DerefFnTy(handler_helper));
4328 }
4329
4330 /// Returns an iterator that points to the first handler in the
4331 /// CatchSwitchInst.
4332 const_handler_iterator handler_begin() const {
4333 const_op_iterator It = op_begin() + 1;
4334 if (hasUnwindDest())
4335 ++It;
4336 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4337 }
4338
4339 /// Returns an iterator that points one past the last
4340 /// handler in the CatchSwitchInst.
4341 handler_iterator handler_end() {
4342 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4343 }
4344
4345 /// Returns an iterator that points one past the last handler in the
4346 /// CatchSwitchInst.
4347 const_handler_iterator handler_end() const {
4348 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4349 }
4350
4351 /// iteration adapter for range-for loops.
4352 handler_range handlers() {
4353 return make_range(handler_begin(), handler_end());
4354 }
4355
4356 /// iteration adapter for range-for loops.
4357 const_handler_range handlers() const {
4358 return make_range(handler_begin(), handler_end());
4359 }
4360
4361 /// Add an entry to the catchswitch instruction.
4362 /// Note:
4363 /// This action invalidates handler_end(). The old handler_end() iterator
4364 /// will point to the added handler.
4365 void addHandler(BasicBlock *Dest);
4366
4367 void removeHandler(handler_iterator HI);
4368
4369 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4370 BasicBlock *getSuccessor(unsigned Idx) const {
4371 assert(Idx < getNumSuccessors() &&
4372        "Successor # out of range for catchswitch!");
4373 return cast<BasicBlock>(getOperand(Idx + 1));
4374 }
4375 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4376 assert(Idx < getNumSuccessors() &&
4377        "Successor # out of range for catchswitch!");
4378 setOperand(Idx + 1, NewSucc);
4379 }
4380
4381 // Methods for support type inquiry through isa, cast, and dyn_cast:
4382 static bool classof(const Instruction *I) {
4383 return I->getOpcode() == Instruction::CatchSwitch;
4384 }
4385 static bool classof(const Value *V) {
4386 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4387 }
4388};
4389
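// Illustrative sketch (not part of the original header): a catchswitch with
// one handler that unwinds to the caller. The parent pad of a top-level
// funclet is the 'none' token; `Ctx`, `HandlerBB` and `DispatchBB` are
// placeholders.
//
//   Value *ParentPad = ConstantTokenNone::get(Ctx);
//   auto *CSI = CatchSwitchInst::Create(ParentPad, /*UnwindDest=*/nullptr,
//                                       /*NumHandlers=*/1, "cs", DispatchBB);
//   CSI->addHandler(HandlerBB); // HandlerBB must begin with a catchpad
//   for (BasicBlock *H : CSI->handlers())
//     (void)H;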
4390template <>
4391struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4392
4393DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4394
4395//===----------------------------------------------------------------------===//
4396// CleanupPadInst Class
4397//===----------------------------------------------------------------------===//
4398class CleanupPadInst : public FuncletPadInst {
4399private:
4400 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4401 unsigned Values, const Twine &NameStr,
4402 Instruction *InsertBefore)
4403 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4404 NameStr, InsertBefore) {}
4405 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4406 unsigned Values, const Twine &NameStr,
4407 BasicBlock *InsertAtEnd)
4408 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4409 NameStr, InsertAtEnd) {}
4410
4411public:
4412 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4413 const Twine &NameStr = "",
4414 Instruction *InsertBefore = nullptr) {
4415 unsigned Values = 1 + Args.size();
4416 return new (Values)
4417 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4418 }
4419
4420 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4421 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4422 unsigned Values = 1 + Args.size();
4423 return new (Values)
4424 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4425 }
4426
4427 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4428 static bool classof(const Instruction *I) {
4429 return I->getOpcode() == Instruction::CleanupPad;
4430 }
4431 static bool classof(const Value *V) {
4432 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4433 }
4434};
4435
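// Illustrative sketch (not part of the original header): a cleanuppad opens
// a cleanup funclet, e.g. a destructor body on the unwind path. `Ctx` and
// `InsertBefore` are placeholders.
//
//   Value *ParentPad = ConstantTokenNone::get(Ctx);
//   CleanupPadInst *CP = CleanupPadInst::Create(ParentPad, /*Args=*/None,
//                                               "cleanup", InsertBefore);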
4436//===----------------------------------------------------------------------===//
4437// CatchPadInst Class
4438//===----------------------------------------------------------------------===//
4439class CatchPadInst : public FuncletPadInst {
4440private:
4441 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4442 unsigned Values, const Twine &NameStr,
4443 Instruction *InsertBefore)
4444 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4445 NameStr, InsertBefore) {}
4446 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4447 unsigned Values, const Twine &NameStr,
4448 BasicBlock *InsertAtEnd)
4449 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4450 NameStr, InsertAtEnd) {}
4451
4452public:
4453 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4454 const Twine &NameStr = "",
4455 Instruction *InsertBefore = nullptr) {
4456 unsigned Values = 1 + Args.size();
4457 return new (Values)
4458 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4459 }
4460
4461 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4462 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4463 unsigned Values = 1 + Args.size();
4464 return new (Values)
4465 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4466 }
4467
4468 /// Convenience accessors
4469 CatchSwitchInst *getCatchSwitch() const {
4470 return cast<CatchSwitchInst>(Op<-1>());
4471 }
4472 void setCatchSwitch(Value *CatchSwitch) {
4473 assert(CatchSwitch);
4474 Op<-1>() = CatchSwitch;
4475 }
4476
4477 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4478 static bool classof(const Instruction *I) {
4479 return I->getOpcode() == Instruction::CatchPad;
4480 }
4481 static bool classof(const Value *V) {
4482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4483 }
4484};
4485
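// Illustrative sketch (not part of the original header): a catchpad must
// live in one of its catchswitch's handler blocks; for MSVC C++ EH the Args
// are typically {typeinfo, flags, exception slot}. `CSI` is the
// CatchSwitchInst from the sketch above; `TypeInfoGV` and `InsertBefore`
// are placeholders.
//
//   CatchPadInst *CPI = CatchPadInst::Create(CSI, /*Args=*/{TypeInfoGV},
//                                            "catch", InsertBefore);
//   assert(CPI->getCatchSwitch() == CSI);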
4486//===----------------------------------------------------------------------===//
4487// CatchReturnInst Class
4488//===----------------------------------------------------------------------===//
4489
4490class CatchReturnInst : public Instruction {
4491 CatchReturnInst(const CatchReturnInst &RI);
4492 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4493 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4494
4495 void init(Value *CatchPad, BasicBlock *BB);
4496
4497protected:
4498 // Note: Instruction needs to be a friend here to call cloneImpl.
4499 friend class Instruction;
4500
4501 CatchReturnInst *cloneImpl() const;
4502
4503public:
4504 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4505 Instruction *InsertBefore = nullptr) {
4506 assert(CatchPad);
4507 assert(BB);
4508 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4509 }
4510
4511 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4512 BasicBlock *InsertAtEnd) {
4513 assert(CatchPad);
4514 assert(BB);
4515 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4516 }
4517
4518 /// Provide fast operand accessors
4519 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4520
4521 /// Convenience accessors.
4522 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4523 void setCatchPad(CatchPadInst *CatchPad) {
4524 assert(CatchPad);
4525 Op<0>() = CatchPad;
4526 }
4527
4528 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4529 void setSuccessor(BasicBlock *NewSucc) {
4530 assert(NewSucc);
4531 Op<1>() = NewSucc;
4532 }
4533 unsigned getNumSuccessors() const { return 1; }
4534
4535 /// Get the parentPad of this catchret's catchpad's catchswitch.
4536 /// The successor block is implicitly a member of this funclet.
4537 Value *getCatchSwitchParentPad() const {
4538 return getCatchPad()->getCatchSwitch()->getParentPad();
4539 }
4540
4541 // Methods for support type inquiry through isa, cast, and dyn_cast:
4542 static bool classof(const Instruction *I) {
4543 return (I->getOpcode() == Instruction::CatchRet);
4544 }
4545 static bool classof(const Value *V) {
4546 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4547 }
4548
4549private:
4550 BasicBlock *getSuccessor(unsigned Idx) const {
4551 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4552 return getSuccessor();
4553 }
4554
4555 void setSuccessor(unsigned Idx, BasicBlock *B) {
4556 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4557 setSuccessor(B);
4558 }
4559};
4560
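// Illustrative sketch (not part of the original header): catchret leaves
// the catch funclet identified by `CPI` and resumes normal control flow at
// `ContBB` (placeholders as in the sketches above).
//
//   CatchReturnInst *CRI = CatchReturnInst::Create(CPI, ContBB, HandlerBB);
//   Value *PP = CRI->getCatchSwitchParentPad(); // funclet of the successor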
4561template <>
4562struct OperandTraits<CatchReturnInst>
4563 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4564
4565DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4566
4567//===----------------------------------------------------------------------===//
4568// CleanupReturnInst Class
4569//===----------------------------------------------------------------------===//
4570
4571class CleanupReturnInst : public Instruction {
4572 using UnwindDestField = BoolBitfieldElementT<0>;
4573
4574private:
4575 CleanupReturnInst(const CleanupReturnInst &RI);
4576 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4577 Instruction *InsertBefore = nullptr);
4578 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4579 BasicBlock *InsertAtEnd);
4580
4581 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4582
4583protected:
4584 // Note: Instruction needs to be a friend here to call cloneImpl.
4585 friend class Instruction;
4586
4587 CleanupReturnInst *cloneImpl() const;
4588
4589public:
4590 static CleanupReturnInst *Create(Value *CleanupPad,
4591 BasicBlock *UnwindBB = nullptr,
4592 Instruction *InsertBefore = nullptr) {
4593 assert(CleanupPad);
4594 unsigned Values = 1;
4595 if (UnwindBB)
4596 ++Values;
4597 return new (Values)
4598 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4599 }
4600
4601 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4602 BasicBlock *InsertAtEnd) {
4603 assert(CleanupPad);
4604 unsigned Values = 1;
4605 if (UnwindBB)
4606 ++Values;
4607 return new (Values)
4608 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4609 }
4610
4611 /// Provide fast operand accessors
4612 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4613
4614 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4615 bool unwindsToCaller() const { return !hasUnwindDest(); }
4616
4617 /// Convenience accessor.
4618 CleanupPadInst *getCleanupPad() const {
4619 return cast<CleanupPadInst>(Op<0>());
4620 }
4621 void setCleanupPad(CleanupPadInst *CleanupPad) {
4622 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4622, __extension__ __PRETTY_FUNCTION__))
;
4623 Op<0>() = CleanupPad;
4624 }
4625
4626 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4627
4628 BasicBlock *getUnwindDest() const {
4629 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4630 }
4631 void setUnwindDest(BasicBlock *NewDest) {
4632 assert(NewDest)(static_cast <bool> (NewDest) ? void (0) : __assert_fail
("NewDest", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4632, __extension__ __PRETTY_FUNCTION__))
;
4633 assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail
("hasUnwindDest()", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4633, __extension__ __PRETTY_FUNCTION__))
;
4634 Op<1>() = NewDest;
4635 }
4636
4637 // Methods for support type inquiry through isa, cast, and dyn_cast:
4638 static bool classof(const Instruction *I) {
4639 return (I->getOpcode() == Instruction::CleanupRet);
4640 }
4641 static bool classof(const Value *V) {
4642 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4643 }
4644
4645private:
4646 BasicBlock *getSuccessor(unsigned Idx) const {
4647 assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail
("Idx == 0", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4647, __extension__ __PRETTY_FUNCTION__))
;
4648 return getUnwindDest();
4649 }
4650
4651 void setSuccessor(unsigned Idx, BasicBlock *B) {
4652 assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail
("Idx == 0", "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4652, __extension__ __PRETTY_FUNCTION__))
;
4653 setUnwindDest(B);
4654 }
4655
4656 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4657 // method so that subclasses cannot accidentally use it.
4658 template <typename Bitfield>
4659 void setSubclassData(typename Bitfield::Type Value) {
4660 Instruction::setSubclassData<Bitfield>(Value);
4661 }
4662};
4663
4664template <>
4665struct OperandTraits<CleanupReturnInst>
4666 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4667
4668DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
return OperandTraits<CleanupReturnInst>::op_begin(this
); } CleanupReturnInst::const_op_iterator CleanupReturnInst::
op_begin() const { return OperandTraits<CleanupReturnInst>
::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst
::op_iterator CleanupReturnInst::op_end() { return OperandTraits
<CleanupReturnInst>::op_end(this); } CleanupReturnInst::
const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits
<CleanupReturnInst>::op_end(const_cast<CleanupReturnInst
*>(this)); } Value *CleanupReturnInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<CleanupReturnInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4668, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<CleanupReturnInst>::op_begin
(const_cast<CleanupReturnInst*>(this))[i_nocapture].get
()); } void CleanupReturnInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<CleanupReturnInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4668, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
CleanupReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned CleanupReturnInst::getNumOperands() const { return
OperandTraits<CleanupReturnInst>::operands(this); } template
<int Idx_nocapture> Use &CleanupReturnInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &CleanupReturnInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
4669
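A minimal usage sketch (illustration only, not part of this header), assuming CP is an existing CleanupPadInst* and BB is the block to append to; the unwind destination is optional and may be null:

  // cleanupret from %cp, unwinding to the caller, appended at the end of BB
  CleanupReturnInst *CRI =
      CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr, BB);
  bool ToCaller = CRI->unwindsToCaller(); // true: no unwind destination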
4670//===----------------------------------------------------------------------===//
4671// UnreachableInst Class
4672//===----------------------------------------------------------------------===//
4673
4674//===---------------------------------------------------------------------------
4675/// This instruction has undefined behavior. In particular, the
4676/// presence of this instruction indicates some higher level knowledge that the
4677/// end of the block cannot be reached.
4678///
4679class UnreachableInst : public Instruction {
4680protected:
4681 // Note: Instruction needs to be a friend here to call cloneImpl.
4682 friend class Instruction;
4683
4684 UnreachableInst *cloneImpl() const;
4685
4686public:
4687 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4688 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4689
4690 // allocate space for exactly zero operands
4691 void *operator new(size_t S) { return User::operator new(S, 0); }
4692 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4693
4694 unsigned getNumSuccessors() const { return 0; }
4695
4696 // Methods for support type inquiry through isa, cast, and dyn_cast:
4697 static bool classof(const Instruction *I) {
4698 return I->getOpcode() == Instruction::Unreachable;
4699 }
4700 static bool classof(const Value *V) {
4701 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4702 }
4703
4704private:
4705 BasicBlock *getSuccessor(unsigned idx) const {
4706 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4706)
;
4707 }
4708
4709 void setSuccessor(unsigned idx, BasicBlock *B) {
4710 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 4710)
;
4711 }
4712};
4713
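A short illustration (not in the header), assuming Ctx is the LLVMContext and BB is a block whose end is known to be unreachable:

  // Terminate BB with 'unreachable'; the instruction has zero operands
  // and zero successors.
  new UnreachableInst(Ctx, BB);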
4714//===----------------------------------------------------------------------===//
4715// TruncInst Class
4716//===----------------------------------------------------------------------===//
4717
4718/// This class represents a truncation of integer types.
4719class TruncInst : public CastInst {
4720protected:
4721 // Note: Instruction needs to be a friend here to call cloneImpl.
4722 friend class Instruction;
4723
4724 /// Clone an identical TruncInst
4725 TruncInst *cloneImpl() const;
4726
4727public:
4728 /// Constructor with insert-before-instruction semantics
4729 TruncInst(
4730 Value *S, ///< The value to be truncated
4731 Type *Ty, ///< The (smaller) type to truncate to
4732 const Twine &NameStr = "", ///< A name for the new instruction
4733 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4734 );
4735
4736 /// Constructor with insert-at-end-of-block semantics
4737 TruncInst(
4738 Value *S, ///< The value to be truncated
4739 Type *Ty, ///< The (smaller) type to truncate to
4740 const Twine &NameStr, ///< A name for the new instruction
4741 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4742 );
4743
4744 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4745 static bool classof(const Instruction *I) {
4746 return I->getOpcode() == Trunc;
4747 }
4748 static bool classof(const Value *V) {
4749 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4750 }
4751};
4752
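TruncInst and the cast classes that follow all share the same two-constructor shape (insert-before-instruction or insert-at-end-of-block). A minimal sketch, assuming V is an i64 Value*, Ctx its LLVMContext, and InsertPt an existing Instruction*:

  // trunc i64 %V to i32, inserted immediately before InsertPt
  TruncInst *Lo = new TruncInst(V, Type::getInt32Ty(Ctx), "lo32", InsertPt);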
4753//===----------------------------------------------------------------------===//
4754// ZExtInst Class
4755//===----------------------------------------------------------------------===//
4756
4757/// This class represents zero extension of integer types.
4758class ZExtInst : public CastInst {
4759protected:
4760 // Note: Instruction needs to be a friend here to call cloneImpl.
4761 friend class Instruction;
4762
4763 /// Clone an identical ZExtInst
4764 ZExtInst *cloneImpl() const;
4765
4766public:
4767 /// Constructor with insert-before-instruction semantics
4768 ZExtInst(
4769 Value *S, ///< The value to be zero extended
4770 Type *Ty, ///< The type to zero extend to
4771 const Twine &NameStr = "", ///< A name for the new instruction
4772 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4773 );
4774
4775 /// Constructor with insert-at-end semantics.
4776 ZExtInst(
4777 Value *S, ///< The value to be zero extended
4778 Type *Ty, ///< The type to zero extend to
4779 const Twine &NameStr, ///< A name for the new instruction
4780 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4781 );
4782
4783 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4784 static bool classof(const Instruction *I) {
4785 return I->getOpcode() == ZExt;
4786 }
4787 static bool classof(const Value *V) {
4788 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4789 }
4790};
4791
4792//===----------------------------------------------------------------------===//
4793// SExtInst Class
4794//===----------------------------------------------------------------------===//
4795
4796/// This class represents a sign extension of integer types.
4797class SExtInst : public CastInst {
4798protected:
4799 // Note: Instruction needs to be a friend here to call cloneImpl.
4800 friend class Instruction;
4801
4802 /// Clone an identical SExtInst
4803 SExtInst *cloneImpl() const;
4804
4805public:
4806 /// Constructor with insert-before-instruction semantics
4807 SExtInst(
4808 Value *S, ///< The value to be sign extended
4809 Type *Ty, ///< The type to sign extend to
4810 const Twine &NameStr = "", ///< A name for the new instruction
4811 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4812 );
4813
4814 /// Constructor with insert-at-end-of-block semantics
4815 SExtInst(
4816 Value *S, ///< The value to be sign extended
4817 Type *Ty, ///< The type to sign extend to
4818 const Twine &NameStr, ///< A name for the new instruction
4819 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4820 );
4821
4822 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4823 static bool classof(const Instruction *I) {
4824 return I->getOpcode() == SExt;
4825 }
4826 static bool classof(const Value *V) {
4827 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4828 }
4829};
4830
4831//===----------------------------------------------------------------------===//
4832// FPTruncInst Class
4833//===----------------------------------------------------------------------===//
4834
4835/// This class represents a truncation of floating point types.
4836class FPTruncInst : public CastInst {
4837protected:
4838 // Note: Instruction needs to be a friend here to call cloneImpl.
4839 friend class Instruction;
4840
4841 /// Clone an identical FPTruncInst
4842 FPTruncInst *cloneImpl() const;
4843
4844public:
4845 /// Constructor with insert-before-instruction semantics
4846 FPTruncInst(
4847 Value *S, ///< The value to be truncated
4848 Type *Ty, ///< The type to truncate to
4849 const Twine &NameStr = "", ///< A name for the new instruction
4850 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4851 );
4852
4853 /// Constructor with insert-at-end-of-block semantics
4854 FPTruncInst(
4855 Value *S, ///< The value to be truncated
4856 Type *Ty, ///< The type to truncate to
4857 const Twine &NameStr, ///< A name for the new instruction
4858 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4859 );
4860
4861 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4862 static bool classof(const Instruction *I) {
4863 return I->getOpcode() == FPTrunc;
4864 }
4865 static bool classof(const Value *V) {
4866 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4867 }
4868};
4869
4870//===----------------------------------------------------------------------===//
4871// FPExtInst Class
4872//===----------------------------------------------------------------------===//
4873
4874/// This class represents an extension of floating point types.
4875class FPExtInst : public CastInst {
4876protected:
4877 // Note: Instruction needs to be a friend here to call cloneImpl.
4878 friend class Instruction;
4879
4880 /// Clone an identical FPExtInst
4881 FPExtInst *cloneImpl() const;
4882
4883public:
4884 /// Constructor with insert-before-instruction semantics
4885 FPExtInst(
4886 Value *S, ///< The value to be extended
4887 Type *Ty, ///< The type to extend to
4888 const Twine &NameStr = "", ///< A name for the new instruction
4889 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4890 );
4891
4892 /// Constructor with insert-at-end-of-block semantics
4893 FPExtInst(
4894 Value *S, ///< The value to be extended
4895 Type *Ty, ///< The type to extend to
4896 const Twine &NameStr, ///< A name for the new instruction
4897 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4898 );
4899
4900 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4901 static bool classof(const Instruction *I) {
4902 return I->getOpcode() == FPExt;
4903 }
4904 static bool classof(const Value *V) {
4905 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4906 }
4907};
4908
4909//===----------------------------------------------------------------------===//
4910// UIToFPInst Class
4911//===----------------------------------------------------------------------===//
4912
4913/// This class represents a cast unsigned integer to floating point.
4914class UIToFPInst : public CastInst {
4915protected:
4916 // Note: Instruction needs to be a friend here to call cloneImpl.
4917 friend class Instruction;
4918
4919 /// Clone an identical UIToFPInst
4920 UIToFPInst *cloneImpl() const;
4921
4922public:
4923 /// Constructor with insert-before-instruction semantics
4924 UIToFPInst(
4925 Value *S, ///< The value to be converted
4926 Type *Ty, ///< The type to convert to
4927 const Twine &NameStr = "", ///< A name for the new instruction
4928 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4929 );
4930
4931 /// Constructor with insert-at-end-of-block semantics
4932 UIToFPInst(
4933 Value *S, ///< The value to be converted
4934 Type *Ty, ///< The type to convert to
4935 const Twine &NameStr, ///< A name for the new instruction
4936 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4937 );
4938
4939 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4940 static bool classof(const Instruction *I) {
4941 return I->getOpcode() == UIToFP;
4942 }
4943 static bool classof(const Value *V) {
4944 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4945 }
4946};
4947
4948//===----------------------------------------------------------------------===//
4949// SIToFPInst Class
4950//===----------------------------------------------------------------------===//
4951
4952/// This class represents a cast from signed integer to floating point.
4953class SIToFPInst : public CastInst {
4954protected:
4955 // Note: Instruction needs to be a friend here to call cloneImpl.
4956 friend class Instruction;
4957
4958 /// Clone an identical SIToFPInst
4959 SIToFPInst *cloneImpl() const;
4960
4961public:
4962 /// Constructor with insert-before-instruction semantics
4963 SIToFPInst(
4964 Value *S, ///< The value to be converted
4965 Type *Ty, ///< The type to convert to
4966 const Twine &NameStr = "", ///< A name for the new instruction
4967 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4968 );
4969
4970 /// Constructor with insert-at-end-of-block semantics
4971 SIToFPInst(
4972 Value *S, ///< The value to be converted
4973 Type *Ty, ///< The type to convert to
4974 const Twine &NameStr, ///< A name for the new instruction
4975 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4976 );
4977
4978 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4979 static bool classof(const Instruction *I) {
4980 return I->getOpcode() == SIToFP;
4981 }
4982 static bool classof(const Value *V) {
4983 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4984 }
4985};
4986
4987//===----------------------------------------------------------------------===//
4988// FPToUIInst Class
4989//===----------------------------------------------------------------------===//
4990
4991/// This class represents a cast from floating point to unsigned integer.
4992class FPToUIInst : public CastInst {
4993protected:
4994 // Note: Instruction needs to be a friend here to call cloneImpl.
4995 friend class Instruction;
4996
4997 /// Clone an identical FPToUIInst
4998 FPToUIInst *cloneImpl() const;
4999
5000public:
5001 /// Constructor with insert-before-instruction semantics
5002 FPToUIInst(
5003 Value *S, ///< The value to be converted
5004 Type *Ty, ///< The type to convert to
5005 const Twine &NameStr = "", ///< A name for the new instruction
5006 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5007 );
5008
5009 /// Constructor with insert-at-end-of-block semantics
5010 FPToUIInst(
5011 Value *S, ///< The value to be converted
5012 Type *Ty, ///< The type to convert to
5013 const Twine &NameStr, ///< A name for the new instruction
5014    BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5015 );
5016
5017 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5018 static bool classof(const Instruction *I) {
5019 return I->getOpcode() == FPToUI;
5020 }
5021 static bool classof(const Value *V) {
5022 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5023 }
5024};
5025
5026//===----------------------------------------------------------------------===//
5027// FPToSIInst Class
5028//===----------------------------------------------------------------------===//
5029
5030/// This class represents a cast from floating point to signed integer.
5031class FPToSIInst : public CastInst {
5032protected:
5033 // Note: Instruction needs to be a friend here to call cloneImpl.
5034 friend class Instruction;
5035
5036 /// Clone an identical FPToSIInst
5037 FPToSIInst *cloneImpl() const;
5038
5039public:
5040 /// Constructor with insert-before-instruction semantics
5041 FPToSIInst(
5042 Value *S, ///< The value to be converted
5043 Type *Ty, ///< The type to convert to
5044 const Twine &NameStr = "", ///< A name for the new instruction
5045 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5046 );
5047
5048 /// Constructor with insert-at-end-of-block semantics
5049 FPToSIInst(
5050 Value *S, ///< The value to be converted
5051 Type *Ty, ///< The type to convert to
5052 const Twine &NameStr, ///< A name for the new instruction
5053 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5054 );
5055
5056 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5057 static bool classof(const Instruction *I) {
5058 return I->getOpcode() == FPToSI;
5059 }
5060 static bool classof(const Value *V) {
5061 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5062 }
5063};
5064
5065//===----------------------------------------------------------------------===//
5066// IntToPtrInst Class
5067//===----------------------------------------------------------------------===//
5068
5069/// This class represents a cast from an integer to a pointer.
5070class IntToPtrInst : public CastInst {
5071public:
5072 // Note: Instruction needs to be a friend here to call cloneImpl.
5073 friend class Instruction;
5074
5075 /// Constructor with insert-before-instruction semantics
5076 IntToPtrInst(
5077 Value *S, ///< The value to be converted
5078 Type *Ty, ///< The type to convert to
5079 const Twine &NameStr = "", ///< A name for the new instruction
5080 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5081 );
5082
5083 /// Constructor with insert-at-end-of-block semantics
5084 IntToPtrInst(
5085 Value *S, ///< The value to be converted
5086 Type *Ty, ///< The type to convert to
5087 const Twine &NameStr, ///< A name for the new instruction
5088 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5089 );
5090
5091 /// Clone an identical IntToPtrInst.
5092 IntToPtrInst *cloneImpl() const;
5093
5094 /// Returns the address space of this instruction's pointer type.
5095 unsigned getAddressSpace() const {
5096 return getType()->getPointerAddressSpace();
5097 }
5098
5099 // Methods for support type inquiry through isa, cast, and dyn_cast:
5100 static bool classof(const Instruction *I) {
5101 return I->getOpcode() == IntToPtr;
5102 }
5103 static bool classof(const Value *V) {
5104 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5105 }
5106};
5107
5108//===----------------------------------------------------------------------===//
5109// PtrToIntInst Class
5110//===----------------------------------------------------------------------===//
5111
5112/// This class represents a cast from a pointer to an integer.
5113class PtrToIntInst : public CastInst {
5114protected:
5115 // Note: Instruction needs to be a friend here to call cloneImpl.
5116 friend class Instruction;
5117
5118 /// Clone an identical PtrToIntInst.
5119 PtrToIntInst *cloneImpl() const;
5120
5121public:
5122 /// Constructor with insert-before-instruction semantics
5123 PtrToIntInst(
5124 Value *S, ///< The value to be converted
5125 Type *Ty, ///< The type to convert to
5126 const Twine &NameStr = "", ///< A name for the new instruction
5127 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5128 );
5129
5130 /// Constructor with insert-at-end-of-block semantics
5131 PtrToIntInst(
5132 Value *S, ///< The value to be converted
5133 Type *Ty, ///< The type to convert to
5134 const Twine &NameStr, ///< A name for the new instruction
5135 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5136 );
5137
5138 /// Gets the pointer operand.
5139 Value *getPointerOperand() { return getOperand(0); }
5140 /// Gets the pointer operand.
5141 const Value *getPointerOperand() const { return getOperand(0); }
5142 /// Gets the operand index of the pointer operand.
5143 static unsigned getPointerOperandIndex() { return 0U; }
5144
5145 /// Returns the address space of the pointer operand.
5146 unsigned getPointerAddressSpace() const {
5147 return getPointerOperand()->getType()->getPointerAddressSpace();
5148 }
5149
5150 // Methods for support type inquiry through isa, cast, and dyn_cast:
5151 static bool classof(const Instruction *I) {
5152 return I->getOpcode() == PtrToInt;
5153 }
5154 static bool classof(const Value *V) {
5155 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5156 }
5157};
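An illustrative sketch (not part of the header), assuming Ptr is a pointer-typed Value*, Ctx its LLVMContext, and InsertPt an Instruction*; the accessors above make the source pointer and its address space easy to recover:

  // ptrtoint %Ptr to i64, inserted before InsertPt
  auto *P2I = new PtrToIntInst(Ptr, Type::getInt64Ty(Ctx), "addr", InsertPt);
  unsigned AS = P2I->getPointerAddressSpace(); // address space of %Ptr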
5158
5159//===----------------------------------------------------------------------===//
5160// BitCastInst Class
5161//===----------------------------------------------------------------------===//
5162
5163/// This class represents a no-op cast from one type to another.
5164class BitCastInst : public CastInst {
5165protected:
5166 // Note: Instruction needs to be a friend here to call cloneImpl.
5167 friend class Instruction;
5168
5169 /// Clone an identical BitCastInst.
5170 BitCastInst *cloneImpl() const;
5171
5172public:
5173 /// Constructor with insert-before-instruction semantics
5174 BitCastInst(
5175 Value *S, ///< The value to be casted
5176    Type *Ty, ///< The type to cast to
5177 const Twine &NameStr = "", ///< A name for the new instruction
5178 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5179 );
5180
5181 /// Constructor with insert-at-end-of-block semantics
5182 BitCastInst(
5183 Value *S, ///< The value to be casted
5184    Type *Ty, ///< The type to cast to
5185 const Twine &NameStr, ///< A name for the new instruction
5186 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5187 );
5188
5189 // Methods for support type inquiry through isa, cast, and dyn_cast:
5190 static bool classof(const Instruction *I) {
5191 return I->getOpcode() == BitCast;
5192 }
5193 static bool classof(const Value *V) {
5194 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5195 }
5196};
5197
5198//===----------------------------------------------------------------------===//
5199// AddrSpaceCastInst Class
5200//===----------------------------------------------------------------------===//
5201
5202/// This class represents a conversion between pointers from one address space
5203/// to another.
5204class AddrSpaceCastInst : public CastInst {
5205protected:
5206 // Note: Instruction needs to be a friend here to call cloneImpl.
5207 friend class Instruction;
5208
5209 /// Clone an identical AddrSpaceCastInst.
5210 AddrSpaceCastInst *cloneImpl() const;
5211
5212public:
5213 /// Constructor with insert-before-instruction semantics
5214 AddrSpaceCastInst(
5215 Value *S, ///< The value to be casted
5216    Type *Ty, ///< The type to cast to
5217 const Twine &NameStr = "", ///< A name for the new instruction
5218 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5219 );
5220
5221 /// Constructor with insert-at-end-of-block semantics
5222 AddrSpaceCastInst(
5223 Value *S, ///< The value to be casted
5224    Type *Ty, ///< The type to cast to
5225 const Twine &NameStr, ///< A name for the new instruction
5226 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5227 );
5228
5229 // Methods for support type inquiry through isa, cast, and dyn_cast:
5230 static bool classof(const Instruction *I) {
5231 return I->getOpcode() == AddrSpaceCast;
5232 }
5233 static bool classof(const Value *V) {
5234 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5235 }
5236
5237 /// Gets the pointer operand.
5238 Value *getPointerOperand() {
5239 return getOperand(0);
5240 }
5241
5242 /// Gets the pointer operand.
5243 const Value *getPointerOperand() const {
5244 return getOperand(0);
5245 }
5246
5247 /// Gets the operand index of the pointer operand.
5248 static unsigned getPointerOperandIndex() {
5249 return 0U;
5250 }
5251
5252 /// Returns the address space of the pointer operand.
5253 unsigned getSrcAddressSpace() const {
5254 return getPointerOperand()->getType()->getPointerAddressSpace();
5255 }
5256
5257 /// Returns the address space of the result.
5258 unsigned getDestAddressSpace() const {
5259 return getType()->getPointerAddressSpace();
5260 }
5261};
5262
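A hedged sketch of the address-space accessors, assuming ASC is an AddrSpaceCastInst*:

  // An addrspacecast changes only the address space of a pointer;
  // both sides are queryable:
  unsigned Src = ASC->getSrcAddressSpace();  // address space of the operand
  unsigned Dst = ASC->getDestAddressSpace(); // address space of the result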
5263/// A helper function that returns the pointer operand of a load or store
5264/// instruction. Returns nullptr if not a load or store.
5265inline const Value *getLoadStorePointerOperand(const Value *V) {
5266 if (auto *Load = dyn_cast<LoadInst>(V))
5267 return Load->getPointerOperand();
5268 if (auto *Store = dyn_cast<StoreInst>(V))
5269 return Store->getPointerOperand();
5270 return nullptr;
5271}
5272inline Value *getLoadStorePointerOperand(Value *V) {
5273 return const_cast<Value *>(
5274 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5275}
5276
5277/// A helper function that returns the pointer operand of a load, store
5278/// or GEP instruction. Returns nullptr if not a load, store, or GEP.
5279inline const Value *getPointerOperand(const Value *V) {
5280 if (auto *Ptr = getLoadStorePointerOperand(V))
5281 return Ptr;
5282 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5283 return Gep->getPointerOperand();
5284 return nullptr;
5285}
5286inline Value *getPointerOperand(Value *V) {
5287 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5288}
5289
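Typical use of the two helpers above (illustration only), assuming I is an arbitrary Instruction*:

  if (const Value *Ptr = getPointerOperand(I)) {
    // I is a load, store, or GEP; Ptr is the pointer it operates through.
    (void)Ptr;
  }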
5290/// A helper function that returns the alignment of a load or store instruction.
5291inline Align getLoadStoreAlignment(Value *I) {
5292 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 5293, __extension__ __PRETTY_FUNCTION__))
5293 "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 5293, __extension__ __PRETTY_FUNCTION__))
;
5294 if (auto *LI = dyn_cast<LoadInst>(I))
5295 return LI->getAlign();
5296 return cast<StoreInst>(I)->getAlign();
5297}
5298
5299/// A helper function that returns the address space of the pointer operand of
5300/// a load or store instruction.
5301inline unsigned getLoadStoreAddressSpace(Value *I) {
5302 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 5303, __extension__ __PRETTY_FUNCTION__))
5303 "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 5303, __extension__ __PRETTY_FUNCTION__))
;
5304 if (auto *LI = dyn_cast<LoadInst>(I))
5305 return LI->getPointerAddressSpace();
5306 return cast<StoreInst>(I)->getPointerAddressSpace();
5307}
5308
5309/// A helper function that returns the type of a load or store instruction.
5310inline Type *getLoadStoreType(Value *I) {
5311 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 5312, __extension__ __PRETTY_FUNCTION__))
5312 "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instructions.h"
, 5312, __extension__ __PRETTY_FUNCTION__))
;
5313 if (auto *LI = dyn_cast<LoadInst>(I))
5314 return LI->getType();
5315 return cast<StoreInst>(I)->getValueOperand()->getType();
5316}
5317
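These three accessors assert on anything that is not a load or store, so guard with isa<> first; a sketch, assuming I is an Instruction*:

  if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
    Type *AccessTy = getLoadStoreType(I);        // type read or written
    Align A = getLoadStoreAlignment(I);          // alignment of the access
    unsigned AS = getLoadStoreAddressSpace(I);   // pointer address space
  }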
5318//===----------------------------------------------------------------------===//
5319// FreezeInst Class
5320//===----------------------------------------------------------------------===//
5321
5322/// This class represents a freeze function that returns a random concrete
5323/// value if its operand is either a poison value or an undef value.
5324class FreezeInst : public UnaryInstruction {
5325protected:
5326 // Note: Instruction needs to be a friend here to call cloneImpl.
5327 friend class Instruction;
5328
5329 /// Clone an identical FreezeInst
5330 FreezeInst *cloneImpl() const;
5331
5332public:
5333 explicit FreezeInst(Value *S,
5334 const Twine &NameStr = "",
5335 Instruction *InsertBefore = nullptr);
5336 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5337
5338 // Methods for support type inquiry through isa, cast, and dyn_cast:
5339 static inline bool classof(const Instruction *I) {
5340 return I->getOpcode() == Freeze;
5341 }
5342 static inline bool classof(const Value *V) {
5343 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5344 }
5345};
5346
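A minimal sketch, assuming V is a Value* that may be poison or undef and InsertPt is an Instruction*:

  // %fr = freeze %V -- %fr is some fixed concrete value even if %V is
  // poison or undef, so all later uses observe a consistent value.
  FreezeInst *Fr = new FreezeInst(V, "fr", InsertPt);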
5347} // end namespace llvm
5348
5349#endif // LLVM_IR_INSTRUCTIONS_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Instruction.h

1//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Instruction class, which is the
10// base class for all of the LLVM instructions.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_INSTRUCTION_H
15#define LLVM_IR_INSTRUCTION_H
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/Bitfields.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/ilist_node.h"
22#include "llvm/IR/DebugLoc.h"
23#include "llvm/IR/SymbolTableListTraits.h"
24#include "llvm/IR/User.h"
25#include "llvm/IR/Value.h"
26#include "llvm/Support/AtomicOrdering.h"
27#include "llvm/Support/Casting.h"
28#include <algorithm>
29#include <cassert>
30#include <cstdint>
31#include <utility>
32
33namespace llvm {
34
35class BasicBlock;
36class FastMathFlags;
37class MDNode;
38class Module;
39struct AAMDNodes;
40
41template <> struct ilist_alloc_traits<Instruction> {
42 static inline void deleteNode(Instruction *V);
43};
44
45class Instruction : public User,
46 public ilist_node_with_parent<Instruction, BasicBlock> {
47 BasicBlock *Parent;
48 DebugLoc DbgLoc; // 'dbg' Metadata cache.
49
50 /// Relative order of this instruction in its parent basic block. Used for
51 /// O(1) local dominance checks between instructions.
52 mutable unsigned Order = 0;
53
54protected:
55  // The first 15 bits of `Value::SubclassData` are available for subclasses of
56 // `Instruction` to use.
57 using OpaqueField = Bitfield::Element<uint16_t, 0, 15>;
58
59  // Template alias so that all Instructions storing an alignment use the same
60  // definition.
61 // Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent =
62 // 2^29. We store them as Log2(Alignment), so we need 5 bits to encode the 30
63 // possible values.
64 template <unsigned Offset>
65 using AlignmentBitfieldElementT =
66 typename Bitfield::Element<unsigned, Offset, 5,
67 Value::MaxAlignmentExponent>;
68
69 template <unsigned Offset>
70 using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>;
71
72 template <unsigned Offset>
73 using AtomicOrderingBitfieldElementT =
74 typename Bitfield::Element<AtomicOrdering, Offset, 3,
75 AtomicOrdering::LAST>;
76
77private:
78 // The last bit is used to store whether the instruction has metadata attached
79 // or not.
80 using HasMetadataField = Bitfield::Element<bool, 15, 1>;
81
82protected:
83 ~Instruction(); // Use deleteValue() to delete a generic Instruction.
84
85public:
86 Instruction(const Instruction &) = delete;
87 Instruction &operator=(const Instruction &) = delete;
88
89 /// Specialize the methods defined in Value, as we know that an instruction
90 /// can only be used by other instructions.
91 Instruction *user_back() { return cast<Instruction>(*user_begin());}
92 const Instruction *user_back() const { return cast<Instruction>(*user_begin());}
93
94 inline const BasicBlock *getParent() const { return Parent; }
95 inline BasicBlock *getParent() { return Parent; }
96
97 /// Return the module owning the function this instruction belongs to
98  /// or nullptr if the function does not have a module.
99 ///
100 /// Note: this is undefined behavior if the instruction does not have a
101 /// parent, or the parent basic block does not have a parent function.
102 const Module *getModule() const;
103 Module *getModule() {
104 return const_cast<Module *>(
105 static_cast<const Instruction *>(this)->getModule());
106 }
107
108 /// Return the function this instruction belongs to.
109 ///
110 /// Note: it is undefined behavior to call this on an instruction not
111 /// currently inserted into a function.
112 const Function *getFunction() const;
113 Function *getFunction() {
114 return const_cast<Function *>(
115 static_cast<const Instruction *>(this)->getFunction());
116 }
117
118 /// This method unlinks 'this' from the containing basic block, but does not
119 /// delete it.
120 void removeFromParent();
121
122 /// This method unlinks 'this' from the containing basic block and deletes it.
123 ///
124 /// \returns an iterator pointing to the element after the erased one
125 SymbolTableList<Instruction>::iterator eraseFromParent();
126
127 /// Insert an unlinked instruction into a basic block immediately before
128 /// the specified instruction.
129 void insertBefore(Instruction *InsertPos);
130
131 /// Insert an unlinked instruction into a basic block immediately after the
132 /// specified instruction.
133 void insertAfter(Instruction *InsertPos);
134
135 /// Unlink this instruction from its current basic block and insert it into
136 /// the basic block that MovePos lives in, right before MovePos.
137 void moveBefore(Instruction *MovePos);
138
139 /// Unlink this instruction and insert into BB before I.
140 ///
141 /// \pre I is a valid iterator into BB.
142 void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);
143
144 /// Unlink this instruction from its current basic block and insert it into
145 /// the basic block that MovePos lives in, right after MovePos.
146 void moveAfter(Instruction *MovePos);
147
148 /// Given an instruction Other in the same basic block as this instruction,
149  /// return true if this instruction comes before Other. In the worst case,
150 /// this takes linear time in the number of instructions in the block. The
151 /// results are cached, so in common cases when the block remains unmodified,
152 /// it takes constant time.
153 bool comesBefore(const Instruction *Other) const;
154
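  // Illustration (not part of the original header): A and B must be in the
  // same basic block:
  //   if (A->comesBefore(B)) { /* A executes first within the block */ }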
155 //===--------------------------------------------------------------------===//
156 // Subclass classification.
157 //===--------------------------------------------------------------------===//
158
159 /// Returns a member of one of the enums like Instruction::Add.
160 unsigned getOpcode() const { return getValueID() - InstructionVal; }
161
162 const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
163 bool isTerminator() const { return isTerminator(getOpcode()); }
164 bool isUnaryOp() const { return isUnaryOp(getOpcode()); }
165 bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
166 bool isIntDivRem() const { return isIntDivRem(getOpcode()); }
167 bool isShift() const { return isShift(getOpcode()); }
168 bool isCast() const { return isCast(getOpcode()); }
169 bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
170 bool isExceptionalTerminator() const {
171 return isExceptionalTerminator(getOpcode());
172 }
173
174 /// It checks if this instruction is the only user of at least one of
175 /// its operands.
176 bool isOnlyUserOfAnyOperand();
177
178 bool isIndirectTerminator() const {
179 return isIndirectTerminator(getOpcode());
180 }
181
182 static const char* getOpcodeName(unsigned OpCode);
183
184 static inline bool isTerminator(unsigned OpCode) {
185 return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
186 }
187
188 static inline bool isUnaryOp(unsigned Opcode) {
189 return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd;
190 }
191 static inline bool isBinaryOp(unsigned Opcode) {
192 return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
193 }
194
195 static inline bool isIntDivRem(unsigned Opcode) {
196 return Opcode == UDiv || Opcode == SDiv || Opcode == URem || Opcode == SRem;
197 }
198
199 /// Determine if the Opcode is one of the shift instructions.
200 static inline bool isShift(unsigned Opcode) {
201 return Opcode >= Shl && Opcode <= AShr;
202 }
203
204 /// Return true if this is a logical shift left or a logical shift right.
205 inline bool isLogicalShift() const {
206 return getOpcode() == Shl || getOpcode() == LShr;
207 }
208
209 /// Return true if this is an arithmetic shift right.
210 inline bool isArithmeticShift() const {
211 return getOpcode() == AShr;
212 }
213
214 /// Determine if the Opcode is and/or/xor.
215 static inline bool isBitwiseLogicOp(unsigned Opcode) {
216 return Opcode == And || Opcode == Or || Opcode == Xor;
217 }
218
219 /// Return true if this is and/or/xor.
220 inline bool isBitwiseLogicOp() const {
221 return isBitwiseLogicOp(getOpcode());
222 }
223
224 /// Determine if the OpCode is one of the CastInst instructions.
225 static inline bool isCast(unsigned OpCode) {
226 return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
227 }
228
229 /// Determine if the OpCode is one of the FuncletPadInst instructions.
230 static inline bool isFuncletPad(unsigned OpCode) {
231 return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
232 }
233
234 /// Returns true if the OpCode is a terminator related to exception handling.
235 static inline bool isExceptionalTerminator(unsigned OpCode) {
236 switch (OpCode) {
237 case Instruction::CatchSwitch:
238 case Instruction::CatchRet:
239 case Instruction::CleanupRet:
240 case Instruction::Invoke:
241 case Instruction::Resume:
242 return true;
243 default:
244 return false;
245 }
246 }
247
248 /// Returns true if the OpCode is a terminator with indirect targets.
249 static inline bool isIndirectTerminator(unsigned OpCode) {
250 switch (OpCode) {
251 case Instruction::IndirectBr:
252 case Instruction::CallBr:
253 return true;
254 default:
255 return false;
256 }
257 }
258
259 //===--------------------------------------------------------------------===//
260 // Metadata manipulation.
261 //===--------------------------------------------------------------------===//
262
263 /// Return true if this instruction has any metadata attached to it.
264 bool hasMetadata() const { return DbgLoc || Value::hasMetadata(); }
265
266 /// Return true if this instruction has metadata attached to it other than a
267 /// debug location.
268 bool hasMetadataOtherThanDebugLoc() const { return Value::hasMetadata(); }
269
270 /// Return true if this instruction has the given type of metadata attached.
271 bool hasMetadata(unsigned KindID) const {
272 return getMetadata(KindID) != nullptr;
273 }
274
275 /// Return true if this instruction has the given type of metadata attached.
276 bool hasMetadata(StringRef Kind) const {
277 return getMetadata(Kind) != nullptr;
278 }
279
280 /// Get the metadata of given kind attached to this Instruction.
281 /// If the metadata is not found then return null.
282 MDNode *getMetadata(unsigned KindID) const {
283 if (!hasMetadata()) return nullptr;
30. Assuming the condition is true
31. Taking true branch
32. Returning null pointer, which participates in a condition later
284 return getMetadataImpl(KindID);
285 }
286
287 /// Get the metadata of given kind attached to this Instruction.
288 /// If the metadata is not found then return null.
289 MDNode *getMetadata(StringRef Kind) const {
290 if (!hasMetadata()) return nullptr;
291 return getMetadataImpl(Kind);
292 }
293
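  // Sketch (not in the original header): both getMetadata overloads may
  // return null -- this is the null value the analyzer tracks into the
  // warning above -- so callers must test the result:
  //   if (MDNode *Prof = I->getMetadata(LLVMContext::MD_prof))
  //     /* use Prof */;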
294 /// Get all metadata attached to this Instruction. The first element of each
295 /// pair returned is the KindID, the second element is the metadata value.
296 /// This list is returned sorted by the KindID.
297 void
298 getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
299 if (hasMetadata())
300 getAllMetadataImpl(MDs);
301 }
302
303 /// This does the same thing as getAllMetadata, except that it filters out the
304 /// debug location.
305 void getAllMetadataOtherThanDebugLoc(
306 SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
307 Value::getAllMetadata(MDs);
308 }
309
310 /// Fills the AAMDNodes structure with AA metadata from this instruction.
311 /// When Merge is true, the existing AA metadata is merged with that from this
312 /// instruction providing the most-general result.
313 void getAAMetadata(AAMDNodes &N, bool Merge = false) const;
314
315 /// Set the metadata of the specified kind to the specified node. This updates
316 /// or replaces metadata if already present, or removes it if Node is null.
317 void setMetadata(unsigned KindID, MDNode *Node);
318 void setMetadata(StringRef Kind, MDNode *Node);
319
320 /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
321 /// specifies the list of meta data that needs to be copied. If \p WL is
322 /// empty, all meta data will be copied.
323 void copyMetadata(const Instruction &SrcInst,
324 ArrayRef<unsigned> WL = ArrayRef<unsigned>());
325
326 /// If the instruction has "branch_weights" MD_prof metadata and the MDNode
327 /// has three operands (including name string), swap the order of the
328 /// metadata.
329 void swapProfMetadata();
330
331 /// Drop all unknown metadata except for debug locations.
332 /// @{
333 /// Passes are required to drop metadata they don't understand. This is a
334 /// convenience method for passes to do so.
335 void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
336 void dropUnknownNonDebugMetadata() {
337 return dropUnknownNonDebugMetadata(None);
338 }
339 void dropUnknownNonDebugMetadata(unsigned ID1) {
340 return dropUnknownNonDebugMetadata(makeArrayRef(ID1));
341 }
342 void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
343 unsigned IDs[] = {ID1, ID2};
344 return dropUnknownNonDebugMetadata(IDs);
345 }
346 /// @}
347
348 /// Adds an !annotation metadata node with \p Annotation to this instruction.
349 /// If this instruction already has !annotation metadata, append \p Annotation
350 /// to the existing node.
351 void addAnnotationMetadata(StringRef Annotation);
352
353 /// Sets the metadata on this instruction from the AAMDNodes structure.
354 void setAAMetadata(const AAMDNodes &N);
355
356 /// Retrieve the raw weight values of a conditional branch or select.
357 /// Returns true on success with profile weights filled in.
358 /// Returns false if no metadata or invalid metadata was found.
359 bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const;
360
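  // Illustration only, assuming Br is a conditional branch instruction:
  //   uint64_t TrueWeight, FalseWeight;
  //   if (Br->extractProfMetadata(TrueWeight, FalseWeight))
  //     /* branch_weights metadata was present and valid */;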
361 /// Retrieve total raw weight values of a branch.
362 /// Returns true on success with profile total weights filled in.
363 /// Returns false if no metadata was found.
364 bool extractProfTotalWeight(uint64_t &TotalVal) const;
365
366 /// Set the debug location information for this instruction.
367 void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
368
369 /// Return the debug location for this node as a DebugLoc.
370 const DebugLoc &getDebugLoc() const { return DbgLoc; }
371
372 /// Set or clear the nuw flag on this instruction, which must be an operator
373 /// which supports this flag. See LangRef.html for the meaning of this flag.
374 void setHasNoUnsignedWrap(bool b = true);
375
376 /// Set or clear the nsw flag on this instruction, which must be an operator
377 /// which supports this flag. See LangRef.html for the meaning of this flag.
378 void setHasNoSignedWrap(bool b = true);
379
380 /// Set or clear the exact flag on this instruction, which must be an operator
381 /// which supports this flag. See LangRef.html for the meaning of this flag.
382 void setIsExact(bool b = true);
383
384 /// Determine whether the no unsigned wrap flag is set.
385 bool hasNoUnsignedWrap() const;
386
387 /// Determine whether the no signed wrap flag is set.
388 bool hasNoSignedWrap() const;
389
390 /// Drops flags that may cause this instruction to evaluate to poison despite
391 /// having non-poison inputs.
392 void dropPoisonGeneratingFlags();
393
394 /// Determine whether the exact flag is set.
395 bool isExact() const;
396
397 /// Set or clear all fast-math-flags on this instruction, which must be an
398 /// operator which supports this flag. See LangRef.html for the meaning of
399 /// this flag.
400 void setFast(bool B);
401
402 /// Set or clear the reassociation flag on this instruction, which must be
403 /// an operator which supports this flag. See LangRef.html for the meaning of
404 /// this flag.
405 void setHasAllowReassoc(bool B);
406
407 /// Set or clear the no-nans flag on this instruction, which must be an
408 /// operator which supports this flag. See LangRef.html for the meaning of
409 /// this flag.
410 void setHasNoNaNs(bool B);
411
412 /// Set or clear the no-infs flag on this instruction, which must be an
413 /// operator which supports this flag. See LangRef.html for the meaning of
414 /// this flag.
415 void setHasNoInfs(bool B);
416
417 /// Set or clear the no-signed-zeros flag on this instruction, which must be
418 /// an operator which supports this flag. See LangRef.html for the meaning of
419 /// this flag.
420 void setHasNoSignedZeros(bool B);
421
422 /// Set or clear the allow-reciprocal flag on this instruction, which must be
423 /// an operator which supports this flag. See LangRef.html for the meaning of
424 /// this flag.
425 void setHasAllowReciprocal(bool B);
426
427 /// Set or clear the allow-contract flag on this instruction, which must be
428 /// an operator which supports this flag. See LangRef.html for the meaning of
429 /// this flag.
430 void setHasAllowContract(bool B);
431
432 /// Set or clear the approximate-math-functions flag on this instruction,
433 /// which must be an operator which supports this flag. See LangRef.html for
434 /// the meaning of this flag.
435 void setHasApproxFunc(bool B);
436
437 /// Convenience function for setting multiple fast-math flags on this
438 /// instruction, which must be an operator which supports these flags. See
439 /// LangRef.html for the meaning of these flags.
440 void setFastMathFlags(FastMathFlags FMF);
441
442 /// Convenience function for transferring all fast-math flag values to this
443 /// instruction, which must be an operator which supports these flags. See
444 /// LangRef.html for the meaning of these flags.
445 void copyFastMathFlags(FastMathFlags FMF);
446
447 /// Determine whether all fast-math-flags are set.
448 bool isFast() const;
449
450 /// Determine whether the allow-reassociation flag is set.
451 bool hasAllowReassoc() const;
452
453 /// Determine whether the no-NaNs flag is set.
454 bool hasNoNaNs() const;
455
456 /// Determine whether the no-infs flag is set.
457 bool hasNoInfs() const;
458
459 /// Determine whether the no-signed-zeros flag is set.
460 bool hasNoSignedZeros() const;
461
462 /// Determine whether the allow-reciprocal flag is set.
463 bool hasAllowReciprocal() const;
464
465 /// Determine whether the allow-contract flag is set.
466 bool hasAllowContract() const;
467
468 /// Determine whether the approximate-math-functions flag is set.
469 bool hasApproxFunc() const;
470
471 /// Convenience function for getting all the fast-math flags, which must be an
472 /// operator which supports these flags. See LangRef.html for the meaning of
473 /// these flags.
474 FastMathFlags getFastMathFlags() const;
475
476 /// Copy I's fast-math flags
477 void copyFastMathFlags(const Instruction *I);
478
479 /// Convenience method to copy supported exact, fast-math, and (optionally)
480 /// wrapping flags from V to this instruction.
481 void copyIRFlags(const Value *V, bool IncludeWrapFlags = true);
482
483 /// Logical 'and' of any supported wrapping, exact, and fast-math flags of
484 /// V and this instruction.
485 void andIRFlags(const Value *V);
486
487 /// Merge 2 debug locations and apply it to the Instruction. If the
488  /// instruction is a CallInst, we need to traverse the inline chain to find
489 /// the common scope. This is not efficient for N-way merging as each time
490 /// you merge 2 iterations, you need to rebuild the hashmap to find the
491 /// common scope. However, we still choose this API because:
492 /// 1) Simplicity: it takes 2 locations instead of a list of locations.
493  ///     2) In the worst case, it increases the complexity from O(N*I) to
494 /// O(2*N*I), where N is # of Instructions to merge, and I is the
495 /// maximum level of inline stack. So it is still linear.
496 /// 3) Merging of call instructions should be extremely rare in real
497  ///        applications, thus the N-way merging path should rarely be exercised.
498 /// The DebugLoc attached to this instruction will be overwritten by the
499 /// merged DebugLoc.
500 void applyMergedLocation(const DILocation *LocA, const DILocation *LocB);
501
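  // Sketch: when merging two instructions' locations onto I, assuming LocA
  // and LocB are const DILocation*:
  //   I->applyMergedLocation(LocA, LocB);
  // This overwrites I's DebugLoc with the merged one, as documented above.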
502 /// Updates the debug location given that the instruction has been hoisted
503 /// from a block to a predecessor of that block.
504 /// Note: it is undefined behavior to call this on an instruction not
505 /// currently inserted into a function.
506 void updateLocationAfterHoist();
507
508 /// Drop the instruction's debug location. This does not guarantee removal
509 /// of the !dbg source location attachment, as it must set a line 0 location
510 /// with scope information attached on call instructions. To guarantee
511 /// removal of the !dbg attachment, use the \ref setDebugLoc() API.
512 /// Note: it is undefined behavior to call this on an instruction not
513 /// currently inserted into a function.
514 void dropLocation();
515
516private:
517 // These are all implemented in Metadata.cpp.
518 MDNode *getMetadataImpl(unsigned KindID) const;
519 MDNode *getMetadataImpl(StringRef Kind) const;
520 void
521 getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
522
523public:
524 //===--------------------------------------------------------------------===//
525 // Predicates and helper methods.
526 //===--------------------------------------------------------------------===//
527
528 /// Return true if the instruction is associative:
529 ///
530 /// Associative operators satisfy: x op (y op z) === (x op y) op z
531 ///
532 /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
533 ///
534 bool isAssociative() const LLVM_READONLY__attribute__((__pure__));
535 static bool isAssociative(unsigned Opcode) {
536 return Opcode == And || Opcode == Or || Opcode == Xor ||
537 Opcode == Add || Opcode == Mul;
538 }
539
540 /// Return true if the instruction is commutative:
541 ///
542 /// Commutative operators satisfy: (x op y) === (y op x)
543 ///
544 /// In LLVM, these are the commutative operators, plus SetEQ and SetNE, when
545 /// applied to any type.
546 ///
547 bool isCommutative() const LLVM_READONLY__attribute__((__pure__));
548 static bool isCommutative(unsigned Opcode) {
549 switch (Opcode) {
550 case Add: case FAdd:
551 case Mul: case FMul:
552 case And: case Or: case Xor:
553 return true;
554 default:
555 return false;
556 }
557 }
558
559 /// Return true if the instruction is idempotent:
560 ///
561 /// Idempotent operators satisfy: x op x === x
562 ///
563 /// In LLVM, the And and Or operators are idempotent.
564 ///
565 bool isIdempotent() const { return isIdempotent(getOpcode()); }
566 static bool isIdempotent(unsigned Opcode) {
567 return Opcode == And || Opcode == Or;
568 }
569
570 /// Return true if the instruction is nilpotent:
571 ///
572 /// Nilpotent operators satisfy: x op x === Id,
573 ///
574 /// where Id is the identity for the operator, i.e. a constant such that
575 /// x op Id === x and Id op x === x for all x.
576 ///
577 /// In LLVM, the Xor operator is nilpotent.
578 ///
579 bool isNilpotent() const { return isNilpotent(getOpcode()); }
580 static bool isNilpotent(unsigned Opcode) {
581 return Opcode == Xor;
582 }
583
584 /// Return true if this instruction may modify memory.
585 bool mayWriteToMemory() const;
586
587 /// Return true if this instruction may read memory.
588 bool mayReadFromMemory() const;
589
590 /// Return true if this instruction may read or write memory.
591 bool mayReadOrWriteMemory() const {
592 return mayReadFromMemory() || mayWriteToMemory();
593 }
594
595 /// Return true if this instruction has an AtomicOrdering of unordered or
596 /// higher.
597 bool isAtomic() const;
598
599 /// Return true if this atomic instruction loads from memory.
600 bool hasAtomicLoad() const;
601
602 /// Return true if this atomic instruction stores to memory.
603 bool hasAtomicStore() const;
604
605 /// Return true if this instruction has a volatile memory access.
606 bool isVolatile() const;
607
608 /// Return true if this instruction may throw an exception.
609 bool mayThrow() const;
610
611 /// Return true if this instruction behaves like a memory fence: it can load
612 /// from or store to a memory location without being given one.
613 bool isFenceLike() const {
614 switch (getOpcode()) {
615 default:
616 return false;
617 // This list should be kept in sync with the list in mayWriteToMemory for
618 // all opcodes which don't have a memory location.
619 case Instruction::Fence:
620 case Instruction::CatchPad:
621 case Instruction::CatchRet:
622 case Instruction::Call:
623 case Instruction::Invoke:
624 return true;
625 }
626 }
627
628 /// Return true if the instruction may have side effects.
629 ///
630 /// Side effects are:
631 /// * Writing to memory.
632 /// * Unwinding.
633 /// * Not returning (e.g. an infinite loop).
634 ///
635 /// Note that this does not consider malloc and alloca to have side
636 /// effects because the newly allocated memory is completely invisible to
637 /// instructions which don't use the returned value. For cases where this
638 /// matters, isSafeToSpeculativelyExecute may be more appropriate.
639 bool mayHaveSideEffects() const;
640
641 /// Return true if the instruction can be removed if the result is unused.
642 ///
643 /// When constant folding, some instructions cannot be removed even if their
644 /// results are unused. Specifically, terminator instructions and calls that
645 /// may have side effects cannot be removed without semantically changing the
646 /// generated program.
647 bool isSafeToRemove() const;
648
649 /// Return true if the instruction will return (unwinding is considered as
650 /// a form of returning control flow here).
651 bool willReturn() const;
652
653 /// Return true if the instruction is a variety of EH-block.
654 bool isEHPad() const {
655 switch (getOpcode()) {
656 case Instruction::CatchSwitch:
657 case Instruction::CatchPad:
658 case Instruction::CleanupPad:
659 case Instruction::LandingPad:
660 return true;
661 default:
662 return false;
663 }
664 }
665
666 /// Return true if the instruction is a llvm.lifetime.start or
667 /// llvm.lifetime.end marker.
668 bool isLifetimeStartOrEnd() const;
669
670 /// Return true if the instruction is a llvm.launder.invariant.group or
671 /// llvm.strip.invariant.group.
672 bool isLaunderOrStripInvariantGroup() const;
673
674 /// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
675 bool isDebugOrPseudoInst() const;
676
677 /// Return a pointer to the next non-debug instruction in the same basic
678 /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
679 /// operations if \c SkipPseudoOp is true.
680 const Instruction *
681 getNextNonDebugInstruction(bool SkipPseudoOp = false) const;
682 Instruction *getNextNonDebugInstruction(bool SkipPseudoOp = false) {
683 return const_cast<Instruction *>(
684 static_cast<const Instruction *>(this)->getNextNonDebugInstruction(
685 SkipPseudoOp));
686 }
687
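[Editor's note] The non-const overload above simply forwards to the const implementation through a const_cast, so the traversal logic lives in one place. A hedged sketch of using it to step over debug intrinsics and pseudo probes (assumes I is a valid Instruction inside a basic block; nextRealInst is illustrative):

#include "llvm/IR/Instruction.h"

// Advances to the next "real" instruction, skipping llvm.dbg.* intrinsics
// and pseudo probes; returns nullptr at the end of the block.
static const llvm::Instruction *nextRealInst(const llvm::Instruction *I) {
  return I->getNextNonDebugInstruction(/*SkipPseudoOp=*/true);
}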
688 /// Return a pointer to the previous non-debug instruction in the same basic
689 /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
690 /// operations if \c SkipPseudoOp is true.
691 const Instruction *
692 getPrevNonDebugInstruction(bool SkipPseudoOp = false) const;
693 Instruction *getPrevNonDebugInstruction(bool SkipPseudoOp = false) {
694 return const_cast<Instruction *>(
695 static_cast<const Instruction *>(this)->getPrevNonDebugInstruction(
696 SkipPseudoOp));
697 }
698
699 /// Create a copy of 'this' instruction that is identical in all ways except
700 /// the following:
701 /// * The instruction has no parent
702 /// * The instruction has no name
703 ///
704 Instruction *clone() const;
705
706 /// Return true if the specified instruction is exactly identical to the
707 /// current one. This means that all operands match and any extra information
708 /// (e.g. load is volatile) agree.
709 bool isIdenticalTo(const Instruction *I) const;
710
711 /// This is like isIdenticalTo, except that it ignores the
712 /// SubclassOptionalData flags, which may specify conditions under which the
713 /// instruction's result is undefined.
714 bool isIdenticalToWhenDefined(const Instruction *I) const;
715
716 /// When checking for operation equivalence (using isSameOperationAs) it is
717 /// sometimes useful to ignore certain attributes.
718 enum OperationEquivalenceFlags {
719 /// Check for equivalence ignoring load/store alignment.
720 CompareIgnoringAlignment = 1<<0,
721 /// Check for equivalence treating a type and a vector of that type
722 /// as equivalent.
723 CompareUsingScalarTypes = 1<<1
724 };
725
726 /// This function determines if the specified instruction executes the same
727 /// operation as the current one. This means that the opcodes, type, operand
728 /// types and any other factors affecting the operation must be the same. This
729 /// is similar to isIdenticalTo except the operands themselves don't have to
730 /// be identical.
731 /// @returns true if the specified instruction is the same operation as
732 /// the current one.
733 /// Determine if one instruction is the same operation as another.
734 bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;
735
736 /// Return true if there are any uses of this instruction in blocks other than
737 /// the specified block. Note that PHI nodes are considered to evaluate their
738 /// operands in the corresponding predecessor block.
739 bool isUsedOutsideOfBlock(const BasicBlock *BB) const;
740
741 /// Return the number of successors that this instruction has. The instruction
742 /// must be a terminator.
743 unsigned getNumSuccessors() const;
744
745 /// Return the specified successor. This instruction must be a terminator.
746 BasicBlock *getSuccessor(unsigned Idx) const;
747
748 /// Update the specified successor to point at the provided block. This
749 /// instruction must be a terminator.
750 void setSuccessor(unsigned Idx, BasicBlock *BB);
751
752 /// Replace specified successor OldBB to point at the provided block.
753 /// This instruction must be a terminator.
754 void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB);
755
756 /// Methods for support type inquiry through isa, cast, and dyn_cast:
757 static bool classof(const Value *V) {
758 return V->getValueID() >= Value::InstructionVal;
759 }
760
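[Editor's note] classof is the hook consumed by LLVM's casting machinery; a hedged sketch of the usual pattern (assumes V is a valid Value pointer; numSuccessorsOrZero is illustrative):

#include "llvm/IR/Instruction.h"
#include "llvm/Support/Casting.h"

// dyn_cast calls Instruction::classof(V) and yields nullptr for
// non-instruction Values (arguments, constants, ...), so the inner branch
// is only reached for terminators such as br or switch.
static unsigned numSuccessorsOrZero(llvm::Value *V) {
  if (auto *I = llvm::dyn_cast<llvm::Instruction>(V))
    if (I->isTerminator())
      return I->getNumSuccessors();
  return 0;
}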
761 //----------------------------------------------------------------------
762 // Exported enumerations.
763 //
764 enum TermOps { // These terminate basic blocks
765#define FIRST_TERM_INST(N) TermOpsBegin = N,
766#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
767#define LAST_TERM_INST(N) TermOpsEnd = N+1
768#include "llvm/IR/Instruction.def"
769 };
770
771 enum UnaryOps {
772#define FIRST_UNARY_INST(N) UnaryOpsBegin = N,
773#define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N,
774#define LAST_UNARY_INST(N) UnaryOpsEnd = N+1
775#include "llvm/IR/Instruction.def"
776 };
777
778 enum BinaryOps {
779#define FIRST_BINARY_INST(N) BinaryOpsBegin = N,
780#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
781#define LAST_BINARY_INST(N) BinaryOpsEnd = N+1
782#include "llvm/IR/Instruction.def"
783 };
784
785 enum MemoryOps {
786#define FIRST_MEMORY_INST(N) MemoryOpsBegin = N,
787#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
788#define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1
789#include "llvm/IR/Instruction.def"
790 };
791
792 enum CastOps {
793#define FIRST_CAST_INST(N) CastOpsBegin = N,
794#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
795#define LAST_CAST_INST(N) CastOpsEnd = N+1
796#include "llvm/IR/Instruction.def"
797 };
798
799 enum FuncletPadOps {
800#define FIRST_FUNCLETPAD_INST(N) FuncletPadOpsBegin = N,
801#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
802#define LAST_FUNCLETPAD_INST(N) FuncletPadOpsEnd = N+1
803#include "llvm/IR/Instruction.def"
804 };
805
806 enum OtherOps {
807#define FIRST_OTHER_INST(N) OtherOpsBegin = N,
808#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
809#define LAST_OTHER_INST(N) OtherOpsEnd = N+1
810#include "llvm/IR/Instruction.def"
811 };
812
813private:
814 friend class SymbolTableListTraits<Instruction>;
815 friend class BasicBlock; // For renumbering.
816
817 // Shadow Value::setValueSubclassData with a private forwarding method so that
818 // subclasses cannot accidentally use it.
819 void setValueSubclassData(unsigned short D) {
820 Value::setValueSubclassData(D);
821 }
822
823 unsigned short getSubclassDataFromValue() const {
824 return Value::getSubclassDataFromValue();
825 }
826
827 void setParent(BasicBlock *P);
828
829protected:
830 // Instruction subclasses can stick up to 15 bits of stuff into the
831 // SubclassData field of instruction with these members.
832
833 template <typename BitfieldElement>
834 typename BitfieldElement::Type getSubclassData() const {
835 static_assert(
836 std::is_same<BitfieldElement, HasMetadataField>::value ||
837 !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
838 "Must not overlap with the metadata bit");
839 return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
840 }
841
842 template <typename BitfieldElement>
843 void setSubclassData(typename BitfieldElement::Type Value) {
844 static_assert(
845 std::is_same<BitfieldElement, HasMetadataField>::value ||
846 !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
847 "Must not overlap with the metadata bit");
848 auto Storage = getSubclassDataFromValue();
849 Bitfield::set<BitfieldElement>(Storage, Value);
850 setValueSubclassData(Storage);
851 }
852
853 Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
854 Instruction *InsertBefore = nullptr);
855 Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
856 BasicBlock *InsertAtEnd);
857
858private:
859 /// Create a copy of this instruction.
860 Instruction *cloneImpl() const;
861};
862
863inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
864 V->deleteValue();
865}
866
867} // end namespace llvm
868
869#endif // LLVM_IR_INSTRUCTION_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/DataLayout.h

1//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines layout properties related to datatype size/offset/alignment
10// information. It uses lazy annotations to cache information about how
11// structure types are laid out and used.
12//
13// This structure should be created once, filled in if the defaults are not
14// correct, and then passed around by const&. None of the member functions
15// require modification of the object.
16//
17//===----------------------------------------------------------------------===//
18
19#ifndef LLVM_IR_DATALAYOUT_H
20#define LLVM_IR_DATALAYOUT_H
21
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/STLExtras.h"
24#include "llvm/ADT/SmallVector.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/IR/DerivedTypes.h"
27#include "llvm/IR/Type.h"
28#include "llvm/Support/Casting.h"
29#include "llvm/Support/ErrorHandling.h"
30#include "llvm/Support/MathExtras.h"
31#include "llvm/Support/Alignment.h"
32#include "llvm/Support/TrailingObjects.h"
33#include "llvm/Support/TypeSize.h"
34#include <cassert>
35#include <cstdint>
36#include <string>
37
38// This needs to be outside of the namespace, to avoid conflict with llvm-c
39// decl.
40using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;
41
42namespace llvm {
43
44class GlobalVariable;
45class LLVMContext;
46class Module;
47class StructLayout;
48class Triple;
49class Value;
50
51/// Enum used to categorize the alignment types stored by LayoutAlignElem
52enum AlignTypeEnum {
53 INVALID_ALIGN = 0,
54 INTEGER_ALIGN = 'i',
55 VECTOR_ALIGN = 'v',
56 FLOAT_ALIGN = 'f',
57 AGGREGATE_ALIGN = 'a'
58};
59
60// FIXME: Currently the DataLayout string carries a "preferred alignment"
61// for types. As the DataLayout is module/global, this should likely be
62// sunk down to an FTTI element that is queried rather than a global
63// preference.
64
65/// Layout alignment element.
66///
67/// Stores the alignment data associated with a given alignment type (integer,
68/// vector, float) and type bit width.
69///
70/// \note The unusual order of elements in the structure attempts to reduce
71/// padding and make the structure slightly more cache friendly.
72struct LayoutAlignElem {
73 /// Alignment type from \c AlignTypeEnum
74 unsigned AlignType : 8;
75 unsigned TypeBitWidth : 24;
76 Align ABIAlign;
77 Align PrefAlign;
78
79 static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
80 Align pref_align, uint32_t bit_width);
81
82 bool operator==(const LayoutAlignElem &rhs) const;
83};
84
85/// Layout pointer alignment element.
86///
87/// Stores the alignment data associated with a given pointer and address space.
88///
89/// \note The unusual order of elements in the structure attempts to reduce
90/// padding and make the structure slightly more cache friendly.
91struct PointerAlignElem {
92 Align ABIAlign;
93 Align PrefAlign;
94 uint32_t TypeByteWidth;
95 uint32_t AddressSpace;
96 uint32_t IndexWidth;
97
98 /// Initializer
99 static PointerAlignElem get(uint32_t AddressSpace, Align ABIAlign,
100 Align PrefAlign, uint32_t TypeByteWidth,
101 uint32_t IndexWidth);
102
103 bool operator==(const PointerAlignElem &rhs) const;
104};
105
106/// A parsed version of the target data layout string and methods for
107/// querying it.
108///
109/// The target data layout string is specified *by the target* - a frontend
110/// generating LLVM IR is required to generate the right target data for the
111/// target being codegen'd to.
112class DataLayout {
113public:
114 enum class FunctionPtrAlignType {
115 /// The function pointer alignment is independent of the function alignment.
116 Independent,
117 /// The function pointer alignment is a multiple of the function alignment.
118 MultipleOfFunctionAlign,
119 };
120private:
121 /// Defaults to false.
122 bool BigEndian;
123
124 unsigned AllocaAddrSpace;
125 MaybeAlign StackNaturalAlign;
126 unsigned ProgramAddrSpace;
127 unsigned DefaultGlobalsAddrSpace;
128
129 MaybeAlign FunctionPtrAlign;
130 FunctionPtrAlignType TheFunctionPtrAlignType;
131
132 enum ManglingModeT {
133 MM_None,
134 MM_ELF,
135 MM_MachO,
136 MM_WinCOFF,
137 MM_WinCOFFX86,
138 MM_Mips,
139 MM_XCOFF
140 };
141 ManglingModeT ManglingMode;
142
143 SmallVector<unsigned char, 8> LegalIntWidths;
144
145 /// Primitive type alignment data. This is sorted by type and bit
146 /// width during construction.
147 using AlignmentsTy = SmallVector<LayoutAlignElem, 16>;
148 AlignmentsTy Alignments;
149
150 AlignmentsTy::const_iterator
151 findAlignmentLowerBound(AlignTypeEnum AlignType, uint32_t BitWidth) const {
152 return const_cast<DataLayout *>(this)->findAlignmentLowerBound(AlignType,
153 BitWidth);
154 }
155
156 AlignmentsTy::iterator
157 findAlignmentLowerBound(AlignTypeEnum AlignType, uint32_t BitWidth);
158
159 /// The string representation used to create this DataLayout
160 std::string StringRepresentation;
161
162 using PointersTy = SmallVector<PointerAlignElem, 8>;
163 PointersTy Pointers;
164
165 const PointerAlignElem &getPointerAlignElem(uint32_t AddressSpace) const;
166
167 // The StructType -> StructLayout map.
168 mutable void *LayoutMap = nullptr;
169
170 /// Pointers in these address spaces are non-integral, and don't have a
171 /// well-defined bitwise representation.
172 SmallVector<unsigned, 8> NonIntegralAddressSpaces;
173
174 /// Attempts to set the alignment of the given type. Returns an error
175 /// description on failure.
176 Error setAlignment(AlignTypeEnum align_type, Align abi_align,
177 Align pref_align, uint32_t bit_width);
178
179 /// Attempts to set the alignment of a pointer in the given address space.
180 /// Returns an error description on failure.
181 Error setPointerAlignment(uint32_t AddrSpace, Align ABIAlign, Align PrefAlign,
182 uint32_t TypeByteWidth, uint32_t IndexWidth);
183
184 /// Internal helper to get alignment for integer of given bitwidth.
185 Align getIntegerAlignment(uint32_t BitWidth, bool abi_or_pref) const;
186
187 /// Internal helper method that returns requested alignment for type.
188 Align getAlignment(Type *Ty, bool abi_or_pref) const;
189
190 /// Attempts to parse a target data specification string and reports an error
191 /// if the string is malformed.
192 Error parseSpecifier(StringRef Desc);
193
194 // Free all internal data structures.
195 void clear();
196
197public:
198 /// Constructs a DataLayout from a specification string. See reset().
199 explicit DataLayout(StringRef LayoutDescription) {
200 reset(LayoutDescription);
201 }
202
203 /// Initialize target data from properties stored in the module.
204 explicit DataLayout(const Module *M);
205
206 DataLayout(const DataLayout &DL) { *this = DL; }
207
208 ~DataLayout(); // Not virtual, do not subclass this class
209
210 DataLayout &operator=(const DataLayout &DL) {
211 clear();
212 StringRepresentation = DL.StringRepresentation;
213 BigEndian = DL.isBigEndian();
214 AllocaAddrSpace = DL.AllocaAddrSpace;
215 StackNaturalAlign = DL.StackNaturalAlign;
216 FunctionPtrAlign = DL.FunctionPtrAlign;
217 TheFunctionPtrAlignType = DL.TheFunctionPtrAlignType;
218 ProgramAddrSpace = DL.ProgramAddrSpace;
219 DefaultGlobalsAddrSpace = DL.DefaultGlobalsAddrSpace;
220 ManglingMode = DL.ManglingMode;
221 LegalIntWidths = DL.LegalIntWidths;
222 Alignments = DL.Alignments;
223 Pointers = DL.Pointers;
224 NonIntegralAddressSpaces = DL.NonIntegralAddressSpaces;
225 return *this;
226 }
227
228 bool operator==(const DataLayout &Other) const;
229 bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
230
231 void init(const Module *M);
232
233 /// Parse a data layout string (with fallback to default values).
234 void reset(StringRef LayoutDescription);
235
236 /// Parse a data layout string and return the layout. Return an error
237 /// description on failure.
238 static Expected<DataLayout> parse(StringRef LayoutDescription);
239
240 /// Layout endianness...
241 bool isLittleEndian() const { return !BigEndian; }
242 bool isBigEndian() const { return BigEndian; }
243
244 /// Returns the string representation of the DataLayout.
245 ///
246 /// This representation is in the same format accepted by the string
247 /// constructor above. This should not be used to compare two DataLayouts, as
248 /// different strings can represent the same layout.
249 const std::string &getStringRepresentation() const {
250 return StringRepresentation;
251 }
252
253 /// Test if the DataLayout was constructed from an empty string.
254 bool isDefault() const { return StringRepresentation.empty(); }
255
256 /// Returns true if the specified type is known to be a native integer
257 /// type supported by the CPU.
258 ///
259 /// For example, i64 is not native on most 32-bit CPUs and i37 is not native
260 /// on any known one. This returns false if the integer width is not legal.
261 ///
262 /// The width is specified in bits.
263 bool isLegalInteger(uint64_t Width) const {
264 return llvm::is_contained(LegalIntWidths, Width);
265 }
266
267 bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
268
269 /// Returns true if the given alignment exceeds the natural stack alignment.
270 bool exceedsNaturalStackAlignment(Align Alignment) const {
271 return StackNaturalAlign && (Alignment > *StackNaturalAlign);
272 }
273
274 Align getStackAlignment() const {
275 assert(StackNaturalAlign && "StackNaturalAlign must be defined");
276 return *StackNaturalAlign;
277 }
278
279 unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; }
280
281 /// Returns the alignment of function pointers, which may or may not be
282 /// related to the alignment of functions.
283 /// \see getFunctionPtrAlignType
284 MaybeAlign getFunctionPtrAlign() const { return FunctionPtrAlign; }
285
286 /// Return the type of function pointer alignment.
287 /// \see getFunctionPtrAlign
288 FunctionPtrAlignType getFunctionPtrAlignType() const {
289 return TheFunctionPtrAlignType;
290 }
291
292 unsigned getProgramAddressSpace() const { return ProgramAddrSpace; }
293 unsigned getDefaultGlobalsAddressSpace() const {
294 return DefaultGlobalsAddrSpace;
295 }
296
297 bool hasMicrosoftFastStdCallMangling() const {
298 return ManglingMode == MM_WinCOFFX86;
299 }
300
301 /// Returns true if symbols with leading question marks should not receive IR
302 /// mangling. True for Windows mangling modes.
303 bool doNotMangleLeadingQuestionMark() const {
304 return ManglingMode == MM_WinCOFF || ManglingMode == MM_WinCOFFX86;
305 }
306
307 bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }
308
309 StringRef getLinkerPrivateGlobalPrefix() const {
310 if (ManglingMode == MM_MachO)
311 return "l";
312 return "";
313 }
314
315 char getGlobalPrefix() const {
316 switch (ManglingMode) {
317 case MM_None:
318 case MM_ELF:
319 case MM_Mips:
320 case MM_WinCOFF:
321 case MM_XCOFF:
322 return '\0';
323 case MM_MachO:
324 case MM_WinCOFFX86:
325 return '_';
326 }
327 llvm_unreachable("invalid mangling mode");
328 }
329
330 StringRef getPrivateGlobalPrefix() const {
331 switch (ManglingMode) {
332 case MM_None:
333 return "";
334 case MM_ELF:
335 case MM_WinCOFF:
336 return ".L";
337 case MM_Mips:
338 return "$";
339 case MM_MachO:
340 case MM_WinCOFFX86:
341 return "L";
342 case MM_XCOFF:
343 return "L..";
344 }
345 llvm_unreachable("invalid mangling mode");
346 }
347
348 static const char *getManglingComponent(const Triple &T);
349
350 /// Returns true if the specified type fits in a native integer type
351 /// supported by the CPU.
352 ///
353 /// For example, if the CPU only supports i32 as a native integer type, then
354 /// i27 fits in a legal integer type but i45 does not.
355 bool fitsInLegalInteger(unsigned Width) const {
356 for (unsigned LegalIntWidth : LegalIntWidths)
357 if (Width <= LegalIntWidth)
358 return true;
359 return false;
360 }
361
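[Editor's note] A small sketch of the legality queries above; the layout string is an illustrative assumption (an x86-64-style "n8:16:32:64" native-integer spec):

#include "llvm/IR/DataLayout.h"

static void legalIntegerDemo() {
  llvm::DataLayout DL("e-m:e-i64:64-n8:16:32:64");
  bool Legal64 = DL.isLegalInteger(64);      // true: listed in n8:16:32:64
  bool Legal37 = DL.isLegalInteger(37);      // false: not a native width
  bool Fits37  = DL.fitsInLegalInteger(37);  // true: 37 <= 64
  (void)Legal64; (void)Legal37; (void)Fits37;
}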
362 /// Layout pointer alignment
363 Align getPointerABIAlignment(unsigned AS) const;
364
365 /// Return target's alignment for stack-based pointers
366 /// FIXME: The defaults need to be removed once all of
367 /// the backends/clients are updated.
368 Align getPointerPrefAlignment(unsigned AS = 0) const;
369
370 /// Layout pointer size
371 /// FIXME: The defaults need to be removed once all of
372 /// the backends/clients are updated.
373 unsigned getPointerSize(unsigned AS = 0) const;
374
375 /// Returns the maximum pointer size over all address spaces.
376 unsigned getMaxPointerSize() const;
377
378 // Index size used for address calculation.
379 unsigned getIndexSize(unsigned AS) const;
380
381 /// Return the address spaces containing non-integral pointers. Pointers in
382 /// this address space don't have a well-defined bitwise representation.
383 ArrayRef<unsigned> getNonIntegralAddressSpaces() const {
384 return NonIntegralAddressSpaces;
385 }
386
387 bool isNonIntegralAddressSpace(unsigned AddrSpace) const {
388 ArrayRef<unsigned> NonIntegralSpaces = getNonIntegralAddressSpaces();
389 return is_contained(NonIntegralSpaces, AddrSpace);
390 }
391
392 bool isNonIntegralPointerType(PointerType *PT) const {
393 return isNonIntegralAddressSpace(PT->getAddressSpace());
394 }
395
396 bool isNonIntegralPointerType(Type *Ty) const {
397 auto *PTy = dyn_cast<PointerType>(Ty);
36
Assuming 'Ty' is not a 'PointerType'
398 return PTy && isNonIntegralPointerType(PTy);
36.1
'PTy' is null
37
Returning zero, which participates in a condition later
399 }
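[Editor's note] On analyzer steps 36-37 above: when Ty is not a PointerType, dyn_cast yields null and the short-circuiting `PTy && ...` makes the function return false; that zero then feeds a condition later along the reported MemCpyOptimizer.cpp path. The same null-guarding shape as a self-contained sketch (pointsIntoAddrSpace is illustrative):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"

// dyn_cast returns nullptr when Ty is not a pointer, so PTy must be tested
// before it is dereferenced -- exactly the guard used by
// isNonIntegralPointerType above.
static bool pointsIntoAddrSpace(llvm::Type *Ty, unsigned AS) {
  auto *PTy = llvm::dyn_cast<llvm::PointerType>(Ty);
  return PTy && PTy->getAddressSpace() == AS;
}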
400
401 /// Layout pointer size, in bits
402 /// FIXME: The defaults need to be removed once all of
403 /// the backends/clients are updated.
404 unsigned getPointerSizeInBits(unsigned AS = 0) const {
405 return getPointerSize(AS) * 8;
406 }
407
408 /// Returns the maximum pointer size over all address spaces.
409 unsigned getMaxPointerSizeInBits() const {
410 return getMaxPointerSize() * 8;
411 }
412
413 /// Size in bits of index used for address calculation in getelementptr.
414 unsigned getIndexSizeInBits(unsigned AS) const {
415 return getIndexSize(AS) * 8;
416 }
417
418 /// Layout pointer size, in bits, based on the type. If this function is
419 /// called with a pointer type, then the type size of the pointer is returned.
420 /// If this function is called with a vector of pointers, then the type size
421 /// of the pointer is returned. This should only be called with a pointer or
422 /// vector of pointers.
423 unsigned getPointerTypeSizeInBits(Type *) const;
424
425 /// Layout size of the index used in GEP calculation.
426 /// The function should be called with pointer or vector of pointers type.
427 unsigned getIndexTypeSizeInBits(Type *Ty) const;
428
429 unsigned getPointerTypeSize(Type *Ty) const {
430 return getPointerTypeSizeInBits(Ty) / 8;
431 }
432
433 /// Size examples:
434 ///
435 /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
436 /// ---- ---------- --------------- ---------------
437 /// i1 1 8 8
438 /// i8 8 8 8
439 /// i19 19 24 32
440 /// i32 32 32 32
441 /// i100 100 104 128
442 /// i128 128 128 128
443 /// Float 32 32 32
444 /// Double 64 64 64
445 /// X86_FP80 80 80 96
446 ///
447 /// [*] The alloc size depends on the alignment, and thus on the target.
448 /// These values are for x86-32 linux.
449
450 /// Returns the number of bits necessary to hold the specified type.
451 ///
452 /// If Ty is a scalable vector type, the scalable property will be set and
453 /// the runtime size will be a positive integer multiple of the base size.
454 ///
455 /// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
456 /// have a size (Type::isSized() must return true).
457 TypeSize getTypeSizeInBits(Type *Ty) const;
458
459 /// Returns the maximum number of bytes that may be overwritten by
460 /// storing the specified type.
461 ///
462 /// If Ty is a scalable vector type, the scalable property will be set and
463 /// the runtime size will be a positive integer multiple of the base size.
464 ///
465 /// For example, returns 5 for i36 and 10 for x86_fp80.
466 TypeSize getTypeStoreSize(Type *Ty) const {
467 TypeSize BaseSize = getTypeSizeInBits(Ty);
468 return { (BaseSize.getKnownMinSize() + 7) / 8, BaseSize.isScalable() };
469 }
470
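[Editor's worked example] Applying the formula above, (KnownMinSize + 7) / 8: i36 gives (36 + 7) / 8 = 5 bytes and x86_fp80 gives (80 + 7) / 8 = 10 bytes, matching the doc comment. A hedged sketch for fixed-size types:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// For non-scalable types the known-minimum size is the exact store size.
static uint64_t storeBytes(const llvm::DataLayout &DL, llvm::Type *Ty) {
  return DL.getTypeStoreSize(Ty).getKnownMinSize();
}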
471 /// Returns the maximum number of bits that may be overwritten by
472 /// storing the specified type; always a multiple of 8.
473 ///
474 /// If Ty is a scalable vector type, the scalable property will be set and
475 /// the runtime size will be a positive integer multiple of the base size.
476 ///
477 /// For example, returns 40 for i36 and 80 for x86_fp80.
478 TypeSize getTypeStoreSizeInBits(Type *Ty) const {
479 return 8 * getTypeStoreSize(Ty);
480 }
481
482 /// Returns true if no extra padding bits are needed when storing the
483 /// specified type.
484 ///
485 /// For example, returns false for i19 that has a 24-bit store size.
486 bool typeSizeEqualsStoreSize(Type *Ty) const {
487 return getTypeSizeInBits(Ty) == getTypeStoreSizeInBits(Ty);
488 }
489
490 /// Returns the offset in bytes between successive objects of the
491 /// specified type, including alignment padding.
492 ///
493 /// If Ty is a scalable vector type, the scalable property will be set and
494 /// the runtime size will be a positive integer multiple of the base size.
495 ///
496 /// This is the amount that alloca reserves for this type. For example,
497 /// returns 12 or 16 for x86_fp80, depending on alignment.
498 TypeSize getTypeAllocSize(Type *Ty) const {
499 // Round up to the next alignment boundary.
500 return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
501 }
502
503 /// Returns the offset in bits between successive objects of the
504 /// specified type, including alignment padding; always a multiple of 8.
505 ///
506 /// If Ty is a scalable vector type, the scalable property will be set and
507 /// the runtime size will be a positive integer multiple of the base size.
508 ///
509 /// This is the amount that alloca reserves for this type. For example,
510 /// returns 96 or 128 for x86_fp80, depending on alignment.
511 TypeSize getTypeAllocSizeInBits(Type *Ty) const {
512 return 8 * getTypeAllocSize(Ty);
513 }
514
515 /// Returns the minimum ABI-required alignment for the specified type.
516 /// FIXME: Deprecate this function once migration to Align is over.
517 unsigned getABITypeAlignment(Type *Ty) const;
518
519 /// Returns the minimum ABI-required alignment for the specified type.
520 Align getABITypeAlign(Type *Ty) const;
521
522 /// Helper function to return `Alignment` if it's set or the result of
523 /// `getABITypeAlignment(Ty)`; in either case the result is a valid alignment.
524 inline Align getValueOrABITypeAlignment(MaybeAlign Alignment,
525 Type *Ty) const {
526 return Alignment ? *Alignment : getABITypeAlign(Ty);
527 }
528
529 /// Returns the minimum ABI-required alignment for an integer type of
530 /// the specified bitwidth.
531 Align getABIIntegerTypeAlignment(unsigned BitWidth) const {
532 return getIntegerAlignment(BitWidth, /* abi_or_pref */ true);
533 }
534
535 /// Returns the preferred stack/global alignment for the specified
536 /// type.
537 ///
538 /// This is always at least as good as the ABI alignment.
539 /// FIXME: Deprecate this function once migration to Align is over.
540 unsigned getPrefTypeAlignment(Type *Ty) const;
541
542 /// Returns the preferred stack/global alignment for the specified
543 /// type.
544 ///
545 /// This is always at least as good as the ABI alignment.
546 Align getPrefTypeAlign(Type *Ty) const;
547
548 /// Returns an integer type with size at least as big as that of a
549 /// pointer in the given address space.
550 IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
551
552 /// Returns an integer (vector of integer) type with size at least as
553 /// big as that of a pointer of the given pointer (vector of pointer) type.
554 Type *getIntPtrType(Type *) const;
555
556 /// Returns the smallest integer type with size at least as big as
557 /// Width bits.
558 Type *getSmallestLegalIntType(LLVMContext &C, unsigned Width = 0) const;
559
560 /// Returns the largest legal integer type, or null if none are set.
561 Type *getLargestLegalIntType(LLVMContext &C) const {
562 unsigned LargestSize = getLargestLegalIntTypeSizeInBits();
563 return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
564 }
565
566 /// Returns the size of largest legal integer type size, or 0 if none
567 /// are set.
568 unsigned getLargestLegalIntTypeSizeInBits() const;
569
570 /// Returns the type of a GEP index.
571 /// If it was not specified explicitly, it will be the integer type of the
572 /// pointer width - IntPtrType.
573 Type *getIndexType(Type *PtrTy) const;
574
575 /// Returns the offset from the beginning of the type for the specified
576 /// indices.
577 ///
578 /// Note that this takes the element type, not the pointer type.
579 /// This is used to implement getelementptr.
580 int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef<Value *> Indices) const;
581
582 /// Returns a StructLayout object, indicating the alignment of the
583 /// struct, its size, and the offsets of its fields.
584 ///
585 /// Note that this information is lazily cached.
586 const StructLayout *getStructLayout(StructType *Ty) const;
587
588 /// Returns the preferred alignment of the specified global.
589 ///
590 /// This includes an explicitly requested alignment (if the global has one).
591 Align getPreferredAlign(const GlobalVariable *GV) const;
592};
593
594inline DataLayout *unwrap(LLVMTargetDataRef P) {
595 return reinterpret_cast<DataLayout *>(P);
596}
597
598inline LLVMTargetDataRef wrap(const DataLayout *P) {
599 return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
600}
601
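[Editor's note] wrap/unwrap above bridge to the C API's LLVMTargetDataRef; a round trip is the identity, since both directions are reinterpret_casts. Minimal sketch (assumes DL is a valid DataLayout):

#include "llvm/IR/DataLayout.h"

static bool roundTripsThroughCAPI(llvm::DataLayout &DL) {
  LLVMTargetDataRef Ref = llvm::wrap(&DL); // opaque handle for llvm-c clients
  return llvm::unwrap(Ref) == &DL;         // true: pointer identity preserved
}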
602/// Used to lazily calculate structure layout information for a target machine,
603/// based on the DataLayout structure.
604class StructLayout final : public TrailingObjects<StructLayout, uint64_t> {
605 uint64_t StructSize;
606 Align StructAlignment;
607 unsigned IsPadded : 1;
608 unsigned NumElements : 31;
609
610public:
611 uint64_t getSizeInBytes() const { return StructSize; }
612
613 uint64_t getSizeInBits() const { return 8 * StructSize; }
614
615 Align getAlignment() const { return StructAlignment; }
616
617 /// Returns whether the struct has padding or not between its fields.
618 /// NB: Padding in nested elements is not taken into account.
619 bool hasPadding() const { return IsPadded; }
620
621 /// Given a valid byte offset into the structure, returns the structure
622 /// index that contains it.
623 unsigned getElementContainingOffset(uint64_t Offset) const;
624
625 MutableArrayRef<uint64_t> getMemberOffsets() {
626 return llvm::makeMutableArrayRef(getTrailingObjects<uint64_t>(),
627 NumElements);
628 }
629
630 ArrayRef<uint64_t> getMemberOffsets() const {
631 return llvm::makeArrayRef(getTrailingObjects<uint64_t>(), NumElements);
632 }
633
634 uint64_t getElementOffset(unsigned Idx) const {
635 assert(Idx < NumElements && "Invalid element idx!");
636 return getMemberOffsets()[Idx];
637 }
638
639 uint64_t getElementOffsetInBits(unsigned Idx) const {
640 return getElementOffset(Idx) * 8;
641 }
642
643private:
644 friend class DataLayout; // Only DataLayout can create this class
645
646 StructLayout(StructType *ST, const DataLayout &DL);
647
648 size_t numTrailingObjects(OverloadToken<uint64_t>) const {
649 return NumElements;
650 }
651};
652
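[Editor's note] A hedged sketch of the StructLayout queries above, assuming a live LLVMContext and a DataLayout for the target; the { i8, i32 } struct is an illustrative choice:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"

// With a typical layout where i32 has 4-byte ABI alignment, the i8 field sits
// at offset 0 and the i32 field is padded up to offset 4, so hasPadding() is
// true and getElementOffset(1) == 4.
static uint64_t secondFieldOffset(const llvm::DataLayout &DL,
                                  llvm::LLVMContext &Ctx) {
  llvm::StructType *ST = llvm::StructType::get(
      Ctx, {llvm::Type::getInt8Ty(Ctx), llvm::Type::getInt32Ty(Ctx)});
  return DL.getStructLayout(ST)->getElementOffset(1);
}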
653// The implementation of this method is provided inline as it is particularly
654// well suited to constant folding when called on a specific Type subclass.
655inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
656 assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
657 switch (Ty->getTypeID()) {
658 case Type::LabelTyID:
659 return TypeSize::Fixed(getPointerSizeInBits(0));
660 case Type::PointerTyID:
661 return TypeSize::Fixed(getPointerSizeInBits(Ty->getPointerAddressSpace()));
662 case Type::ArrayTyID: {
663 ArrayType *ATy = cast<ArrayType>(Ty);
664 return ATy->getNumElements() *
665 getTypeAllocSizeInBits(ATy->getElementType());
666 }
667 case Type::StructTyID:
668 // Get the layout annotation... which is lazily created on demand.
669 return TypeSize::Fixed(
670 getStructLayout(cast<StructType>(Ty))->getSizeInBits());
671 case Type::IntegerTyID:
672 return TypeSize::Fixed(Ty->getIntegerBitWidth());
673 case Type::HalfTyID:
674 case Type::BFloatTyID:
675 return TypeSize::Fixed(16);
676 case Type::FloatTyID:
677 return TypeSize::Fixed(32);
678 case Type::DoubleTyID:
679 case Type::X86_MMXTyID:
680 return TypeSize::Fixed(64);
681 case Type::PPC_FP128TyID:
682 case Type::FP128TyID:
683 return TypeSize::Fixed(128);
684 case Type::X86_AMXTyID:
685 return TypeSize::Fixed(8192);
686 // In memory objects this is always aligned to a higher boundary, but
687 // only 80 bits contain information.
688 case Type::X86_FP80TyID:
689 return TypeSize::Fixed(80);
690 case Type::FixedVectorTyID:
691 case Type::ScalableVectorTyID: {
692 VectorType *VTy = cast<VectorType>(Ty);
693 auto EltCnt = VTy->getElementCount();
694 uint64_t MinBits = EltCnt.getKnownMinValue() *
695 getTypeSizeInBits(VTy->getElementType()).getFixedSize();
696 return TypeSize(MinBits, EltCnt.isScalable());
697 }
698 default:
699 llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
700 }
701}
702
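[Editor's worked example] For the vector case above: with <4 x i32>, EltCnt.getKnownMinValue() is 4 and the element size is 32 bits, so MinBits = 4 * 32 = 128 and TypeSize(128, /*Scalable=*/false) is returned; for <vscale x 4 x i32> the same minimum of 128 bits is returned with the scalable bit set, meaning the runtime size is 128 * vscale.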
703} // end namespace llvm
704
705#endif // LLVM_IR_DATALAYOUT_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Value.h

1//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the Value class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_IR_VALUE_H
14#define LLVM_IR_VALUE_H
15
16#include "llvm-c/Types.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/ADT/iterator_range.h"
20#include "llvm/IR/Use.h"
21#include "llvm/Support/Alignment.h"
22#include "llvm/Support/CBindingWrapping.h"
23#include "llvm/Support/Casting.h"
24#include <cassert>
25#include <iterator>
26#include <memory>
27
28namespace llvm {
29
30class APInt;
31class Argument;
32class BasicBlock;
33class Constant;
34class ConstantData;
35class ConstantAggregate;
36class DataLayout;
37class Function;
38class GlobalAlias;
39class GlobalIFunc;
40class GlobalIndirectSymbol;
41class GlobalObject;
42class GlobalValue;
43class GlobalVariable;
44class InlineAsm;
45class Instruction;
46class LLVMContext;
47class MDNode;
48class Module;
49class ModuleSlotTracker;
50class raw_ostream;
51template<typename ValueTy> class StringMapEntry;
52class Twine;
53class Type;
54class User;
55
56using ValueName = StringMapEntry<Value *>;
57
58//===----------------------------------------------------------------------===//
59// Value Class
60//===----------------------------------------------------------------------===//
61
62/// LLVM Value Representation
63///
64/// This is a very important LLVM class. It is the base class of all values
65/// computed by a program that may be used as operands to other values. Value is
66/// the super class of other important classes such as Instruction and Function.
67/// All Values have a Type. Type is not a subclass of Value. Some values can
68/// have a name and they belong to some Module. Setting the name on the Value
69/// automatically updates the module's symbol table.
70///
71/// Every value has a "use list" that keeps track of which other Values are
72/// using this Value. A Value can also have an arbitrary number of ValueHandle
73/// objects that watch it and listen to RAUW and Destroy events. See
74/// llvm/IR/ValueHandle.h for details.
75class Value {
76 Type *VTy;
77 Use *UseList;
78
79 friend class ValueAsMetadata; // Allow access to IsUsedByMD.
80 friend class ValueHandleBase;
81
82 const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
83 unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
84
85protected:
86 /// Hold subclass data that can be dropped.
87 ///
88 /// This member is similar to SubclassData, however it is for holding
89 /// information which may be used to aid optimization, but which may be
90 /// cleared to zero without affecting conservative interpretation.
91 unsigned char SubclassOptionalData : 7;
92
93private:
94 /// Hold arbitrary subclass data.
95 ///
96 /// This member is defined by this class, but is not used for anything.
97 /// Subclasses can use it to hold whatever state they find useful. This
98 /// field is initialized to zero by the ctor.
99 unsigned short SubclassData;
100
101protected:
102 /// The number of operands in the subclass.
103 ///
104 /// This member is defined by this class, but not used for anything.
105 /// Subclasses can use it to store their number of operands, if they have
106 /// any.
107 ///
108 /// This is stored here to save space in User on 64-bit hosts. Since most
109 /// instances of Value have operands, 32-bit hosts aren't significantly
110 /// affected.
111 ///
112 /// Note, this should *NOT* be used directly by any class other than User.
113 /// User uses this value to find the Use list.
114 enum : unsigned { NumUserOperandsBits = 27 };
115 unsigned NumUserOperands : NumUserOperandsBits;
116
117 // Use the same type as the bitfield above so that MSVC will pack them.
118 unsigned IsUsedByMD : 1;
119 unsigned HasName : 1;
120 unsigned HasMetadata : 1; // Has metadata attached to this?
121 unsigned HasHungOffUses : 1;
122 unsigned HasDescriptor : 1;
123
124private:
125 template <typename UseT> // UseT == 'Use' or 'const Use'
126 class use_iterator_impl {
127 friend class Value;
128
129 UseT *U;
130
131 explicit use_iterator_impl(UseT *u) : U(u) {}
132
133 public:
134 using iterator_category = std::forward_iterator_tag;
135 using value_type = UseT *;
136 using difference_type = std::ptrdiff_t;
137 using pointer = value_type *;
138 using reference = value_type &;
139
140 use_iterator_impl() : U() {}
141
142 bool operator==(const use_iterator_impl &x) const { return U == x.U; }
143 bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
144
145 use_iterator_impl &operator++() { // Preincrement
146 assert(U && "Cannot increment end iterator!");
147 U = U->getNext();
148 return *this;
149 }
150
151 use_iterator_impl operator++(int) { // Postincrement
152 auto tmp = *this;
153 ++*this;
154 return tmp;
155 }
156
157 UseT &operator*() const {
158 assert(U && "Cannot dereference end iterator!");
159 return *U;
160 }
161
162 UseT *operator->() const { return &operator*(); }
163
164 operator use_iterator_impl<const UseT>() const {
165 return use_iterator_impl<const UseT>(U);
166 }
167 };
168
169 template <typename UserTy> // UserTy == 'User' or 'const User'
170 class user_iterator_impl {
171 use_iterator_impl<Use> UI;
172 explicit user_iterator_impl(Use *U) : UI(U) {}
173 friend class Value;
174
175 public:
176 using iterator_category = std::forward_iterator_tag;
177 using value_type = UserTy *;
178 using difference_type = std::ptrdiff_t;
179 using pointer = value_type *;
180 using reference = value_type &;
181
182 user_iterator_impl() = default;
183
184 bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
185 bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
186
187 /// Returns true if this iterator is equal to user_end() on the value.
188 bool atEnd() const { return *this == user_iterator_impl(); }
189
190 user_iterator_impl &operator++() { // Preincrement
191 ++UI;
192 return *this;
193 }
194
195 user_iterator_impl operator++(int) { // Postincrement
196 auto tmp = *this;
197 ++*this;
198 return tmp;
199 }
200
201 // Retrieve a pointer to the current User.
202 UserTy *operator*() const {
203 return UI->getUser();
204 }
205
206 UserTy *operator->() const { return operator*(); }
207
208 operator user_iterator_impl<const UserTy>() const {
209 return user_iterator_impl<const UserTy>(*UI);
210 }
211
212 Use &getUse() const { return *UI; }
213 };
214
215protected:
216 Value(Type *Ty, unsigned scid);
217
218 /// Value's destructor should be virtual by design, but that would require
219 /// that Value and all of its subclasses have a vtable that effectively
220 /// duplicates the information in the value ID. As a size optimization, the
221 /// destructor has been protected, and the caller should manually call
222 /// deleteValue.
223 ~Value(); // Use deleteValue() to delete a generic Value.
224
225public:
226 Value(const Value &) = delete;
227 Value &operator=(const Value &) = delete;
228
229 /// Delete a pointer to a generic Value.
230 void deleteValue();
231
232 /// Support for debugging, callable in GDB: V->dump()
233 void dump() const;
234
235 /// Implement operator<< on Value.
236 /// @{
237 void print(raw_ostream &O, bool IsForDebug = false) const;
238 void print(raw_ostream &O, ModuleSlotTracker &MST,
239 bool IsForDebug = false) const;
240 /// @}
241
242 /// Print the name of this Value out to the specified raw_ostream.
243 ///
244 /// This is useful when you just want to print 'int %reg126', not the
245 /// instruction that generated it. If you specify a Module for context, then
246 /// even constants get pretty-printed; for example, the type of a null
247 /// pointer is printed symbolically.
248 /// @{
249 void printAsOperand(raw_ostream &O, bool PrintType = true,
250 const Module *M = nullptr) const;
251 void printAsOperand(raw_ostream &O, bool PrintType,
252 ModuleSlotTracker &MST) const;
253 /// @}
254
255 /// All values are typed, get the type of this value.
256 Type *getType() const { return VTy; }
257
258 /// All values hold a context through their type.
259 LLVMContext &getContext() const;
260
261 // All values can potentially be named.
262 bool hasName() const { return HasName; }
263 ValueName *getValueName() const;
264 void setValueName(ValueName *VN);
265
266private:
267 void destroyValueName();
268 enum class ReplaceMetadataUses { No, Yes };
269 void doRAUW(Value *New, ReplaceMetadataUses);
270 void setNameImpl(const Twine &Name);
271
272public:
273 /// Return a constant reference to the value's name.
274 ///
275 /// This is guaranteed to return the same reference as long as the value is not
276 /// modified. If the value has a name, this does a hashtable lookup, so it's
277 /// not free.
278 StringRef getName() const;
279
280 /// Change the name of the value.
281 ///
282 /// Choose a new unique name if the provided name is taken.
283 ///
284 /// \param Name The new name; or "" if the value's name should be removed.
285 void setName(const Twine &Name);
286
287 /// Transfer the name from V to this value.
288 ///
289 /// After taking V's name, sets V's name to empty.
290 ///
291 /// \note It is an error to call V->takeName(V).
292 void takeName(Value *V);
293
294#ifndef NDEBUG
295 std::string getNameOrAsOperand() const;
296#endif
297
298 /// Change all uses of this to point to a new Value.
299 ///
300 /// Go through the uses list for this definition and make each use point to
301 /// "V" instead of "this". After this completes, 'this's use list is
302 /// guaranteed to be empty.
303 void replaceAllUsesWith(Value *V);
304
305 /// Change non-metadata uses of this to point to a new Value.
306 ///
307 /// Go through the uses list for this definition and make each use point to
308 /// "V" instead of "this". This function skips metadata entries in the list.
309 void replaceNonMetadataUsesWith(Value *V);
310
311 /// Go through the uses list for this definition and make each use point
312 /// to "V" if the callback ShouldReplace returns true for the given Use.
313 /// Unlike replaceAllUsesWith() this function does not support basic block
314 /// values.
315 void replaceUsesWithIf(Value *New,
316 llvm::function_ref<bool(Use &U)> ShouldReplace);
317
318 /// replaceUsesOutsideBlock - Go through the uses list for this definition and
319 /// make each use point to "V" instead of "this" when the use is outside the
320 /// block. 'This's use list is expected to have at least one element.
321 /// Unlike replaceAllUsesWith() this function does not support basic block
322 /// values.
323 void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
324
325 //----------------------------------------------------------------------
326 // Methods for handling the chain of uses of this Value.
327 //
328 // Materializing a function can introduce new uses, so these methods come in
329 // two variants:
330 // The methods that start with materialized_ check the uses that are
331 // currently known given which functions are materialized. Be very careful
332 // when using them since you might not get all uses.
333 // The methods that don't start with materialized_ assert that the module is
334 // fully materialized.
335 void assertModuleIsMaterializedImpl() const;
336 // This indirection exists so we can keep assertModuleIsMaterializedImpl()
337 // around in release builds of Value.cpp to be linked with other code built
338 // in debug mode. But this avoids calling it in any of the release built code.
339 void assertModuleIsMaterialized() const {
340#ifndef NDEBUG
341 assertModuleIsMaterializedImpl();
342#endif
343 }
344
345 bool use_empty() const {
346 assertModuleIsMaterialized();
347 return UseList == nullptr;
348 }
349
350 bool materialized_use_empty() const {
351 return UseList == nullptr;
352 }
353
354 using use_iterator = use_iterator_impl<Use>;
355 using const_use_iterator = use_iterator_impl<const Use>;
356
357 use_iterator materialized_use_begin() { return use_iterator(UseList); }
358 const_use_iterator materialized_use_begin() const {
359 return const_use_iterator(UseList);
360 }
361 use_iterator use_begin() {
362 assertModuleIsMaterialized();
363 return materialized_use_begin();
364 }
365 const_use_iterator use_begin() const {
366 assertModuleIsMaterialized();
367 return materialized_use_begin();
368 }
369 use_iterator use_end() { return use_iterator(); }
370 const_use_iterator use_end() const { return const_use_iterator(); }
371 iterator_range<use_iterator> materialized_uses() {
372 return make_range(materialized_use_begin(), use_end());
373 }
374 iterator_range<const_use_iterator> materialized_uses() const {
375 return make_range(materialized_use_begin(), use_end());
376 }
377 iterator_range<use_iterator> uses() {
378 assertModuleIsMaterialized();
379 return materialized_uses();
380 }
381 iterator_range<const_use_iterator> uses() const {
382 assertModuleIsMaterialized();
383 return materialized_uses();
384 }
385
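[Editor's note] A hedged sketch of the range accessors above (assumes V is a valid Value in a fully materialized module; counting by traversal is what getNumUses() does, which is why the hasOneUse/hasNUses shortcuts below exist):

#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"

// Walks the intrusive use list once; linear in the number of uses.
static unsigned countUses(const llvm::Value *V) {
  unsigned N = 0;
  for (const llvm::Use &U : V->uses()) {
    (void)U;
    ++N;
  }
  return N;
}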
386 bool user_empty() const {
387 assertModuleIsMaterialized();
388 return UseList == nullptr;
389 }
390
391 using user_iterator = user_iterator_impl<User>;
392 using const_user_iterator = user_iterator_impl<const User>;
393
394 user_iterator materialized_user_begin() { return user_iterator(UseList); }
395 const_user_iterator materialized_user_begin() const {
396 return const_user_iterator(UseList);
397 }
398 user_iterator user_begin() {
399 assertModuleIsMaterialized();
400 return materialized_user_begin();
401 }
402 const_user_iterator user_begin() const {
403 assertModuleIsMaterialized();
404 return materialized_user_begin();
405 }
406 user_iterator user_end() { return user_iterator(); }
407 const_user_iterator user_end() const { return const_user_iterator(); }
408 User *user_back() {
409 assertModuleIsMaterialized();
410 return *materialized_user_begin();
411 }
412 const User *user_back() const {
413 assertModuleIsMaterialized();
414 return *materialized_user_begin();
415 }
416 iterator_range<user_iterator> materialized_users() {
417 return make_range(materialized_user_begin(), user_end());
418 }
419 iterator_range<const_user_iterator> materialized_users() const {
420 return make_range(materialized_user_begin(), user_end());
421 }
422 iterator_range<user_iterator> users() {
423 assertModuleIsMaterialized();
424 return materialized_users();
425 }
426 iterator_range<const_user_iterator> users() const {
427 assertModuleIsMaterialized();
428 return materialized_users();
429 }
430
431 /// Return true if there is exactly one use of this value.
432 ///
433 /// This is specialized because it is a common request and does not require
434 /// traversing the whole use list.
435 bool hasOneUse() const { return hasSingleElement(uses()); }
48
Calling 'hasSingleElement<llvm::iterator_range<llvm::Value::use_iterator_impl<const llvm::Use>>>'
51
Returning from 'hasSingleElement<llvm::iterator_range<llvm::Value::use_iterator_impl<const llvm::Use>>>'
52
Returning value, which participates in a condition later
436
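[Editor's note] On the hasOneUse/hasOneUser distinction documented below: an instruction like add i32 %x, %x contributes two uses of %x through a single user, so hasOneUse() is false for %x while hasOneUser() is still true. Hedged sketch (hasSingleConsumer is illustrative):

#include "llvm/IR/Value.h"

// One use always implies one user, but not vice versa.
static bool hasSingleConsumer(const llvm::Value *V) {
  return V->hasOneUser(); // true for add %x, %x although hasOneUse() is false
}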
437 /// Return true if this Value has exactly N uses.
438 bool hasNUses(unsigned N) const;
439
440 /// Return true if this value has N uses or more.
441 ///
442 /// This is logically equivalent to getNumUses() >= N.
443 bool hasNUsesOrMore(unsigned N) const;
444
445 /// Return true if there is exactly one user of this value.
446 ///
447 /// Note that this is not the same as "has one use". If a value has one use,
448 /// then there certainly is a single user. But if value has several uses,
449 /// it is possible that all uses are in a single user, or not.
450 ///
451 /// This check is potentially costly, since it requires traversing,
452 /// in the worst case, the whole use list of a value.
453 bool hasOneUser() const;
454
455 /// Return true if there is exactly one use of this value that cannot be
456 /// dropped.
457 ///
458 /// This is specialized because it is a common request and does not require
459 /// traversing the whole use list.
460 Use *getSingleUndroppableUse();
461 const Use *getSingleUndroppableUse() const {
462 return const_cast<Value *>(this)->getSingleUndroppableUse();
463 }
464
465 /// Return true if this value has exactly N undroppable uses.
466 ///
467 /// This is specialized because it is a common request and does not require
468 /// traversing the whole use list.
469 bool hasNUndroppableUses(unsigned N) const;
470
471 /// Return true if this value has N undroppable uses or more.
472 ///
473 /// This is logically equivalent to getNumUses() >= N.
474 bool hasNUndroppableUsesOrMore(unsigned N) const;
475
476 /// Remove every use that can safely be removed.
477 ///
478 /// This will remove, for example, uses in llvm.assume.
479 /// This should be used when one wants to perform a transformation but
480 /// some droppable uses prevent it.
481 /// This function optionally takes a filter to only remove some droppable
482 /// uses.
483 void dropDroppableUses(llvm::function_ref<bool(const Use *)> ShouldDrop =
484 [](const Use *) { return true; });
485
486 /// Remove every use of this value in \p User that can safely be removed.
487 void dropDroppableUsesIn(User &Usr);
488
489 /// Remove the droppable use \p U.
490 static void dropDroppableUse(Use &U);
491
492 /// Check if this value is used in the specified basic block.
493 bool isUsedInBasicBlock(const BasicBlock *BB) const;
494
495 /// This method computes the number of uses of this Value.
496 ///
497 /// This is a linear time operation. Use hasOneUse, hasNUses, or
498 /// hasNUsesOrMore to check for specific values.
499 unsigned getNumUses() const;
500
501 /// This method should only be used by the Use class.
502 void addUse(Use &U) { U.addToList(&UseList); }
503
504 /// Concrete subclass of this.
505 ///
506 /// An enumeration for keeping track of the concrete subclass of Value that
507 /// is actually instantiated. Values of this enumeration are kept in the
508 /// Value classes SubclassID field. They are used for concrete type
509 /// identification.
510 enum ValueTy {
511#define HANDLE_VALUE(Name) Name##Val,
512#include "llvm/IR/Value.def"
513
514 // Markers:
515#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
516#include "llvm/IR/Value.def"
517 };
518
519 /// Return an ID for the concrete type of this object.
520 ///
521 /// This is used to implement the classof checks. This should not be used
522 /// for any other purpose, as the values may change as LLVM evolves. Also,
523 /// note that for instructions, the Instruction's opcode is added to
524 /// InstructionVal. So this means three things:
525 /// # there is no value with code InstructionVal (no opcode==0).
526 /// # there are more possible values for the value type than in ValueTy enum.
527 /// # the InstructionVal enumerator must be the highest valued enumerator in
528 /// the ValueTy enum.
529 unsigned getValueID() const {
530 return SubclassID;
531 }
532
533 /// Return the raw optional flags value contained in this value.
534 ///
535 /// This should only be used when testing two Values for equivalence.
536 unsigned getRawSubclassOptionalData() const {
537 return SubclassOptionalData;
538 }
539
540 /// Clear the optional flags contained in this value.
541 void clearSubclassOptionalData() {
542 SubclassOptionalData = 0;
543 }
544
545 /// Check the optional flags for equality.
546 bool hasSameSubclassOptionalData(const Value *V) const {
547 return SubclassOptionalData == V->SubclassOptionalData;
548 }
549
550 /// Return true if there is a value handle associated with this value.
551 bool hasValueHandle() const { return HasValueHandle; }
552
553 /// Return true if there is metadata referencing this value.
554 bool isUsedByMetadata() const { return IsUsedByMD; }
555
556 // Return true if this value is only transitively referenced by metadata.
557 bool isTransitiveUsedByMetadataOnly() const;
558
559protected:
560 /// Get the current metadata attachments for the given kind, if any.
561 ///
562 /// These functions require that the value have at most a single attachment
563 /// of the given kind, and return \c nullptr if such an attachment is missing.
564 /// @{
565 MDNode *getMetadata(unsigned KindID) const;
566 MDNode *getMetadata(StringRef Kind) const;
567 /// @}
568
569 /// Appends all attachments with the given ID to \c MDs in insertion order.
570 /// If the Value has no attachments with the given ID, or if ID is invalid,
571 /// leaves MDs unchanged.
572 /// @{
573 void getMetadata(unsigned KindID, SmallVectorImpl<MDNode *> &MDs) const;
574 void getMetadata(StringRef Kind, SmallVectorImpl<MDNode *> &MDs) const;
575 /// @}
576
577 /// Appends all metadata attached to this value to \c MDs, sorting by
578 /// KindID. The first element of each pair returned is the KindID, the second
579 /// element is the metadata value. Attachments with the same ID appear in
580 /// insertion order.
581 void
582 getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;
583
584 /// Return true if this value has any metadata attached to it.
585 bool hasMetadata() const { return (bool)HasMetadata; }
586
587 /// Return true if this value has the given type of metadata attached.
588 /// @{
589 bool hasMetadata(unsigned KindID) const {
590 return getMetadata(KindID) != nullptr;
591 }
592 bool hasMetadata(StringRef Kind) const {
593 return getMetadata(Kind) != nullptr;
594 }
595 /// @}
596
597 /// Set a particular kind of metadata attachment.
598 ///
599 /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
600 /// replacing it if it already exists.
601 /// @{
602 void setMetadata(unsigned KindID, MDNode *Node);
603 void setMetadata(StringRef Kind, MDNode *Node);
604 /// @}
605
606 /// Add a metadata attachment.
607 /// @{
608 void addMetadata(unsigned KindID, MDNode &MD);
609 void addMetadata(StringRef Kind, MDNode &MD);
610 /// @}
611
612 /// Erase all metadata attachments with the given kind.
613 ///
614 /// \returns true if any metadata was removed.
615 bool eraseMetadata(unsigned KindID);
616
617 /// Erase all metadata attached to this Value.
618 void clearMetadata();
619
620public:
621 /// Return true if this value is a swifterror value.
622 ///
623 /// swifterror values can be either a function argument or an alloca with a
624 /// swifterror attribute.
625 bool isSwiftError() const;
626
627 /// Strip off pointer casts, all-zero GEPs and address space casts.
628 ///
629 /// Returns the original uncasted value. If this is called on a non-pointer
630 /// value, it returns 'this'.
631 const Value *stripPointerCasts() const;
632 Value *stripPointerCasts() {
633 return const_cast<Value *>(
634 static_cast<const Value *>(this)->stripPointerCasts());
635 }
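
A brief illustrative sketch, with Q a hypothetical Value* bound to %q:

  // %p = alloca i32
  // %q = bitcast i32* %p to i8*
  const Value *Underlying = Q->stripPointerCasts(); // yields the alloca %p
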
636
637 /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
638 ///
639 /// Returns the original uncasted value. If this is called on a non-pointer
640 /// value, it returns 'this'.
641 const Value *stripPointerCastsAndAliases() const;
642 Value *stripPointerCastsAndAliases() {
643 return const_cast<Value *>(
644 static_cast<const Value *>(this)->stripPointerCastsAndAliases());
645 }
646
647 /// Strip off pointer casts, all-zero GEPs and address space casts
648 /// but ensures the representation of the result stays the same.
649 ///
650 /// Returns the original uncasted value with the same representation. If this
651 /// is called on a non-pointer value, it returns 'this'.
652 const Value *stripPointerCastsSameRepresentation() const;
653 Value *stripPointerCastsSameRepresentation() {
654 return const_cast<Value *>(static_cast<const Value *>(this)
655 ->stripPointerCastsSameRepresentation());
656 }
657
658 /// Strip off pointer casts, all-zero GEPs, single-argument phi nodes and
659 /// invariant group info.
660 ///
661 /// Returns the original uncasted value. If this is called on a non-pointer
662 /// value, it returns 'this'. This function should be used only in
663 /// Alias analysis.
664 const Value *stripPointerCastsForAliasAnalysis() const;
665 Value *stripPointerCastsForAliasAnalysis() {
666 return const_cast<Value *>(static_cast<const Value *>(this)
667 ->stripPointerCastsForAliasAnalysis());
668 }
669
670 /// Strip off pointer casts and all-constant inbounds GEPs.
671 ///
672 /// Returns the original pointer value. If this is called on a non-pointer
673 /// value, it returns 'this'.
674 const Value *stripInBoundsConstantOffsets() const;
675 Value *stripInBoundsConstantOffsets() {
676 return const_cast<Value *>(
677 static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
678 }
679
680 /// Accumulate the constant offset this value has compared to a base pointer.
681 /// Only 'getelementptr' instructions (GEPs) are accumulated but other
682 /// instructions, e.g., casts, are stripped away as well.
683 /// The accumulated constant offset is added to \p Offset and the base
684 /// pointer is returned.
685 ///
686 /// The APInt \p Offset has to have a bit-width equal to the IntPtr type for
687 /// the address space of 'this' pointer value, e.g., use
688 /// DataLayout::getIndexTypeSizeInBits(Ty).
689 ///
690 /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
691 /// accumulated even if the GEP is not "inbounds".
692 ///
693 /// If \p ExternalAnalysis is provided, it will be used to calculate an
694 /// offset when an operand of the GEP is not constant.
695 /// For example, for a value \p ExternalAnalysis might try to calculate a
696 /// lower bound. If \p ExternalAnalysis is successful, it should return true.
697 ///
698 /// If this is called on a non-pointer value, it returns 'this' and the
699 /// \p Offset is not modified.
700 ///
701 /// Note that this function will never return a nullptr. It will also never
702 /// manipulate the \p Offset in a way that would not match the difference
703 /// between the underlying value and the returned one. Thus, if no constant
704 /// offset was found, the returned value is the underlying one and \p Offset
705 /// is unchanged.
706 const Value *stripAndAccumulateConstantOffsets(
707 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
708 function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
709 nullptr) const;
710 Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
711 bool AllowNonInbounds) {
712 return const_cast<Value *>(
713 static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
714 DL, Offset, AllowNonInbounds));
715 }
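
A usage sketch under assumed V (a const Value*) and DL (the module's DataLayout); the APInt must be sized to the index type of V's address space, as the comment above requires:

  APInt Offset(DL.getIndexTypeSizeInBits(V->getType()), 0);
  const Value *Base = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /*AllowNonInbounds=*/false);
  // If no constant offset was found, Base == V and Offset is still zero.
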
716
717 /// This is a wrapper around stripAndAccumulateConstantOffsets with the
718 /// in-bounds requirement set to false.
719 const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
720 APInt &Offset) const {
721 return stripAndAccumulateConstantOffsets(DL, Offset,
722 /* AllowNonInbounds */ false);
723 }
724 Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
725 APInt &Offset) {
726 return stripAndAccumulateConstantOffsets(DL, Offset,
727 /* AllowNonInbounds */ false);
728 }
729
730 /// Strip off pointer casts and inbounds GEPs.
731 ///
732 /// Returns the original pointer value. If this is called on a non-pointer
733 /// value, it returns 'this'.
734 const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
735 [](const Value *) {}) const;
736 inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
737 [](const Value *) {}) {
738 return const_cast<Value *>(
739 static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
740 }
741
742 /// Return true if the memory object referred to by V can be freed in the
743 /// scope for which the SSA value defining the allocation is statically
744 /// defined. E.g. deallocation after the static scope of a value does not
745 /// count, but a deallocation before that does.
746 bool canBeFreed() const;
747
748 /// Returns the number of bytes known to be dereferenceable for the
749 /// pointer value.
750 ///
751 /// If CanBeNull is set by this function the pointer can either be null or be
752 /// dereferenceable up to the returned number of bytes.
753 ///
754 /// If CanBeFreed is true, the pointer is known to be dereferenceable at
755 /// the point of definition only. The caller must prove that the allocation
756 /// is not deallocated between the point of definition and the use.
757 uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
758 bool &CanBeNull,
759 bool &CanBeFreed) const;
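
An illustrative sketch (V and DL are assumed): speculatively loading four bytes through V is justified at the definition point when all three results line up:

  bool CanBeNull = false, CanBeFreed = false;
  uint64_t Bytes = V->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  if (Bytes >= 4 && !CanBeNull && !CanBeFreed) {
    // Known dereferenceable(4) and non-null for the allocation's lifetime.
  }
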
760
761 /// Returns an alignment of the pointer value.
762 ///
763 /// Returns an alignment which is either specified explicitly, e.g. via
764 /// align attribute of a function argument, or guaranteed by DataLayout.
765 Align getPointerAlignment(const DataLayout &DL) const;
766
767 /// Translate PHI node to its predecessor from the given basic block.
768 ///
769 /// If this value is a PHI node with CurBB as its parent, return the value in
770 /// the PHI node corresponding to PredBB. If not, return ourself. This is
771 /// useful if you want to know the value something has in a predecessor
772 /// block.
773 const Value *DoPHITranslation(const BasicBlock *CurBB,
774 const BasicBlock *PredBB) const;
775 Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
776 return const_cast<Value *>(
777 static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
778 }
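
A short sketch with hypothetical V, CurBB and Pred: if V is "%p = phi i32 [ %a, %pred ], [ %b, %other ]" with parent CurBB and Pred is %pred, the call returns %a; for any non-PHI value it returns V itself:

  const Value *Incoming = V->DoPHITranslation(CurBB, Pred);
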
779
780 /// The maximum alignment for instructions.
781 ///
782 /// This is the greatest alignment value supported by load, store, and alloca
783 /// instructions, and global values.
784 static const unsigned MaxAlignmentExponent = 29;
785 static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
786
787 /// Mutate the type of this Value to be of the specified type.
788 ///
789 /// Note that this is an extremely dangerous operation which can create
790 /// completely invalid IR very easily. It is strongly recommended that you
791 /// recreate IR objects with the right types instead of mutating them in
792 /// place.
793 void mutateType(Type *Ty) {
794 VTy = Ty;
795 }
796
797 /// Sort the use-list.
798 ///
799 /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is
800 /// expected to compare two \a Use references.
801 template <class Compare> void sortUseList(Compare Cmp);
802
803 /// Reverse the use-list.
804 void reverseUseList();
805
806private:
807 /// Merge two lists together.
808 ///
809 /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes
810 /// "equal" items from L before items from R.
811 ///
812 /// \return the first element in the list.
813 ///
814 /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
815 template <class Compare>
816 static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
817 Use *Merged;
818 Use **Next = &Merged;
819
820 while (true) {
821 if (!L) {
822 *Next = R;
823 break;
824 }
825 if (!R) {
826 *Next = L;
827 break;
828 }
829 if (Cmp(*R, *L)) {
830 *Next = R;
831 Next = &R->Next;
832 R = R->Next;
833 } else {
834 *Next = L;
835 Next = &L->Next;
836 L = L->Next;
837 }
838 }
839
840 return Merged;
841 }
842
843protected:
844 unsigned short getSubclassDataFromValue() const { return SubclassData; }
845 void setValueSubclassData(unsigned short D) { SubclassData = D; }
846};
847
848struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
849
850/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
851/// Those don't work because Value and Instruction's destructors are protected,
852/// aren't virtual, and won't destroy the complete object.
853using unique_value = std::unique_ptr<Value, ValueDeleter>;
854
855inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
856 V.print(OS);
857 return OS;
858}
859
860void Use::set(Value *V) {
861 if (Val) removeFromList();
862 Val = V;
863 if (V) V->addUse(*this);
864}
865
866Value *Use::operator=(Value *RHS) {
867 set(RHS);
868 return RHS;
869}
870
871const Use &Use::operator=(const Use &RHS) {
872 set(RHS.Val);
873 return *this;
874}
875
876template <class Compare> void Value::sortUseList(Compare Cmp) {
877 if (!UseList || !UseList->Next)
878 // No need to sort 0 or 1 uses.
879 return;
880
881 // Note: this function completely ignores Prev pointers until the end when
882 // they're fixed en masse.
883
884 // Create a binomial vector of sorted lists, visiting uses one at a time and
885 // merging lists as necessary.
886 const unsigned MaxSlots = 32;
887 Use *Slots[MaxSlots];
888
889 // Collect the first use, turning it into a single-item list.
890 Use *Next = UseList->Next;
891 UseList->Next = nullptr;
892 unsigned NumSlots = 1;
893 Slots[0] = UseList;
894
895 // Collect all but the last use.
896 while (Next->Next) {
897 Use *Current = Next;
898 Next = Current->Next;
899
900 // Turn Current into a single-item list.
901 Current->Next = nullptr;
902
903 // Save Current in the first available slot, merging on collisions.
904 unsigned I;
905 for (I = 0; I < NumSlots; ++I) {
906 if (!Slots[I])
907 break;
908
909 // Merge two lists, doubling the size of Current and emptying slot I.
910 //
911 // Since the uses in Slots[I] originally preceded those in Current, send
912 // Slots[I] in as the left parameter to maintain a stable sort.
913 Current = mergeUseLists(Slots[I], Current, Cmp);
914 Slots[I] = nullptr;
915 }
916 // Check if this is a new slot.
917 if (I == NumSlots) {
918 ++NumSlots;
919        assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
920 }
921
922 // Found an open slot.
923 Slots[I] = Current;
924 }
925
926 // Merge all the lists together.
927  assert(Next && "Expected one more Use");
928  assert(!Next->Next && "Expected only one Use");
929 UseList = Next;
930 for (unsigned I = 0; I < NumSlots; ++I)
931 if (Slots[I])
932 // Since the uses in Slots[I] originally preceded those in UseList, send
933 // Slots[I] in as the left parameter to maintain a stable sort.
934 UseList = mergeUseLists(Slots[I], UseList, Cmp);
935
936 // Fix the Prev pointers.
937 for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
938 I->Prev = Prev;
939 Prev = &I->Next;
940 }
941}
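
A usage sketch for sortUseList (V is a hypothetical Value*); any strict weak ordering over llvm::Use works, here ordering uses by the ValueID of their user:

  V->sortUseList([](const Use &L, const Use &R) {
    return L.getUser()->getValueID() < R.getUser()->getValueID();
  });
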
942
943// isa - Provide some specializations of isa so that we don't have to include
944// the subtype header files to test to see if the value is a subclass...
945//
946template <> struct isa_impl<Constant, Value> {
947 static inline bool doit(const Value &Val) {
948 static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
949 return Val.getValueID() <= Value::ConstantLastVal;
950 }
951};
952
953template <> struct isa_impl<ConstantData, Value> {
954 static inline bool doit(const Value &Val) {
955 return Val.getValueID() >= Value::ConstantDataFirstVal &&
956 Val.getValueID() <= Value::ConstantDataLastVal;
957 }
958};
959
960template <> struct isa_impl<ConstantAggregate, Value> {
961 static inline bool doit(const Value &Val) {
962 return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
963 Val.getValueID() <= Value::ConstantAggregateLastVal;
964 }
965};
966
967template <> struct isa_impl<Argument, Value> {
968 static inline bool doit (const Value &Val) {
969 return Val.getValueID() == Value::ArgumentVal;
970 }
971};
972
973template <> struct isa_impl<InlineAsm, Value> {
974 static inline bool doit(const Value &Val) {
975 return Val.getValueID() == Value::InlineAsmVal;
976 }
977};
978
979template <> struct isa_impl<Instruction, Value> {
980 static inline bool doit(const Value &Val) {
981 return Val.getValueID() >= Value::InstructionVal;
982 }
983};
984
985template <> struct isa_impl<BasicBlock, Value> {
986 static inline bool doit(const Value &Val) {
987 return Val.getValueID() == Value::BasicBlockVal;
988 }
989};
990
991template <> struct isa_impl<Function, Value> {
992 static inline bool doit(const Value &Val) {
993 return Val.getValueID() == Value::FunctionVal;
994 }
995};
996
997template <> struct isa_impl<GlobalVariable, Value> {
998 static inline bool doit(const Value &Val) {
999 return Val.getValueID() == Value::GlobalVariableVal;
1000 }
1001};
1002
1003template <> struct isa_impl<GlobalAlias, Value> {
1004 static inline bool doit(const Value &Val) {
1005 return Val.getValueID() == Value::GlobalAliasVal;
1006 }
1007};
1008
1009template <> struct isa_impl<GlobalIFunc, Value> {
1010 static inline bool doit(const Value &Val) {
1011 return Val.getValueID() == Value::GlobalIFuncVal;
1012 }
1013};
1014
1015template <> struct isa_impl<GlobalIndirectSymbol, Value> {
1016 static inline bool doit(const Value &Val) {
1017 return isa<GlobalAlias>(Val) || isa<GlobalIFunc>(Val);
1018 }
1019};
1020
1021template <> struct isa_impl<GlobalValue, Value> {
1022 static inline bool doit(const Value &Val) {
1023 return isa<GlobalObject>(Val) || isa<GlobalIndirectSymbol>(Val);
1024 }
1025};
1026
1027template <> struct isa_impl<GlobalObject, Value> {
1028 static inline bool doit(const Value &Val) {
1029 return isa<GlobalVariable>(Val) || isa<Function>(Val);
1030 }
1031};
1032
1033// Create wrappers for C Binding types (see CBindingWrapping.h).
1034DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
1035
1036// Specialized opaque value conversions.
1037inline Value **unwrap(LLVMValueRef *Vals) {
1038 return reinterpret_cast<Value**>(Vals);
1039}
1040
1041template<typename T>
1042inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
1043#ifndef NDEBUG
1044 for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
1045 unwrap<T>(*I); // For side effect of calling assert on invalid usage.
1046#endif
1047 (void)Length;
1048 return reinterpret_cast<T**>(Vals);
1049}
1050
1051inline LLVMValueRef *wrap(const Value **Vals) {
1052 return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
1053}
1054
1055} // end namespace llvm
1056
1057#endif // LLVM_IR_VALUE_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains some templates that are useful if you are working with the
10// STL at all.
11//
12// No library is required when using these functions.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_ADT_STLEXTRAS_H
17#define LLVM_ADT_STLEXTRAS_H
18
19#include "llvm/ADT/Optional.h"
20#include "llvm/ADT/STLForwardCompat.h"
21#include "llvm/ADT/iterator.h"
22#include "llvm/ADT/iterator_range.h"
23#include "llvm/Config/abi-breaking.h"
24#include "llvm/Support/ErrorHandling.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstdint>
29#include <cstdlib>
30#include <functional>
31#include <initializer_list>
32#include <iterator>
33#include <limits>
34#include <memory>
35#include <tuple>
36#include <type_traits>
37#include <utility>
38
39#ifdef EXPENSIVE_CHECKS
40#include <random> // for std::mt19937
41#endif
42
43namespace llvm {
44
45// Only used by compiler if both template types are the same. Useful when
46// using SFINAE to test for the existence of member functions.
47template <typename T, T> struct SameType;
48
49namespace detail {
50
51template <typename RangeT>
52using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
53
54template <typename RangeT>
55using ValueOfRange = typename std::remove_reference<decltype(
56 *std::begin(std::declval<RangeT &>()))>::type;
57
58} // end namespace detail
59
60//===----------------------------------------------------------------------===//
61// Extra additions to <type_traits>
62//===----------------------------------------------------------------------===//
63
64template <typename T> struct make_const_ptr {
65 using type =
66 typename std::add_pointer<typename std::add_const<T>::type>::type;
67};
68
69template <typename T> struct make_const_ref {
70 using type = typename std::add_lvalue_reference<
71 typename std::add_const<T>::type>::type;
72};
73
74namespace detail {
75template <typename...> using void_t = void;
76template <class, template <class...> class Op, class... Args> struct detector {
77 using value_t = std::false_type;
78};
79template <template <class...> class Op, class... Args>
80struct detector<void_t<Op<Args...>>, Op, Args...> {
81 using value_t = std::true_type;
82};
83} // end namespace detail
84
85/// Detects if a given trait holds for some set of arguments 'Args'.
86/// For example, the given trait could be used to detect if a given type
87/// has a copy assignment operator:
88/// template<class T>
89/// using has_copy_assign_t = decltype(std::declval<T&>()
90/// = std::declval<const T&>());
91/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
92template <template <class...> class Op, class... Args>
93using is_detected = typename detail::detector<void, Op, Args...>::value_t;
94
95namespace detail {
96template <typename Callable, typename... Args>
97using is_invocable =
98 decltype(std::declval<Callable &>()(std::declval<Args>()...));
99} // namespace detail
100
101/// Check if a Callable type can be invoked with the given set of arg types.
102template <typename Callable, typename... Args>
103using is_invocable = is_detected<detail::is_invocable, Callable, Args...>;
104
105/// This class provides various trait information about a callable object.
106/// * To access the number of arguments: Traits::num_args
107/// * To access the type of an argument: Traits::arg_t<Index>
108/// * To access the type of the result: Traits::result_t
109template <typename T, bool isClass = std::is_class<T>::value>
110struct function_traits : public function_traits<decltype(&T::operator())> {};
111
112/// Overload for class function types.
113template <typename ClassType, typename ReturnType, typename... Args>
114struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
115 /// The number of arguments to this function.
116 enum { num_args = sizeof...(Args) };
117
118 /// The result type of this function.
119 using result_t = ReturnType;
120
121 /// The type of an argument to this function.
122 template <size_t Index>
123 using arg_t = typename std::tuple_element<Index, std::tuple<Args...>>::type;
124};
125/// Overload for class function types.
126template <typename ClassType, typename ReturnType, typename... Args>
127struct function_traits<ReturnType (ClassType::*)(Args...), false>
128 : function_traits<ReturnType (ClassType::*)(Args...) const> {};
129/// Overload for non-class function types.
130template <typename ReturnType, typename... Args>
131struct function_traits<ReturnType (*)(Args...), false> {
132 /// The number of arguments to this function.
133 enum { num_args = sizeof...(Args) };
134
135 /// The result type of this function.
136 using result_t = ReturnType;
137
138 /// The type of an argument to this function.
139 template <size_t i>
140 using arg_t = typename std::tuple_element<i, std::tuple<Args...>>::type;
141};
142/// Overload for non-class function type references.
143template <typename ReturnType, typename... Args>
144struct function_traits<ReturnType (&)(Args...), false>
145 : public function_traits<ReturnType (*)(Args...)> {};
146
147//===----------------------------------------------------------------------===//
148// Extra additions to <functional>
149//===----------------------------------------------------------------------===//
150
151template <class Ty> struct identity {
152 using argument_type = Ty;
153
154 Ty &operator()(Ty &self) const {
155 return self;
156 }
157 const Ty &operator()(const Ty &self) const {
158 return self;
159 }
160};
161
162/// An efficient, type-erasing, non-owning reference to a callable. This is
163/// intended for use as the type of a function parameter that is not used
164/// after the function in question returns.
165///
166/// This class does not own the callable, so it is not in general safe to store
167/// a function_ref.
168template<typename Fn> class function_ref;
169
170template<typename Ret, typename ...Params>
171class function_ref<Ret(Params...)> {
172 Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
173 intptr_t callable;
174
175 template<typename Callable>
176 static Ret callback_fn(intptr_t callable, Params ...params) {
177 return (*reinterpret_cast<Callable*>(callable))(
178 std::forward<Params>(params)...);
179 }
180
181public:
182 function_ref() = default;
183 function_ref(std::nullptr_t) {}
184
185 template <typename Callable>
186 function_ref(
187 Callable &&callable,
188 // This is not the copy-constructor.
189 std::enable_if_t<!std::is_same<remove_cvref_t<Callable>,
190 function_ref>::value> * = nullptr,
191 // Functor must be callable and return a suitable type.
192 std::enable_if_t<std::is_void<Ret>::value ||
193 std::is_convertible<decltype(std::declval<Callable>()(
194 std::declval<Params>()...)),
195 Ret>::value> * = nullptr)
196 : callback(callback_fn<typename std::remove_reference<Callable>::type>),
197 callable(reinterpret_cast<intptr_t>(&callable)) {}
198
199 Ret operator()(Params ...params) const {
200 return callback(callable, std::forward<Params>(params)...);
201 }
202
203 explicit operator bool() const { return callback; }
204};
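
A minimal sketch of function_ref as a non-owning callback parameter; applyTwice is hypothetical. The temporary lambda outlives the full expression of the call, so the reference stays valid:

  int applyTwice(llvm::function_ref<int(int)> F, int X) {
    return F(F(X)); // the reference is only used while applyTwice runs
  }
  int R = applyTwice([](int N) { return N * N; }, 3); // R == 81
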
205
206//===----------------------------------------------------------------------===//
207// Extra additions to <iterator>
208//===----------------------------------------------------------------------===//
209
210namespace adl_detail {
211
212using std::begin;
213
214template <typename ContainerTy>
215decltype(auto) adl_begin(ContainerTy &&container) {
216 return begin(std::forward<ContainerTy>(container));
217}
218
219using std::end;
220
221template <typename ContainerTy>
222decltype(auto) adl_end(ContainerTy &&container) {
223 return end(std::forward<ContainerTy>(container));
224}
225
226using std::swap;
227
228template <typename T>
229void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
230 std::declval<T>()))) {
231 swap(std::forward<T>(lhs), std::forward<T>(rhs));
232}
233
234} // end namespace adl_detail
235
236template <typename ContainerTy>
237decltype(auto) adl_begin(ContainerTy &&container) {
238 return adl_detail::adl_begin(std::forward<ContainerTy>(container));
239}
240
241template <typename ContainerTy>
242decltype(auto) adl_end(ContainerTy &&container) {
243 return adl_detail::adl_end(std::forward<ContainerTy>(container));
244}
245
246template <typename T>
247void adl_swap(T &&lhs, T &&rhs) noexcept(
248 noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
249 adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
250}
251
252/// Test whether \p RangeOrContainer is empty. Similar to C++17 std::empty.
253template <typename T>
254constexpr bool empty(const T &RangeOrContainer) {
255 return adl_begin(RangeOrContainer) == adl_end(RangeOrContainer);
256}
257
258/// Returns true if the given container only contains a single element.
259template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
260 auto B = std::begin(C), E = std::end(C);
261 return B != E && std::next(B) == E;
49
Assuming the condition is true
50
Returning value, which participates in a condition later
262}
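
A brief sketch of hasSingleElement on ordinary containers; the check is begin() != end() && next(begin()) == end(), so it never walks more than one step, which is why Value::hasOneUse can apply it to an arbitrarily long use list:

  std::vector<int> One = {7}, Two = {7, 8};
  bool A = llvm::hasSingleElement(One); // true
  bool B = llvm::hasSingleElement(Two); // false
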
263
264/// Return a range covering \p RangeOrContainer with the first N elements
265/// excluded.
266template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
267 return make_range(std::next(adl_begin(RangeOrContainer), N),
268 adl_end(RangeOrContainer));
269}
270
271// mapped_iterator - This is a simple iterator adapter that causes a function to
272// be applied whenever operator* is invoked on the iterator.
273
274template <typename ItTy, typename FuncTy,
275 typename FuncReturnTy =
276 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
277class mapped_iterator
278 : public iterator_adaptor_base<
279 mapped_iterator<ItTy, FuncTy>, ItTy,
280 typename std::iterator_traits<ItTy>::iterator_category,
281 typename std::remove_reference<FuncReturnTy>::type> {
282public:
283 mapped_iterator(ItTy U, FuncTy F)
284 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
285
286 ItTy getCurrent() { return this->I; }
287
288 FuncReturnTy operator*() const { return F(*this->I); }
289
290private:
291 FuncTy F;
292};
293
294// map_iterator - Provide a convenient way to create mapped_iterators, just like
295// make_pair is useful for creating pairs...
296template <class ItTy, class FuncTy>
297inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
298 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
299}
300
301template <class ContainerTy, class FuncTy>
302auto map_range(ContainerTy &&C, FuncTy F) {
303 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
304}
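
A small sketch of map_range: the mapping function is applied lazily, on each dereference of the iterator:

  std::vector<int> V = {1, 2, 3};
  for (int Sq : llvm::map_range(V, [](int N) { return N * N; })) {
    // Visits 1, 4, 9.
  }
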
305
306/// Helper to determine if type T has a member called rbegin().
307template <typename Ty> class has_rbegin_impl {
308 using yes = char[1];
309 using no = char[2];
310
311 template <typename Inner>
312 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
313
314 template <typename>
315 static no& test(...);
316
317public:
318 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
319};
320
321/// Metafunction to determine if T& or T has a member called rbegin().
322template <typename Ty>
323struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
324};
325
326// Returns an iterator_range over the given container which iterates in reverse.
327// Note that the container must have rbegin()/rend() methods for this to work.
328template <typename ContainerTy>
329auto reverse(ContainerTy &&C,
330 std::enable_if_t<has_rbegin<ContainerTy>::value> * = nullptr) {
331 return make_range(C.rbegin(), C.rend());
332}
333
334// Returns a std::reverse_iterator wrapped around the given iterator.
335template <typename IteratorTy>
336std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
337 return std::reverse_iterator<IteratorTy>(It);
338}
339
340// Returns an iterator_range over the given container which iterates in reverse.
341// Note that the container must have begin()/end() methods which return
342// bidirectional iterators for this to work.
343template <typename ContainerTy>
344auto reverse(ContainerTy &&C,
345 std::enable_if_t<!has_rbegin<ContainerTy>::value> * = nullptr) {
346 return make_range(llvm::make_reverse_iterator(std::end(C)),
347 llvm::make_reverse_iterator(std::begin(C)));
348}
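
A small sketch of reverse, which picks the rbegin()/rend() overload for containers that have them and otherwise wraps bidirectional begin()/end():

  std::vector<int> V = {1, 2, 3};
  for (int N : llvm::reverse(V)) {
    // Visits 3, 2, 1.
  }
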
349
350/// An iterator adaptor that filters the elements of given inner iterators.
351///
352/// The predicate parameter should be a callable object that accepts the wrapped
353/// iterator's reference type and returns a bool. When incrementing or
354/// decrementing the iterator, it will call the predicate on each element and
355/// skip any where it returns false.
356///
357/// \code
358/// int A[] = { 1, 2, 3, 4 };
359/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
360/// // R contains { 1, 3 }.
361/// \endcode
362///
363/// Note: filter_iterator_base implements support for forward iteration.
364/// filter_iterator_impl exists to provide support for bidirectional iteration,
365/// conditional on whether the wrapped iterator supports it.
366template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
367class filter_iterator_base
368 : public iterator_adaptor_base<
369 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
370 WrappedIteratorT,
371 typename std::common_type<
372 IterTag, typename std::iterator_traits<
373 WrappedIteratorT>::iterator_category>::type> {
374 using BaseT = iterator_adaptor_base<
375 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
376 WrappedIteratorT,
377 typename std::common_type<
378 IterTag, typename std::iterator_traits<
379 WrappedIteratorT>::iterator_category>::type>;
380
381protected:
382 WrappedIteratorT End;
383 PredicateT Pred;
384
385 void findNextValid() {
386 while (this->I != End && !Pred(*this->I))
387 BaseT::operator++();
388 }
389
390 // Construct the iterator. The begin iterator needs to know where the end
391 // is, so that it can properly stop when it gets there. The end iterator only
392 // needs the predicate to support bidirectional iteration.
393 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
394 PredicateT Pred)
395 : BaseT(Begin), End(End), Pred(Pred) {
396 findNextValid();
397 }
398
399public:
400 using BaseT::operator++;
401
402 filter_iterator_base &operator++() {
403 BaseT::operator++();
404 findNextValid();
405 return *this;
406 }
407};
408
409/// Specialization of filter_iterator_base for forward iteration only.
410template <typename WrappedIteratorT, typename PredicateT,
411 typename IterTag = std::forward_iterator_tag>
412class filter_iterator_impl
413 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
414 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>;
415
416public:
417 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
418 PredicateT Pred)
419 : BaseT(Begin, End, Pred) {}
420};
421
422/// Specialization of filter_iterator_base for bidirectional iteration.
423template <typename WrappedIteratorT, typename PredicateT>
424class filter_iterator_impl<WrappedIteratorT, PredicateT,
425 std::bidirectional_iterator_tag>
426 : public filter_iterator_base<WrappedIteratorT, PredicateT,
427 std::bidirectional_iterator_tag> {
428 using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT,
429 std::bidirectional_iterator_tag>;
430 void findPrevValid() {
431 while (!this->Pred(*this->I))
432 BaseT::operator--();
433 }
434
435public:
436 using BaseT::operator--;
437
438 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
439 PredicateT Pred)
440 : BaseT(Begin, End, Pred) {}
441
442 filter_iterator_impl &operator--() {
443 BaseT::operator--();
444 findPrevValid();
445 return *this;
446 }
447};
448
449namespace detail {
450
451template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
452 using type = std::forward_iterator_tag;
453};
454
455template <> struct fwd_or_bidi_tag_impl<true> {
456 using type = std::bidirectional_iterator_tag;
457};
458
459/// Helper which sets its type member to forward_iterator_tag if the category
460/// of \p IterT does not derive from bidirectional_iterator_tag, and to
461/// bidirectional_iterator_tag otherwise.
462template <typename IterT> struct fwd_or_bidi_tag {
463 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
464 std::bidirectional_iterator_tag,
465 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
466};
467
468} // namespace detail
469
470/// Defines filter_iterator to a suitable specialization of
471/// filter_iterator_impl, based on the underlying iterator's category.
472template <typename WrappedIteratorT, typename PredicateT>
473using filter_iterator = filter_iterator_impl<
474 WrappedIteratorT, PredicateT,
475 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
476
477/// Convenience function that takes a range of elements and a predicate,
478/// and return a new filter_iterator range.
479///
480/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
481/// lifetime of that temporary is not kept by the returned range object, and the
482/// temporary is going to be dropped on the floor after the make_iterator_range
483/// full expression that contains this function call.
484template <typename RangeT, typename PredicateT>
485iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
486make_filter_range(RangeT &&Range, PredicateT Pred) {
487 using FilterIteratorT =
488 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
489 return make_range(
490 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
491 std::end(std::forward<RangeT>(Range)), Pred),
492 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
493 std::end(std::forward<RangeT>(Range)), Pred));
494}
495
496/// A pseudo-iterator adaptor that is designed to implement "early increment"
497/// style loops.
498///
499/// This is *not a normal iterator* and should almost never be used directly. It
500/// is intended primarily to be used with range based for loops and some range
501/// algorithms.
502///
503/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
504/// somewhere between them. The constraints of these iterators are:
505///
506/// - On construction or after being incremented, it is comparable and
507/// dereferenceable. It is *not* incrementable.
508/// - After being dereferenced, it is neither comparable nor dereferenceable, it
509/// is only incrementable.
510///
511/// This means you can only dereference the iterator once, and you can only
512/// increment it once between dereferences.
513template <typename WrappedIteratorT>
514class early_inc_iterator_impl
515 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
516 WrappedIteratorT, std::input_iterator_tag> {
517 using BaseT =
518 iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
519 WrappedIteratorT, std::input_iterator_tag>;
520
521 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
522
523protected:
524#if LLVM_ENABLE_ABI_BREAKING_CHECKS
525 bool IsEarlyIncremented = false;
526#endif
527
528public:
529 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
530
531 using BaseT::operator*;
532 decltype(*std::declval<WrappedIteratorT>()) operator*() {
533#if LLVM_ENABLE_ABI_BREAKING_CHECKS
534    assert(!IsEarlyIncremented && "Cannot dereference twice!");
535 IsEarlyIncremented = true;
536#endif
537 return *(this->I)++;
538 }
539
540 using BaseT::operator++;
541 early_inc_iterator_impl &operator++() {
542#if LLVM_ENABLE_ABI_BREAKING_CHECKS
543    assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
544 IsEarlyIncremented = false;
545#endif
546 return *this;
547 }
548
549 friend bool operator==(const early_inc_iterator_impl &LHS,
550 const early_inc_iterator_impl &RHS) {
551#if LLVM_ENABLE_ABI_BREAKING_CHECKS
552    assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
553#endif
554 return (const BaseT &)LHS == (const BaseT &)RHS;
555 }
556};
557
558/// Make a range that does early increment to allow mutation of the underlying
559/// range without disrupting iteration.
560///
561/// The underlying iterator will be incremented immediately after it is
562/// dereferenced, allowing deletion of the current node or insertion of nodes to
563/// not disrupt iteration provided they do not invalidate the *next* iterator --
564/// the current iterator can be invalidated.
565///
566/// This requires a very exact pattern of use that is only really suitable to
567/// range based for loops and other range algorithms that explicitly guarantee
568/// to dereference exactly once each element, and to increment exactly once each
569/// element.
570template <typename RangeT>
571iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
572make_early_inc_range(RangeT &&Range) {
573 using EarlyIncIteratorT =
574 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
575 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
576 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
577}
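
A typical usage sketch (BB is an assumed llvm::BasicBlock&; isInstructionTriviallyDead comes from llvm/Transforms/Utils/Local.h): erasing the current instruction is safe because the wrapped iterator has already advanced past it:

  for (Instruction &I : llvm::make_early_inc_range(BB))
    if (isInstructionTriviallyDead(&I))
      I.eraseFromParent();
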
578
579// forward declarations required by zip_shortest/zip_first/zip_longest
580template <typename R, typename UnaryPredicate>
581bool all_of(R &&range, UnaryPredicate P);
582template <typename R, typename UnaryPredicate>
583bool any_of(R &&range, UnaryPredicate P);
584
585namespace detail {
586
587using std::declval;
588
589// We have to alias this since inlining the actual type at the usage site
590// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
591template<typename... Iters> struct ZipTupleType {
592 using type = std::tuple<decltype(*declval<Iters>())...>;
593};
594
595template <typename ZipType, typename... Iters>
596using zip_traits = iterator_facade_base<
597 ZipType, typename std::common_type<std::bidirectional_iterator_tag,
598 typename std::iterator_traits<
599 Iters>::iterator_category...>::type,
600 // ^ TODO: Implement random access methods.
601 typename ZipTupleType<Iters...>::type,
602 typename std::iterator_traits<typename std::tuple_element<
603 0, std::tuple<Iters...>>::type>::difference_type,
604 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
605 // inner iterators have the same difference_type. It would fail if, for
606 // instance, the second field's difference_type were non-numeric while the
607 // first is.
608 typename ZipTupleType<Iters...>::type *,
609 typename ZipTupleType<Iters...>::type>;
610
611template <typename ZipType, typename... Iters>
612struct zip_common : public zip_traits<ZipType, Iters...> {
613 using Base = zip_traits<ZipType, Iters...>;
614 using value_type = typename Base::value_type;
615
616 std::tuple<Iters...> iterators;
617
618protected:
619 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
620 return value_type(*std::get<Ns>(iterators)...);
621 }
622
623 template <size_t... Ns>
624 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
625 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
626 }
627
628 template <size_t... Ns>
629 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
630 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
631 }
632
633public:
634 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
635
636 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
637
638 const value_type operator*() const {
639 return deref(std::index_sequence_for<Iters...>{});
640 }
641
642 ZipType &operator++() {
643 iterators = tup_inc(std::index_sequence_for<Iters...>{});
644 return *reinterpret_cast<ZipType *>(this);
645 }
646
647 ZipType &operator--() {
648 static_assert(Base::IsBidirectional,
649 "All inner iterators must be at least bidirectional.");
650 iterators = tup_dec(std::index_sequence_for<Iters...>{});
651 return *reinterpret_cast<ZipType *>(this);
652 }
653};
654
655template <typename... Iters>
656struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
657 using Base = zip_common<zip_first<Iters...>, Iters...>;
658
659 bool operator==(const zip_first<Iters...> &other) const {
660 return std::get<0>(this->iterators) == std::get<0>(other.iterators);
661 }
662
663 zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
664};
665
666template <typename... Iters>
667class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
668 template <size_t... Ns>
669 bool test(const zip_shortest<Iters...> &other,
670 std::index_sequence<Ns...>) const {
671 return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
672 std::get<Ns>(other.iterators)...},
673 identity<bool>{});
674 }
675
676public:
677 using Base = zip_common<zip_shortest<Iters...>, Iters...>;
678
679 zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
680
681 bool operator==(const zip_shortest<Iters...> &other) const {
682 return !test(other, std::index_sequence_for<Iters...>{});
683 }
684};
685
686template <template <typename...> class ItType, typename... Args> class zippy {
687public:
688 using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
689 using iterator_category = typename iterator::iterator_category;
690 using value_type = typename iterator::value_type;
691 using difference_type = typename iterator::difference_type;
692 using pointer = typename iterator::pointer;
693 using reference = typename iterator::reference;
694
695private:
696 std::tuple<Args...> ts;
697
698 template <size_t... Ns>
699 iterator begin_impl(std::index_sequence<Ns...>) const {
700 return iterator(std::begin(std::get<Ns>(ts))...);
701 }
702 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
703 return iterator(std::end(std::get<Ns>(ts))...);
704 }
705
706public:
707 zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
708
709 iterator begin() const {
710 return begin_impl(std::index_sequence_for<Args...>{});
711 }
712 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
713};
714
715} // end namespace detail
716
717/// zip iterator for two or more iterable types.
718template <typename T, typename U, typename... Args>
719detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
720 Args &&... args) {
721 return detail::zippy<detail::zip_shortest, T, U, Args...>(
722 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
723}
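
A small sketch of zip: iteration is lock-step and stops at the shortest range:

  std::vector<int> A = {1, 2, 3};
  std::vector<char> B = {'x', 'y'};
  for (auto T : llvm::zip(A, B)) {
    int N = std::get<0>(T);  // 1 then 2
    char C = std::get<1>(T); // 'x' then 'y'; A's trailing 3 is never visited
  }
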
724
725/// zip iterator that, for the sake of efficiency, assumes the first iteratee to
726/// be the shortest.
727template <typename T, typename U, typename... Args>
728detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
729 Args &&... args) {
730 return detail::zippy<detail::zip_first, T, U, Args...>(
731 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
732}
733
734namespace detail {
735template <typename Iter>
736Iter next_or_end(const Iter &I, const Iter &End) {
737 if (I == End)
738 return End;
739 return std::next(I);
740}
741
742template <typename Iter>
743auto deref_or_none(const Iter &I, const Iter &End) -> llvm::Optional<
744 std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
745 if (I == End)
746 return None;
747 return *I;
748}
749
750template <typename Iter> struct ZipLongestItemType {
751 using type =
752 llvm::Optional<typename std::remove_const<typename std::remove_reference<
753 decltype(*std::declval<Iter>())>::type>::type>;
754};
755
756template <typename... Iters> struct ZipLongestTupleType {
757 using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
758};
759
760template <typename... Iters>
761class zip_longest_iterator
762 : public iterator_facade_base<
763 zip_longest_iterator<Iters...>,
764 typename std::common_type<
765 std::forward_iterator_tag,
766 typename std::iterator_traits<Iters>::iterator_category...>::type,
767 typename ZipLongestTupleType<Iters...>::type,
768 typename std::iterator_traits<typename std::tuple_element<
769 0, std::tuple<Iters...>>::type>::difference_type,
770 typename ZipLongestTupleType<Iters...>::type *,
771 typename ZipLongestTupleType<Iters...>::type> {
772public:
773 using value_type = typename ZipLongestTupleType<Iters...>::type;
774
775private:
776 std::tuple<Iters...> iterators;
777 std::tuple<Iters...> end_iterators;
778
779 template <size_t... Ns>
780 bool test(const zip_longest_iterator<Iters...> &other,
781 std::index_sequence<Ns...>) const {
782 return llvm::any_of(
783 std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
784 std::get<Ns>(other.iterators)...},
785 identity<bool>{});
786 }
787
788 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
789 return value_type(
790 deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
791 }
792
793 template <size_t... Ns>
794 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
795 return std::tuple<Iters...>(
796 next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
797 }
798
799public:
800 zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
801 : iterators(std::forward<Iters>(ts.first)...),
802 end_iterators(std::forward<Iters>(ts.second)...) {}
803
804 value_type operator*() { return deref(std::index_sequence_for<Iters...>{}); }
805
806 value_type operator*() const {
807 return deref(std::index_sequence_for<Iters...>{});
808 }
809
810 zip_longest_iterator<Iters...> &operator++() {
811 iterators = tup_inc(std::index_sequence_for<Iters...>{});
812 return *this;
813 }
814
815 bool operator==(const zip_longest_iterator<Iters...> &other) const {
816 return !test(other, std::index_sequence_for<Iters...>{});
817 }
818};
819
820template <typename... Args> class zip_longest_range {
821public:
822 using iterator =
823 zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
824 using iterator_category = typename iterator::iterator_category;
825 using value_type = typename iterator::value_type;
826 using difference_type = typename iterator::difference_type;
827 using pointer = typename iterator::pointer;
828 using reference = typename iterator::reference;
829
830private:
831 std::tuple<Args...> ts;
832
833 template <size_t... Ns>
834 iterator begin_impl(std::index_sequence<Ns...>) const {
835 return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
836 adl_end(std::get<Ns>(ts)))...);
837 }
838
839 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
840 return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
841 adl_end(std::get<Ns>(ts)))...);
842 }
843
844public:
845 zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
846
847 iterator begin() const {
848 return begin_impl(std::index_sequence_for<Args...>{});
849 }
850 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
851};
852} // namespace detail
853
854/// Iterate over two or more iterators at the same time. Iteration continues
855/// until all iterators reach the end. The llvm::Optional only contains a value
856/// if the iterator has not reached the end.
857template <typename T, typename U, typename... Args>
858detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
859 Args &&... args) {
860 return detail::zip_longest_range<T, U, Args...>(
861 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
862}
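
A small sketch of zip_longest: the shorter range is padded with empty Optionals once it runs out:

  std::vector<int> A = {1, 2, 3};
  std::vector<int> B = {10};
  for (auto T : llvm::zip_longest(A, B)) {
    Optional<int> L = std::get<0>(T); // 1, 2, 3
    Optional<int> R = std::get<1>(T); // 10, None, None
  }
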
863
864/// Iterator wrapper that concatenates sequences together.
865///
866/// This can concatenate different iterators, even with different types, into
867/// a single iterator provided the value types of all the concatenated
868/// iterators expose `reference` and `pointer` types that can be converted to
869/// `ValueT &` and `ValueT *` respectively. It doesn't support more
870/// interesting/customized pointer or reference types.
871///
872/// Currently this only supports forward or higher iterator categories as
873/// inputs and always exposes a forward iterator interface.
874template <typename ValueT, typename... IterTs>
875class concat_iterator
876 : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
877 std::forward_iterator_tag, ValueT> {
878 using BaseT = typename concat_iterator::iterator_facade_base;
879
880 /// We store both the current and end iterators for each concatenated
881 /// sequence in a tuple of pairs.
882 ///
883 /// Note that something like iterator_range seems nice at first here, but the
884 /// range properties are of little benefit and end up getting in the way
885 /// because we need to do mutation on the current iterators.
886 std::tuple<IterTs...> Begins;
887 std::tuple<IterTs...> Ends;
888
889 /// Attempts to increment a specific iterator.
890 ///
891 /// Returns true if it was able to increment the iterator. Returns false if
892 /// the iterator is already at the end iterator.
893 template <size_t Index> bool incrementHelper() {
894 auto &Begin = std::get<Index>(Begins);
895 auto &End = std::get<Index>(Ends);
896 if (Begin == End)
897 return false;
898
899 ++Begin;
900 return true;
901 }
902
903 /// Increments the first non-end iterator.
904 ///
905 /// It is an error to call this with all iterators at the end.
906 template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
907 // Build a sequence of functions to increment each iterator if possible.
908 bool (concat_iterator::*IncrementHelperFns[])() = {
909 &concat_iterator::incrementHelper<Ns>...};
910
911 // Loop over them, and stop as soon as we succeed at incrementing one.
912 for (auto &IncrementHelperFn : IncrementHelperFns)
913 if ((this->*IncrementHelperFn)())
914 return;
915
916    llvm_unreachable("Attempted to increment an end concat iterator!");
917 }
918
919 /// Returns null if the specified iterator is at the end. Otherwise,
920 /// dereferences the iterator and returns the address of the resulting
921 /// reference.
922 template <size_t Index> ValueT *getHelper() const {
923 auto &Begin = std::get<Index>(Begins);
924 auto &End = std::get<Index>(Ends);
925 if (Begin == End)
926 return nullptr;
927
928 return &*Begin;
929 }
930
931 /// Finds the first non-end iterator, dereferences, and returns the resulting
932 /// reference.
933 ///
934 /// It is an error to call this with all iterators at the end.
935 template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
936 // Build a sequence of functions to get from iterator if possible.
937 ValueT *(concat_iterator::*GetHelperFns[])() const = {
938 &concat_iterator::getHelper<Ns>...};
939
940 // Loop over them, and return the first result we find.
941 for (auto &GetHelperFn : GetHelperFns)
942 if (ValueT *P = (this->*GetHelperFn)())
943 return *P;
944
945    llvm_unreachable("Attempted to get a pointer from an end concat iterator!");
946 }
947
948public:
949 /// Constructs an iterator from a sequence of ranges.
950 ///
951 /// We need the full range to know how to switch between each of the
952 /// iterators.
953 template <typename... RangeTs>
954 explicit concat_iterator(RangeTs &&... Ranges)
955 : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
956
957 using BaseT::operator++;
958
959 concat_iterator &operator++() {
960 increment(std::index_sequence_for<IterTs...>());
961 return *this;
962 }
963
964 ValueT &operator*() const {
965 return get(std::index_sequence_for<IterTs...>());
966 }
967
968 bool operator==(const concat_iterator &RHS) const {
969 return Begins == RHS.Begins && Ends == RHS.Ends;
970 }
971};
972
973namespace detail {
974
975/// Helper to store a sequence of ranges being concatenated and access them.
976///
977/// This is designed to facilitate providing actual storage when temporaries
978/// are passed into the constructor such that we can use it as part of
979/// range-based for loops.
980template <typename ValueT, typename... RangeTs> class concat_range {
981public:
982 using iterator =
983 concat_iterator<ValueT,
984 decltype(std::begin(std::declval<RangeTs &>()))...>;
985
986private:
987 std::tuple<RangeTs...> Ranges;
988
989 template <size_t... Ns> iterator begin_impl(std::index_sequence<Ns...>) {
990 return iterator(std::get<Ns>(Ranges)...);
991 }
992 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
993 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
994 std::end(std::get<Ns>(Ranges)))...);
995 }
996
997public:
998 concat_range(RangeTs &&... Ranges)
999 : Ranges(std::forward<RangeTs>(Ranges)...) {}
1000
1001 iterator begin() { return begin_impl(std::index_sequence_for<RangeTs...>{}); }
1002 iterator end() { return end_impl(std::index_sequence_for<RangeTs...>{}); }
1003};
1004
1005} // end namespace detail
1006
1007/// Concatenated range across two or more ranges.
1008///
1009/// The desired value type must be explicitly specified.
1010template <typename ValueT, typename... RangeTs>
1011detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
1012 static_assert(sizeof...(RangeTs) > 1,
1013 "Need more than one range to concatenate!");
1014 return detail::concat_range<ValueT, RangeTs...>(
1015 std::forward<RangeTs>(Ranges)...);
1016}
1017
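A short usage sketch of concat (editorial example; assumes <vector> and
SmallVector are available):

  std::vector<int> V1 = {1, 2};
  llvm::SmallVector<int, 4> V2 = {3, 4};
  // The common element type must be spelled explicitly; iteration visits
  // 1, 2, 3, 4 and yields mutable references.
  for (int &I : llvm::concat<int>(V1, V2))
    ++I;
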
1018/// A utility class used to implement an iterator that contains some base object
1019/// and an index. The iterator moves the index but keeps the base constant.
1020template <typename DerivedT, typename BaseT, typename T,
1021 typename PointerT = T *, typename ReferenceT = T &>
1022class indexed_accessor_iterator
1023 : public llvm::iterator_facade_base<DerivedT,
1024 std::random_access_iterator_tag, T,
1025 std::ptrdiff_t, PointerT, ReferenceT> {
1026public:
1027 ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
1028    assert(base == rhs.base && "incompatible iterators");
1029 return index - rhs.index;
1030 }
1031 bool operator==(const indexed_accessor_iterator &rhs) const {
1032 return base == rhs.base && index == rhs.index;
1033 }
1034 bool operator<(const indexed_accessor_iterator &rhs) const {
1035    assert(base == rhs.base && "incompatible iterators");
1036 return index < rhs.index;
1037 }
1038
1039 DerivedT &operator+=(ptrdiff_t offset) {
1040 this->index += offset;
1041 return static_cast<DerivedT &>(*this);
1042 }
1043 DerivedT &operator-=(ptrdiff_t offset) {
1044 this->index -= offset;
1045 return static_cast<DerivedT &>(*this);
1046 }
1047
1048 /// Returns the current index of the iterator.
1049 ptrdiff_t getIndex() const { return index; }
1050
1051 /// Returns the current base of the iterator.
1052 const BaseT &getBase() const { return base; }
1053
1054protected:
1055 indexed_accessor_iterator(BaseT base, ptrdiff_t index)
1056 : base(base), index(index) {}
1057 BaseT base;
1058 ptrdiff_t index;
1059};
1060
1061namespace detail {
1062/// The class represents the base of a range of indexed_accessor_iterators. It
1063/// provides support for many different range functionalities, e.g.
1064/// drop_front/slice/etc.. Derived range classes must implement the following
1065/// static methods:
1066/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1067/// - Dereference an iterator pointing to the base object at the given
1068/// index.
1069/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1070/// - Return a new base that is offset from the provided base by 'index'
1071/// elements.
1072template <typename DerivedT, typename BaseT, typename T,
1073 typename PointerT = T *, typename ReferenceT = T &>
1074class indexed_accessor_range_base {
1075public:
1076 using RangeBaseT =
1077 indexed_accessor_range_base<DerivedT, BaseT, T, PointerT, ReferenceT>;
1078
1079 /// An iterator element of this range.
1080 class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
1081 PointerT, ReferenceT> {
1082 public:
1083 // Index into this iterator, invoking a static method on the derived type.
1084 ReferenceT operator*() const {
1085 return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
1086 }
1087
1088 private:
1089 iterator(BaseT owner, ptrdiff_t curIndex)
1090 : indexed_accessor_iterator<iterator, BaseT, T, PointerT, ReferenceT>(
1091 owner, curIndex) {}
1092
1093 /// Allow access to the constructor.
1094 friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
1095 ReferenceT>;
1096 };
1097
1098 indexed_accessor_range_base(iterator begin, iterator end)
1099 : base(offset_base(begin.getBase(), begin.getIndex())),
1100 count(end.getIndex() - begin.getIndex()) {}
1101 indexed_accessor_range_base(const iterator_range<iterator> &range)
1102 : indexed_accessor_range_base(range.begin(), range.end()) {}
1103 indexed_accessor_range_base(BaseT base, ptrdiff_t count)
1104 : base(base), count(count) {}
1105
1106 iterator begin() const { return iterator(base, 0); }
1107 iterator end() const { return iterator(base, count); }
1108 ReferenceT operator[](size_t Index) const {
1109    assert(Index < size() && "invalid index for value range");
1110 return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
1111 }
1112 ReferenceT front() const {
1113    assert(!empty() && "expected non-empty range");
1114 return (*this)[0];
1115 }
1116 ReferenceT back() const {
1117    assert(!empty() && "expected non-empty range");
1118 return (*this)[size() - 1];
1119 }
1120
1121 /// Compare this range with another.
1122 template <typename OtherT> bool operator==(const OtherT &other) const {
1123 return size() ==
1124 static_cast<size_t>(std::distance(other.begin(), other.end())) &&
1125 std::equal(begin(), end(), other.begin());
1126 }
1127 template <typename OtherT> bool operator!=(const OtherT &other) const {
1128 return !(*this == other);
1129 }
1130
1131 /// Return the size of this range.
1132 size_t size() const { return count; }
1133
1134 /// Return if the range is empty.
1135 bool empty() const { return size() == 0; }
1136
1137 /// Drop the first N elements, and keep M elements.
1138 DerivedT slice(size_t n, size_t m) const {
1139    assert(n + m <= size() && "invalid size specifiers");
1140 return DerivedT(offset_base(base, n), m);
1141 }
1142
1143 /// Drop the first n elements.
1144 DerivedT drop_front(size_t n = 1) const {
1145    assert(size() >= n && "Dropping more elements than exist");
1146 return slice(n, size() - n);
1147 }
1148 /// Drop the last n elements.
1149 DerivedT drop_back(size_t n = 1) const {
1150    assert(size() >= n && "Dropping more elements than exist");
1151 return DerivedT(base, size() - n);
1152 }
1153
1154 /// Take the first n elements.
1155 DerivedT take_front(size_t n = 1) const {
1156 return n < size() ? drop_back(size() - n)
1157 : static_cast<const DerivedT &>(*this);
1158 }
1159
1160 /// Take the last n elements.
1161 DerivedT take_back(size_t n = 1) const {
1162 return n < size() ? drop_front(size() - n)
1163 : static_cast<const DerivedT &>(*this);
1164 }
1165
1166 /// Allow conversion to any type accepting an iterator_range.
1167 template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
1168 RangeT, iterator_range<iterator>>::value>>
1169 operator RangeT() const {
1170 return RangeT(iterator_range<iterator>(*this));
1171 }
1172
1173 /// Returns the base of this range.
1174 const BaseT &getBase() const { return base; }
1175
1176private:
1177 /// Offset the given base by the given amount.
1178 static BaseT offset_base(const BaseT &base, size_t n) {
1179 return n == 0 ? base : DerivedT::offset_base(base, n);
1180 }
1181
1182protected:
1183 indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
1184 indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
1185 indexed_accessor_range_base &
1186 operator=(const indexed_accessor_range_base &) = default;
1187
1188 /// The base that owns the provided range of values.
1189 BaseT base;
1190 /// The size from the owning range.
1191 ptrdiff_t count;
1192};
1193} // end namespace detail
1194
1195/// This class provides an implementation of a range of
1196/// indexed_accessor_iterators where the base is not indexable. Ranges with
1197/// bases that are offsetable should derive from indexed_accessor_range_base
1198/// instead. Derived range classes are expected to implement the following
1199/// static method:
1200/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1201/// - Dereference an iterator pointing to a parent base at the given index.
1202template <typename DerivedT, typename BaseT, typename T,
1203 typename PointerT = T *, typename ReferenceT = T &>
1204class indexed_accessor_range
1205 : public detail::indexed_accessor_range_base<
1206 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
1207public:
1208 indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
1209 : detail::indexed_accessor_range_base<
1210 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
1211 std::make_pair(base, startIndex), count) {}
1212 using detail::indexed_accessor_range_base<
1213 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
1214 ReferenceT>::indexed_accessor_range_base;
1215
1216 /// Returns the current base of the range.
1217 const BaseT &getBase() const { return this->base.first; }
1218
1219 /// Returns the current start index of the range.
1220 ptrdiff_t getStartIndex() const { return this->base.second; }
1221
1222 /// See `detail::indexed_accessor_range_base` for details.
1223 static std::pair<BaseT, ptrdiff_t>
1224 offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
1225 // We encode the internal base as a pair of the derived base and a start
1226 // index into the derived base.
1227 return std::make_pair(base.first, base.second + index);
1228 }
1229 /// See `detail::indexed_accessor_range_base` for details.
1230 static ReferenceT
1231 dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
1232 ptrdiff_t index) {
1233 return DerivedT::dereference(base.first, base.second + index);
1234 }
1235};
1236
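A hedged sketch of a derived range (the IntArrayRange type below is a
hypothetical example, not part of this header):

  struct IntArrayRange
      : llvm::indexed_accessor_range<IntArrayRange, int *, int> {
    using indexed_accessor_range::indexed_accessor_range;
    // Required static hook: dereference the parent base at the given index.
    static int &dereference(int *const &Base, ptrdiff_t Index) {
      return Base[Index];
    }
  };

  int Data[] = {10, 20, 30};
  IntArrayRange Range(Data, /*startIndex=*/0, /*count=*/3);
  int First = Range.front(); // 10; drop_front()/slice() also work.
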
1237/// Given a container of pairs, return a range over the first elements.
1238template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
1239 return llvm::map_range(
1240 std::forward<ContainerTy>(c),
1241 [](decltype((*std::begin(c))) elt) -> decltype((elt.first)) {
1242 return elt.first;
1243 });
1244}
1245
1246/// Given a container of pairs, return a range over the second elements.
1247template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
1248 return llvm::map_range(
1249 std::forward<ContainerTy>(c),
1250 [](decltype((*std::begin(c))) elt) -> decltype((elt.second)) {
1251 return elt.second;
1252 });
1253}
1254
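A minimal usage sketch (editorial example; values hypothetical):

  std::vector<std::pair<int, const char *>> Pairs = {{1, "a"}, {2, "b"}};
  int KeySum = 0;
  for (int Key : llvm::make_first_range(Pairs))
    KeySum += Key; // visits 1, then 2
  for (const char *Val : llvm::make_second_range(Pairs))
    (void)Val;     // visits "a", then "b"
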
1255//===----------------------------------------------------------------------===//
1256// Extra additions to <utility>
1257//===----------------------------------------------------------------------===//
1258
1259/// Function object to check whether the first component of a std::pair
1260/// compares less than the first component of another std::pair.
1261struct less_first {
1262 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1263 return lhs.first < rhs.first;
1264 }
1265};
1266
1267/// Function object to check whether the second component of a std::pair
1268/// compares less than the second component of another std::pair.
1269struct less_second {
1270 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1271 return lhs.second < rhs.second;
1272 }
1273};
1274
1275/// \brief Function object to apply a binary function to the first component of
1276/// a std::pair.
1277template<typename FuncTy>
1278struct on_first {
1279 FuncTy func;
1280
1281 template <typename T>
1282 decltype(auto) operator()(const T &lhs, const T &rhs) const {
1283 return func(lhs.first, rhs.first);
1284 }
1285};
1286
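A short sketch of these pair helpers (editorial example):

  std::vector<std::pair<int, const char *>> V = {{2, "b"}, {1, "a"}};
  llvm::sort(V, llvm::less_first());               // order by .first
  llvm::sort(V, llvm::on_first<std::less<int>>{}); // equivalent, via on_first
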
1287/// Utility type to build an inheritance chain that makes it easy to rank
1288/// overload candidates.
1289template <int N> struct rank : rank<N - 1> {};
1290template <> struct rank<0> {};
1291
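A hedged sketch of overload ranking (getName/getNameImpl below are
hypothetical helpers): rank<1> is preferred by overload resolution, and the
derived-to-base conversion provides the rank<0> fallback when the first
overload is SFINAE'd away.

  template <typename T>
  auto getNameImpl(const T &Obj, llvm::rank<1>) -> decltype(Obj.getName()) {
    return Obj.getName(); // chosen when T has a getName() member
  }
  template <typename T>
  llvm::StringRef getNameImpl(const T &, llvm::rank<0>) {
    return "<unnamed>";   // fallback for all other types
  }
  template <typename T> llvm::StringRef getName(const T &Obj) {
    return getNameImpl(Obj, llvm::rank<1>());
  }
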
1292/// traits class for checking whether type T is any of the given types in
1293/// the variadic list.
1294template <typename T, typename... Ts>
1295using is_one_of = disjunction<std::is_same<T, Ts>...>;
1296
1297/// traits class for checking whether type T is a base class for all
1298/// the given types in the variadic list.
1299template <typename T, typename... Ts>
1300using are_base_of = conjunction<std::is_base_of<T, Ts>...>;
1301
1302namespace detail {
1303template <typename... Ts> struct Visitor;
1304
1305template <typename HeadT, typename... TailTs>
1306struct Visitor<HeadT, TailTs...> : remove_cvref_t<HeadT>, Visitor<TailTs...> {
1307 explicit constexpr Visitor(HeadT &&Head, TailTs &&...Tail)
1308 : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)),
1309 Visitor<TailTs...>(std::forward<TailTs>(Tail)...) {}
1310 using remove_cvref_t<HeadT>::operator();
1311 using Visitor<TailTs...>::operator();
1312};
1313
1314template <typename HeadT> struct Visitor<HeadT> : remove_cvref_t<HeadT> {
1315 explicit constexpr Visitor(HeadT &&Head)
1316 : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)) {}
1317 using remove_cvref_t<HeadT>::operator();
1318};
1319} // namespace detail
1320
1321/// Returns an opaquely-typed Callable object whose operator() overload set is
1322/// the sum of the operator() overload sets of each CallableT in CallableTs.
1323///
1324/// The type of the returned object derives from each CallableT in CallableTs.
1325/// The returned object is constructed by invoking the appropriate copy or move
1326/// constructor of each CallableT, as selected by overload resolution on the
1327/// corresponding argument to makeVisitor.
1328///
1329/// Example:
1330///
1331/// \code
1332/// auto visitor = makeVisitor([](auto) { return "unhandled type"; },
1333/// [](int i) { return "int"; },
1334/// [](std::string s) { return "str"; });
1335/// auto a = visitor(42); // `a` is now "int".
1336/// auto b = visitor("foo"); // `b` is now "str".
1337/// auto c = visitor(3.14f); // `c` is now "unhandled type".
1338/// \endcode
1339///
1340/// Example of making a visitor with a lambda which captures a move-only type:
1341///
1342/// \code
1343/// std::unique_ptr<FooHandler> FH = /* ... */;
1344/// auto visitor = makeVisitor(
1345/// [FH{std::move(FH)}](Foo F) { return FH->handle(F); },
1346/// [](int i) { return i; },
1347/// [](std::string s) { return atoi(s); });
1348/// \endcode
1349template <typename... CallableTs>
1350constexpr decltype(auto) makeVisitor(CallableTs &&...Callables) {
1351 return detail::Visitor<CallableTs...>(std::forward<CallableTs>(Callables)...);
1352}
1353
1354//===----------------------------------------------------------------------===//
1355// Extra additions for arrays
1356//===----------------------------------------------------------------------===//
1357
1358// We have a copy here so that LLVM behaves the same when using different
1359// standard libraries.
1360template <class Iterator, class RNG>
1361void shuffle(Iterator first, Iterator last, RNG &&g) {
1362 // It would be better to use a std::uniform_int_distribution,
1363 // but that would be stdlib dependent.
1364 typedef
1365 typename std::iterator_traits<Iterator>::difference_type difference_type;
1366 for (auto size = last - first; size > 1; ++first, (void)--size) {
1367 difference_type offset = g() % size;
1368 // Avoid self-assignment due to incorrect assertions in libstdc++
1369 // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
1370 if (offset != difference_type(0))
1371 std::iter_swap(first, first + offset);
1372 }
1373}
1374
1375/// Find the length of an array.
1376template <class T, std::size_t N>
1377constexpr inline size_t array_lengthof(T (&)[N]) {
1378 return N;
1379}
1380
1381/// Adapt std::less<T> for array_pod_sort.
1382template<typename T>
1383inline int array_pod_sort_comparator(const void *P1, const void *P2) {
1384 if (std::less<T>()(*reinterpret_cast<const T*>(P1),
1385 *reinterpret_cast<const T*>(P2)))
1386 return -1;
1387 if (std::less<T>()(*reinterpret_cast<const T*>(P2),
1388 *reinterpret_cast<const T*>(P1)))
1389 return 1;
1390 return 0;
1391}
1392
1393/// get_array_pod_sort_comparator - This is an internal helper function used to
1394/// get type deduction of T right.
1395template<typename T>
1396inline int (*get_array_pod_sort_comparator(const T &))
1397 (const void*, const void*) {
1398 return array_pod_sort_comparator<T>;
1399}
1400
1401#ifdef EXPENSIVE_CHECKS
1402namespace detail {
1403
1404inline unsigned presortShuffleEntropy() {
1405 static unsigned Result(std::random_device{}());
1406 return Result;
1407}
1408
1409template <class IteratorTy>
1410inline void presortShuffle(IteratorTy Start, IteratorTy End) {
1411 std::mt19937 Generator(presortShuffleEntropy());
1412 llvm::shuffle(Start, End, Generator);
1413}
1414
1415} // end namespace detail
1416#endif
1417
1418/// array_pod_sort - This sorts an array with the specified start and end
1419/// extent. This is just like std::sort, except that it calls qsort instead of
1420/// using an inlined template. qsort is slightly slower than std::sort, but
1421/// most sorts are not performance critical in LLVM and std::sort has to be
1422/// template instantiated for each type, leading to significant measured code
1423/// bloat. This function should generally be used instead of std::sort where
1424/// possible.
1425///
1426/// This function assumes that you have simple POD-like types that can be
1427/// compared with std::less and can be moved with memcpy. If this isn't true,
1428/// you should use std::sort.
1429///
1430/// NOTE: If qsort_r were portable, we could allow a custom comparator and
1431/// default to std::less.
1432template<class IteratorTy>
1433inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
1434 // Don't inefficiently call qsort with one element or trigger undefined
1435 // behavior with an empty sequence.
1436 auto NElts = End - Start;
1437 if (NElts <= 1) return;
1438#ifdef EXPENSIVE_CHECKS
1439 detail::presortShuffle<IteratorTy>(Start, End);
1440#endif
1441 qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
1442}
1443
1444template <class IteratorTy>
1445inline void array_pod_sort(
1446 IteratorTy Start, IteratorTy End,
1447 int (*Compare)(
1448 const typename std::iterator_traits<IteratorTy>::value_type *,
1449 const typename std::iterator_traits<IteratorTy>::value_type *)) {
1450 // Don't inefficiently call qsort with one element or trigger undefined
1451 // behavior with an empty sequence.
1452 auto NElts = End - Start;
1453 if (NElts <= 1) return;
1454#ifdef EXPENSIVE_CHECKS
1455 detail::presortShuffle<IteratorTy>(Start, End);
1456#endif
1457 qsort(&*Start, NElts, sizeof(*Start),
1458 reinterpret_cast<int (*)(const void *, const void *)>(Compare));
1459}
1460
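A minimal usage sketch (editorial example):

  llvm::SmallVector<unsigned, 8> Values = {3, 1, 2};
  llvm::array_pod_sort(Values.begin(), Values.end()); // now 1, 2, 3
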
1461namespace detail {
1462template <typename T>
1463// We can use qsort if the iterator type is a pointer and the underlying value
1464// is trivially copyable.
1465using sort_trivially_copyable = conjunction<
1466 std::is_pointer<T>,
1467 std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
1468} // namespace detail
1469
1470// Provide wrappers to std::sort which (under EXPENSIVE_CHECKS) shuffle the
1471// elements before sorting to help uncover non-deterministic behavior (PR35135).
1472template <typename IteratorTy,
1473 std::enable_if_t<!detail::sort_trivially_copyable<IteratorTy>::value,
1474 int> = 0>
1475inline void sort(IteratorTy Start, IteratorTy End) {
1476#ifdef EXPENSIVE_CHECKS
1477 detail::presortShuffle<IteratorTy>(Start, End);
1478#endif
1479 std::sort(Start, End);
1480}
1481
1482// Forward trivially copyable types to array_pod_sort. This avoids a large
1483// amount of code bloat for a minor performance hit.
1484template <typename IteratorTy,
1485 std::enable_if_t<detail::sort_trivially_copyable<IteratorTy>::value,
1486 int> = 0>
1487inline void sort(IteratorTy Start, IteratorTy End) {
1488 array_pod_sort(Start, End);
1489}
1490
1491template <typename Container> inline void sort(Container &&C) {
1492 llvm::sort(adl_begin(C), adl_end(C));
1493}
1494
1495template <typename IteratorTy, typename Compare>
1496inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
1497#ifdef EXPENSIVE_CHECKS
1498 detail::presortShuffle<IteratorTy>(Start, End);
1499#endif
1500 std::sort(Start, End, Comp);
1501}
1502
1503template <typename Container, typename Compare>
1504inline void sort(Container &&C, Compare Comp) {
1505 llvm::sort(adl_begin(C), adl_end(C), Comp);
1506}
1507
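A short usage sketch of the llvm::sort wrappers (editorial example):

  std::vector<int> V = {3, 1, 2};
  llvm::sort(V);                                     // ascending: 1, 2, 3
  llvm::sort(V, [](int A, int B) { return A > B; }); // descending: 3, 2, 1
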
1508//===----------------------------------------------------------------------===//
1509// Extra additions to <algorithm>
1510//===----------------------------------------------------------------------===//
1511
1512/// Get the size of a range. This is a wrapper function around std::distance
1513/// which is only enabled when the operation is O(1).
1514template <typename R>
1515auto size(R &&Range,
1516 std::enable_if_t<
1517 std::is_base_of<std::random_access_iterator_tag,
1518 typename std::iterator_traits<decltype(
1519 Range.begin())>::iterator_category>::value,
1520 void> * = nullptr) {
1521 return std::distance(Range.begin(), Range.end());
1522}
1523
1524/// Provide wrappers to std::for_each which take ranges instead of having to
1525/// pass begin/end explicitly.
1526template <typename R, typename UnaryFunction>
1527UnaryFunction for_each(R &&Range, UnaryFunction F) {
1528 return std::for_each(adl_begin(Range), adl_end(Range), F);
1529}
1530
1531/// Provide wrappers to std::all_of which take ranges instead of having to pass
1532/// begin/end explicitly.
1533template <typename R, typename UnaryPredicate>
1534bool all_of(R &&Range, UnaryPredicate P) {
1535 return std::all_of(adl_begin(Range), adl_end(Range), P);
1536}
1537
1538/// Provide wrappers to std::any_of which take ranges instead of having to pass
1539/// begin/end explicitly.
1540template <typename R, typename UnaryPredicate>
1541bool any_of(R &&Range, UnaryPredicate P) {
1542 return std::any_of(adl_begin(Range), adl_end(Range), P);
1543}
1544
1545/// Provide wrappers to std::none_of which take ranges instead of having to pass
1546/// begin/end explicitly.
1547template <typename R, typename UnaryPredicate>
1548bool none_of(R &&Range, UnaryPredicate P) {
1549 return std::none_of(adl_begin(Range), adl_end(Range), P);
1550}
1551
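A minimal usage sketch of these range predicates (editorial example):

  std::vector<int> V = {1, 2, 3};
  bool AllPositive = llvm::all_of(V, [](int I) { return I > 0; });   // true
  bool AnyEven = llvm::any_of(V, [](int I) { return I % 2 == 0; });  // true
  bool NoneNegative = llvm::none_of(V, [](int I) { return I < 0; }); // true
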
1552/// Provide wrappers to std::find which take ranges instead of having to pass
1553/// begin/end explicitly.
1554template <typename R, typename T> auto find(R &&Range, const T &Val) {
1555 return std::find(adl_begin(Range), adl_end(Range), Val);
1556}
1557
1558/// Provide wrappers to std::find_if which take ranges instead of having to pass
1559/// begin/end explicitly.
1560template <typename R, typename UnaryPredicate>
1561auto find_if(R &&Range, UnaryPredicate P) {
1562 return std::find_if(adl_begin(Range), adl_end(Range), P);
1563}
1564
1565template <typename R, typename UnaryPredicate>
1566auto find_if_not(R &&Range, UnaryPredicate P) {
1567 return std::find_if_not(adl_begin(Range), adl_end(Range), P);
1568}
1569
1570/// Provide wrappers to std::remove_if which take ranges instead of having to
1571/// pass begin/end explicitly.
1572template <typename R, typename UnaryPredicate>
1573auto remove_if(R &&Range, UnaryPredicate P) {
1574 return std::remove_if(adl_begin(Range), adl_end(Range), P);
1575}
1576
1577/// Provide wrappers to std::copy_if which take ranges instead of having to
1578/// pass begin/end explicitly.
1579template <typename R, typename OutputIt, typename UnaryPredicate>
1580OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
1581 return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
1582}
1583
1584template <typename R, typename OutputIt>
1585OutputIt copy(R &&Range, OutputIt Out) {
1586 return std::copy(adl_begin(Range), adl_end(Range), Out);
1587}
1588
1589/// Provide wrappers to std::move which take ranges instead of having to
1590/// pass begin/end explicitly.
1591template <typename R, typename OutputIt>
1592OutputIt move(R &&Range, OutputIt Out) {
1593 return std::move(adl_begin(Range), adl_end(Range), Out);
1594}
1595
1596/// Wrapper function around std::find to detect if an element exists
1597/// in a container.
1598template <typename R, typename E>
1599bool is_contained(R &&Range, const E &Element) {
1600 return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
1601}
1602
1603/// Wrapper function around std::is_sorted to check if elements in a range \p R
1604/// are sorted with respect to a comparator \p C.
1605template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
1606 return std::is_sorted(adl_begin(Range), adl_end(Range), C);
1607}
1608
1609/// Wrapper function around std::is_sorted to check if elements in a range \p R
1610/// are sorted in non-descending order.
1611template <typename R> bool is_sorted(R &&Range) {
1612 return std::is_sorted(adl_begin(Range), adl_end(Range));
1613}
1614
1615/// Wrapper function around std::count to count the number of times an element
1616/// \p Element occurs in the given range \p Range.
1617template <typename R, typename E> auto count(R &&Range, const E &Element) {
1618 return std::count(adl_begin(Range), adl_end(Range), Element);
1619}
1620
1621/// Wrapper function around std::count_if to count the number of times an
1622/// element satisfying a given predicate occurs in a range.
1623template <typename R, typename UnaryPredicate>
1624auto count_if(R &&Range, UnaryPredicate P) {
1625 return std::count_if(adl_begin(Range), adl_end(Range), P);
1626}
1627
1628/// Wrapper function around std::transform to apply a function to a range and
1629/// store the result elsewhere.
1630template <typename R, typename OutputIt, typename UnaryFunction>
1631OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
1632 return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
1633}
1634
1635/// Provide wrappers to std::partition which take ranges instead of having to
1636/// pass begin/end explicitly.
1637template <typename R, typename UnaryPredicate>
1638auto partition(R &&Range, UnaryPredicate P) {
1639 return std::partition(adl_begin(Range), adl_end(Range), P);
1640}
1641
1642/// Provide wrappers to std::lower_bound which take ranges instead of having to
1643/// pass begin/end explicitly.
1644template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
1645 return std::lower_bound(adl_begin(Range), adl_end(Range),
1646 std::forward<T>(Value));
1647}
1648
1649template <typename R, typename T, typename Compare>
1650auto lower_bound(R &&Range, T &&Value, Compare C) {
1651 return std::lower_bound(adl_begin(Range), adl_end(Range),
1652 std::forward<T>(Value), C);
1653}
1654
1655/// Provide wrappers to std::upper_bound which take ranges instead of having to
1656/// pass begin/end explicitly.
1657template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
1658 return std::upper_bound(adl_begin(Range), adl_end(Range),
1659 std::forward<T>(Value));
1660}
1661
1662template <typename R, typename T, typename Compare>
1663auto upper_bound(R &&Range, T &&Value, Compare C) {
1664 return std::upper_bound(adl_begin(Range), adl_end(Range),
1665 std::forward<T>(Value), C);
1666}
1667
1668template <typename R>
1669void stable_sort(R &&Range) {
1670 std::stable_sort(adl_begin(Range), adl_end(Range));
1671}
1672
1673template <typename R, typename Compare>
1674void stable_sort(R &&Range, Compare C) {
1675 std::stable_sort(adl_begin(Range), adl_end(Range), C);
1676}
1677
1678/// Binary search for the first iterator in a range where a predicate is false.
1679/// Requires that P is always true below some limit, and always false above it.
1680template <typename R, typename Predicate,
1681 typename Val = decltype(*adl_begin(std::declval<R>()))>
1682auto partition_point(R &&Range, Predicate P) {
1683 return std::partition_point(adl_begin(Range), adl_end(Range), P);
1684}
1685
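A short sketch (editorial example): the predicate holds for a prefix of the
range and fails afterwards; the result points at the first failing element.

  std::vector<int> V = {1, 2, 3, 10, 12};
  auto It = llvm::partition_point(V, [](int I) { return I < 10; });
  // *It == 10
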
1686template<typename Range, typename Predicate>
1687auto unique(Range &&R, Predicate P) {
1688 return std::unique(adl_begin(R), adl_end(R), P);
1689}
1690
1691/// Wrapper function around std::equal to detect if all elements
1692/// in a container are the same.
1693template <typename R>
1694bool is_splat(R &&Range) {
1695 size_t range_size = size(Range);
1696 return range_size != 0 && (range_size == 1 ||
1697 std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
1698}
1699
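A minimal usage sketch (editorial example):

  std::vector<int> V = {7, 7, 7};
  bool Splat = llvm::is_splat(V); // true: every element equals the first
  // An empty range is never a splat; a single-element range always is.
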
1700/// Provide a container algorithm similar to C++ Library Fundamentals v2's
1701/// `erase_if` which is equivalent to:
1702///
1703/// C.erase(remove_if(C, pred), C.end());
1704///
1705/// This version works for any container with an erase method call accepting
1706/// two iterators.
1707template <typename Container, typename UnaryPredicate>
1708void erase_if(Container &C, UnaryPredicate P) {
1709 C.erase(remove_if(C, P), C.end());
1710}
1711
1712/// Wrapper function to remove a value from a container:
1713///
1714/// C.erase(remove(C.begin(), C.end(), V), C.end());
1715template <typename Container, typename ValueType>
1716void erase_value(Container &C, ValueType V) {
1717 C.erase(std::remove(C.begin(), C.end(), V), C.end());
1718}
1719
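A short usage sketch (editorial example):

  std::vector<int> V = {1, 2, 3, 4};
  llvm::erase_if(V, [](int I) { return I % 2 == 0; }); // V == {1, 3}
  llvm::erase_value(V, 3);                             // V == {1}
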
1720/// Wrapper function to append a range to a container.
1721///
1722/// C.insert(C.end(), R.begin(), R.end());
1723template <typename Container, typename Range>
1724inline void append_range(Container &C, Range &&R) {
1725 C.insert(C.end(), R.begin(), R.end());
1726}
1727
1728/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1729/// the range [ValIt, ValEnd) (which is not from the same container).
1730template<typename Container, typename RandomAccessIterator>
1731void replace(Container &Cont, typename Container::iterator ContIt,
1732 typename Container::iterator ContEnd, RandomAccessIterator ValIt,
1733 RandomAccessIterator ValEnd) {
1734 while (true) {
1735 if (ValIt == ValEnd) {
1736 Cont.erase(ContIt, ContEnd);
1737 return;
1738 } else if (ContIt == ContEnd) {
1739 Cont.insert(ContIt, ValIt, ValEnd);
1740 return;
1741 }
1742 *ContIt++ = *ValIt++;
1743 }
1744}
1745
1746/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
1747/// the range R.
1748template<typename Container, typename Range = std::initializer_list<
1749 typename Container::value_type>>
1750void replace(Container &Cont, typename Container::iterator ContIt,
1751 typename Container::iterator ContEnd, Range R) {
1752 replace(Cont, ContIt, ContEnd, R.begin(), R.end());
1753}
1754
1755/// An STL-style algorithm similar to std::for_each that applies a second
1756/// functor between every pair of elements.
1757///
1758/// This provides the control flow logic to, for example, print a
1759/// comma-separated list:
1760/// \code
1761/// interleave(names.begin(), names.end(),
1762/// [&](StringRef name) { os << name; },
1763/// [&] { os << ", "; });
1764/// \endcode
1765template <typename ForwardIterator, typename UnaryFunctor,
1766 typename NullaryFunctor,
1767 typename = typename std::enable_if<
1768 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1769 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1770inline void interleave(ForwardIterator begin, ForwardIterator end,
1771 UnaryFunctor each_fn, NullaryFunctor between_fn) {
1772 if (begin == end)
1773 return;
1774 each_fn(*begin);
1775 ++begin;
1776 for (; begin != end; ++begin) {
1777 between_fn();
1778 each_fn(*begin);
1779 }
1780}
1781
1782template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
1783 typename = typename std::enable_if<
1784 !std::is_constructible<StringRef, UnaryFunctor>::value &&
1785 !std::is_constructible<StringRef, NullaryFunctor>::value>::type>
1786inline void interleave(const Container &c, UnaryFunctor each_fn,
1787 NullaryFunctor between_fn) {
1788 interleave(c.begin(), c.end(), each_fn, between_fn);
1789}
1790
1791/// Overload of interleave for the common case of string separator.
1792template <typename Container, typename UnaryFunctor, typename StreamT,
1793 typename T = detail::ValueOfRange<Container>>
1794inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
1795 const StringRef &separator) {
1796 interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
1797}
1798template <typename Container, typename StreamT,
1799 typename T = detail::ValueOfRange<Container>>
1800inline void interleave(const Container &c, StreamT &os,
1801 const StringRef &separator) {
1802 interleave(
1803 c, os, [&](const T &a) { os << a; }, separator);
1804}
1805
1806template <typename Container, typename UnaryFunctor, typename StreamT,
1807 typename T = detail::ValueOfRange<Container>>
1808inline void interleaveComma(const Container &c, StreamT &os,
1809 UnaryFunctor each_fn) {
1810 interleave(c, os, each_fn, ", ");
1811}
1812template <typename Container, typename StreamT,
1813 typename T = detail::ValueOfRange<Container>>
1814inline void interleaveComma(const Container &c, StreamT &os) {
1815 interleaveComma(c, os, [&](const T &a) { os << a; });
1816}
1817
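A minimal usage sketch (editorial example; assumes a raw_ostream such as
llvm::errs() is available):

  std::vector<int> V = {1, 2, 3};
  llvm::interleaveComma(V, llvm::errs()); // prints "1, 2, 3"
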
1818//===----------------------------------------------------------------------===//
1819// Extra additions to <memory>
1820//===----------------------------------------------------------------------===//
1821
1822struct FreeDeleter {
1823 void operator()(void* v) {
1824 ::free(v);
1825 }
1826};
1827
1828template<typename First, typename Second>
1829struct pair_hash {
1830 size_t operator()(const std::pair<First, Second> &P) const {
1831 return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
1832 }
1833};
1834
1835/// Binary functor that adapts to any other binary functor after dereferencing
1836/// operands.
1837template <typename T> struct deref {
1838 T func;
1839
1840 // Could be further improved to cope with non-derivable functors and
1841 // non-binary functors (should be a variadic template member function
1842 // operator()).
1843 template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
1844    assert(lhs);
1845    assert(rhs);
1846 return func(*lhs, *rhs);
1847 }
1848};
1849
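A hedged usage sketch of deref (editorial example): sort a container of
unique_ptrs by the pointed-to values rather than by the pointers themselves.

  std::vector<std::unique_ptr<int>> Ptrs;
  Ptrs.push_back(std::make_unique<int>(2));
  Ptrs.push_back(std::make_unique<int>(1));
  llvm::sort(Ptrs, llvm::deref<std::less<int>>()); // pointees now 1, 2
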
1850namespace detail {
1851
1852template <typename R> class enumerator_iter;
1853
1854template <typename R> struct result_pair {
1855 using value_reference =
1856 typename std::iterator_traits<IterOfRange<R>>::reference;
1857
1858 friend class enumerator_iter<R>;
1859
1860 result_pair() = default;
1861 result_pair(std::size_t Index, IterOfRange<R> Iter)
1862 : Index(Index), Iter(Iter) {}
1863
1864 result_pair(const result_pair<R> &Other)
1865 : Index(Other.Index), Iter(Other.Iter) {}
1866 result_pair &operator=(const result_pair &Other) {
1867 Index = Other.Index;
1868 Iter = Other.Iter;
1869 return *this;
1870 }
1871
1872 std::size_t index() const { return Index; }
1873 const value_reference value() const { return *Iter; }
1874 value_reference value() { return *Iter; }
1875
1876private:
1877 std::size_t Index = std::numeric_limits<std::size_t>::max();
1878 IterOfRange<R> Iter;
1879};
1880
1881template <typename R>
1882class enumerator_iter
1883 : public iterator_facade_base<
1884 enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
1885 typename std::iterator_traits<IterOfRange<R>>::difference_type,
1886 typename std::iterator_traits<IterOfRange<R>>::pointer,
1887 typename std::iterator_traits<IterOfRange<R>>::reference> {
1888 using result_type = result_pair<R>;
1889
1890public:
1891 explicit enumerator_iter(IterOfRange<R> EndIter)
1892 : Result(std::numeric_limits<size_t>::max(), EndIter) {}
1893
1894 enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
1895 : Result(Index, Iter) {}
1896
1897 result_type &operator*() { return Result; }
1898 const result_type &operator*() const { return Result; }
1899
1900 enumerator_iter &operator++() {
1901    assert(Result.Index != std::numeric_limits<size_t>::max());
1902 ++Result.Iter;
1903 ++Result.Index;
1904 return *this;
1905 }
1906
1907 bool operator==(const enumerator_iter &RHS) const {
1908 // Don't compare indices here, only iterators. It's possible for an end
1909 // iterator to have different indices depending on whether it was created
1910 // by calling std::end() versus incrementing a valid iterator.
1911 return Result.Iter == RHS.Result.Iter;
1912 }
1913
1914 enumerator_iter(const enumerator_iter &Other) : Result(Other.Result) {}
1915 enumerator_iter &operator=(const enumerator_iter &Other) {
1916 Result = Other.Result;
1917 return *this;
1918 }
1919
1920private:
1921 result_type Result;
1922};
1923
1924template <typename R> class enumerator {
1925public:
1926 explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
1927
1928 enumerator_iter<R> begin() {
1929 return enumerator_iter<R>(0, std::begin(TheRange));
1930 }
1931
1932 enumerator_iter<R> end() {
1933 return enumerator_iter<R>(std::end(TheRange));
1934 }
1935
1936private:
1937 R TheRange;
1938};
1939
1940} // end namespace detail
1941
1942/// Given an input range, returns a new range whose values are pairs (A, B)
1943/// such that A is the 0-based index of the item in the sequence, and B is
1944/// the value from the original sequence. Example:
1945///
1946/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
1947/// for (auto X : enumerate(Items)) {
1948/// printf("Item %zu - %c\n", X.index(), X.value());
1949/// }
1950///
1951/// Output:
1952/// Item 0 - A
1953/// Item 1 - B
1954/// Item 2 - C
1955/// Item 3 - D
1956///
1957template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
1958 return detail::enumerator<R>(std::forward<R>(TheRange));
1959}
1960
1961namespace detail {
1962
1963template <typename F, typename Tuple, std::size_t... I>
1964decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
1965 return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
1966}
1967
1968} // end namespace detail
1969
1970/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
1971/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
1972/// return the result.
1973template <typename F, typename Tuple>
1974decltype(auto) apply_tuple(F &&f, Tuple &&t) {
1975 using Indices = std::make_index_sequence<
1976 std::tuple_size<typename std::decay<Tuple>::type>::value>;
1977
1978 return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
1979 Indices{});
1980}
1981
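A minimal usage sketch (editorial example):

  auto Add3 = [](int A, int B, int C) { return A + B + C; };
  int Sum = llvm::apply_tuple(Add3, std::make_tuple(1, 2, 3)); // Sum == 6
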
1982/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
1983/// time. Not meant for use with random-access iterators.
1984/// Can optionally take a predicate to lazily filter some items.
1985template <typename IterTy,
1986 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
1987bool hasNItems(
1988 IterTy &&Begin, IterTy &&End, unsigned N,
1989 Pred &&ShouldBeCounted =
1990 [](const decltype(*std::declval<IterTy>()) &) { return true; },
1991 std::enable_if_t<
1992 !std::is_base_of<std::random_access_iterator_tag,
1993 typename std::iterator_traits<std::remove_reference_t<
1994 decltype(Begin)>>::iterator_category>::value,
1995 void> * = nullptr) {
1996 for (; N; ++Begin) {
1997 if (Begin == End)
1998 return false; // Too few.
1999 N -= ShouldBeCounted(*Begin);
2000 }
2001 for (; Begin != End; ++Begin)
2002 if (ShouldBeCounted(*Begin))
2003 return false; // Too many.
2004 return true;
2005}
2006
2007/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
2008/// time. Not meant for use with random-access iterators.
2009/// Can optionally take a predicate to lazily filter some items.
2010template <typename IterTy,
2011 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2012bool hasNItemsOrMore(
2013 IterTy &&Begin, IterTy &&End, unsigned N,
2014 Pred &&ShouldBeCounted =
2015 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2016 std::enable_if_t<
2017 !std::is_base_of<std::random_access_iterator_tag,
2018 typename std::iterator_traits<std::remove_reference_t<
2019 decltype(Begin)>>::iterator_category>::value,
2020 void> * = nullptr) {
2021 for (; N; ++Begin) {
2022 if (Begin == End)
2023 return false; // Too few.
2024 N -= ShouldBeCounted(*Begin);
2025 }
2026 return true;
2027}
2028
2029/// Returns true if the sequence [Begin, End) has N or fewer items. Can
2030/// optionally take a predicate to lazily filter some items.
2031template <typename IterTy,
2032 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2033bool hasNItemsOrLess(
2034 IterTy &&Begin, IterTy &&End, unsigned N,
2035 Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
2036 return true;
2037 }) {
2038  assert(N != std::numeric_limits<unsigned>::max());
2039 return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
2040}
2041
2042/// Returns true if the given container has exactly N items
2043template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
2044 return hasNItems(std::begin(C), std::end(C), N);
2045}
2046
2047/// Returns true if the given container has N or more items
2048template <typename ContainerTy>
2049bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
2050 return hasNItemsOrMore(std::begin(C), std::end(C), N);
2051}
2052
2053/// Returns true if the given container has N or fewer items
2054template <typename ContainerTy>
2055bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
2056 return hasNItemsOrLess(std::begin(C), std::end(C), N);
2057}
2058
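A short sketch with a non-random-access container (editorial example;
std::list iterators are bidirectional, so the O(N) walk applies):

  std::list<int> L = {1, 2, 3};
  bool Exactly3 = llvm::hasNItems(L, 3);       // true
  bool AtLeast2 = llvm::hasNItemsOrMore(L, 2); // true
  bool AtMost2 = llvm::hasNItemsOrLess(L, 2);  // false
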
2059/// Returns a raw pointer that represents the same address as the argument.
2060///
2061/// This implementation can be removed once we move to C++20 where it's defined
2062/// as std::to_address().
2063///
2064/// The std::pointer_traits<>::to_address(p) variations of these overloads have
2065/// not been implemented.
2066template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
2067template <class T> constexpr T *to_address(T *P) { return P; }
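
A minimal usage sketch (editorial example):

  std::unique_ptr<int> Owner = std::make_unique<int>(42);
  int *Raw = llvm::to_address(Owner); // same as Owner.get() here
  int *Same = llvm::to_address(Raw);  // raw pointers pass through unchanged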
2068
2069} // end namespace llvm
2070
2071#endif // LLVM_ADT_STLEXTRAS_H

/build/llvm-toolchain-snapshot-13~++20210726100616+dead50d4427c/llvm/include/llvm/IR/Type.h

1//===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Type class. For more "Type"
10// stuff, look in DerivedTypes.h.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_TYPE_H
15#define LLVM_IR_TYPE_H
16
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/Support/CBindingWrapping.h"
21#include "llvm/Support/Casting.h"
22#include "llvm/Support/Compiler.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/TypeSize.h"
25#include <cassert>
26#include <cstdint>
27#include <iterator>
28
29namespace llvm {
30
31class IntegerType;
32class LLVMContext;
33class PointerType;
34class raw_ostream;
35class StringRef;
36
37/// The instances of the Type class are immutable: once they are created,
38/// they are never changed. Also note that only one instance of a particular
39/// type is ever created. Thus seeing if two types are equal is a matter of
40/// doing a trivial pointer comparison. To enforce that no two equal instances
41/// are created, Type instances can only be created via static factory methods
42/// in class Type and in derived classes. Once allocated, Types are never
43/// freed.
44///
45class Type {
46public:
47 //===--------------------------------------------------------------------===//
48 /// Definitions of all of the base types for the Type system. Based on this
49 /// value, you can cast to a class defined in DerivedTypes.h.
50 /// Note: If you add an element to this, you need to add an element to the
51 /// Type::getPrimitiveType function, or else things will break!
52 /// Also update LLVMTypeKind and LLVMGetTypeKind () in the C binding.
53 ///
54 enum TypeID {
55 // PrimitiveTypes
56 HalfTyID = 0, ///< 16-bit floating point type
57 BFloatTyID, ///< 16-bit floating point type (7-bit significand)
58 FloatTyID, ///< 32-bit floating point type
59 DoubleTyID, ///< 64-bit floating point type
60 X86_FP80TyID, ///< 80-bit floating point type (X87)
61 FP128TyID, ///< 128-bit floating point type (112-bit significand)
62 PPC_FP128TyID, ///< 128-bit floating point type (two 64-bits, PowerPC)
63 VoidTyID, ///< type with no size
64 LabelTyID, ///< Labels
65 MetadataTyID, ///< Metadata
66 X86_MMXTyID, ///< MMX vectors (64 bits, X86 specific)
67 X86_AMXTyID, ///< AMX vectors (8192 bits, X86 specific)
68 TokenTyID, ///< Tokens
69
70 // Derived types... see DerivedTypes.h file.
71 IntegerTyID, ///< Arbitrary bit width integers
72 FunctionTyID, ///< Functions
73 PointerTyID, ///< Pointers
74 StructTyID, ///< Structures
75 ArrayTyID, ///< Arrays
76 FixedVectorTyID, ///< Fixed width SIMD vector type
77 ScalableVectorTyID ///< Scalable SIMD vector type
78 };
79
80private:
81 /// This refers to the LLVMContext in which this type was uniqued.
82 LLVMContext &Context;
83
84 TypeID ID : 8; // The current base type of this type.
85 unsigned SubclassData : 24; // Space for subclasses to store data.
86 // Note that this should be synchronized with
87 // MAX_INT_BITS value in IntegerType class.
88
89protected:
90 friend class LLVMContextImpl;
91
92 explicit Type(LLVMContext &C, TypeID tid)
93 : Context(C), ID(tid), SubclassData(0) {}
94 ~Type() = default;
95
96 unsigned getSubclassData() const { return SubclassData; }
97
98 void setSubclassData(unsigned val) {
99 SubclassData = val;
100 // Ensure we don't have any accidental truncation.
101    assert(getSubclassData() == val && "Subclass data too large for field");
102 }
103
104 /// Keeps track of how many Type*'s there are in the ContainedTys list.
105 unsigned NumContainedTys = 0;
106
107 /// A pointer to the array of Types contained by this Type. For example, this
108 /// includes the arguments of a function type, the elements of a structure,
109 /// the pointee of a pointer, the element type of an array, etc. This pointer
110 /// may be 0 for types that don't contain other types (Integer, Double,
111 /// Float).
112 Type * const *ContainedTys = nullptr;
113
114public:
115 /// Print the current type.
116 /// Omit the type details if \p NoDetails == true.
117 /// E.g., let %st = type { i32, i16 }
118 /// When \p NoDetails is true, we only print %st.
119 /// Put differently, \p NoDetails prints the type as if
120 /// inlined with the operands when printing an instruction.
121 void print(raw_ostream &O, bool IsForDebug = false,
122 bool NoDetails = false) const;
123
124 void dump() const;
125
126 /// Return the LLVMContext in which this type was uniqued.
127 LLVMContext &getContext() const { return Context; }
128
129 //===--------------------------------------------------------------------===//
130 // Accessors for working with types.
131 //
132
133 /// Return the type id for the type. This will return one of the TypeID enum
134 /// elements defined above.
135 TypeID getTypeID() const { return ID; }
136
137 /// Return true if this is 'void'.
138 bool isVoidTy() const { return getTypeID() == VoidTyID; }
139
140 /// Return true if this is 'half', a 16-bit IEEE fp type.
141 bool isHalfTy() const { return getTypeID() == HalfTyID; }
142
143 /// Return true if this is 'bfloat', a 16-bit bfloat type.
144 bool isBFloatTy() const { return getTypeID() == BFloatTyID; }
145
146 /// Return true if this is 'float', a 32-bit IEEE fp type.
147 bool isFloatTy() const { return getTypeID() == FloatTyID; }
148
149 /// Return true if this is 'double', a 64-bit IEEE fp type.
150 bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
151
152 /// Return true if this is x86 long double.
153 bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
154
155 /// Return true if this is 'fp128'.
156 bool isFP128Ty() const { return getTypeID() == FP128TyID; }
157
158 /// Return true if this is powerpc long double.
159 bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
160
161 /// Return true if this is one of the six floating-point types
162 bool isFloatingPointTy() const {
163 return getTypeID() == HalfTyID || getTypeID() == BFloatTyID ||
164 getTypeID() == FloatTyID || getTypeID() == DoubleTyID ||
165 getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
166 getTypeID() == PPC_FP128TyID;
167 }
168
169 const fltSemantics &getFltSemantics() const {
170 switch (getTypeID()) {
171 case HalfTyID: return APFloat::IEEEhalf();
172 case BFloatTyID: return APFloat::BFloat();
173 case FloatTyID: return APFloat::IEEEsingle();
174 case DoubleTyID: return APFloat::IEEEdouble();
175 case X86_FP80TyID: return APFloat::x87DoubleExtended();
176 case FP128TyID: return APFloat::IEEEquad();
177 case PPC_FP128TyID: return APFloat::PPCDoubleDouble();
178    default: llvm_unreachable("Invalid floating type");
179 }
180 }
181
182 /// Return true if this is X86 MMX.
183 bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
184
185 /// Return true if this is X86 AMX.
186 bool isX86_AMXTy() const { return getTypeID() == X86_AMXTyID; }
187
188 /// Return true if this is a FP type or a vector of FP.
189 bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
190
191 /// Return true if this is 'label'.
192 bool isLabelTy() const { return getTypeID() == LabelTyID; }
193
194 /// Return true if this is 'metadata'.
195 bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
196
197 /// Return true if this is 'token'.
198 bool isTokenTy() const { return getTypeID() == TokenTyID; }
199
200 /// True if this is an instance of IntegerType.
201 bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
202
203 /// Return true if this is an IntegerType of the given width.
204 bool isIntegerTy(unsigned Bitwidth) const;
205
206 /// Return true if this is an integer type or a vector of integer types.
207 bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
208
209 /// Return true if this is an integer type or a vector of integer types of
210 /// the given width.
211 bool isIntOrIntVectorTy(unsigned BitWidth) const {
212 return getScalarType()->isIntegerTy(BitWidth);
213 }
214
215 /// Return true if this is an integer type or a pointer type.
216 bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); }
217
218 /// True if this is an instance of FunctionType.
219 bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
220
221 /// True if this is an instance of StructType.
222 bool isStructTy() const { return getTypeID() == StructTyID; }
223
224 /// True if this is an instance of ArrayType.
225 bool isArrayTy() const { return getTypeID() == ArrayTyID; }
226
227 /// True if this is an instance of PointerType.
228 bool isPointerTy() const { return getTypeID() == PointerTyID; }
229
230 /// True if this is an instance of an opaque PointerType.
231 bool isOpaquePointerTy() const;
232
233 /// Return true if this is a pointer type or a vector of pointer types.
234 bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
235
236 /// True if this is an instance of VectorType.
237 inline bool isVectorTy() const {
238 return getTypeID() == ScalableVectorTyID || getTypeID() == FixedVectorTyID;
239 }
240
241 /// Return true if this type could be converted to type 'Ty' with a lossless
242 /// BitCast, e.g. i8* to i32*. BitCasts are valid only between types of the
243 /// same size, where no re-interpretation of the bits is done.
244 /// Determine whether this type could be losslessly bitcast to Ty.
245 bool canLosslesslyBitCastTo(Type *Ty) const;
246
247 /// Return true if this type is empty, that is, it has no elements or all of
248 /// its elements are empty.
249 bool isEmptyTy() const;
250
251 /// Return true if the type is "first class", meaning it is a valid type for a
252 /// Value.
253 bool isFirstClassType() const {
254 return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
255 }
256
257 /// Return true if the type is a valid type for a register in codegen. This
258 /// includes all first-class types except struct and array types.
259 bool isSingleValueType() const {
260 return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
261 isPointerTy() || isVectorTy() || isX86_AMXTy();
262 }
263
264 /// Return true if the type is an aggregate type. This means it is valid as
265 /// the first operand of an insertvalue or extractvalue instruction. This
266 /// includes struct and array types, but does not include vector types.
267 bool isAggregateType() const {
268 return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
58. Assuming the condition is false
59. Assuming the condition is false
60. Returning zero, which participates in a condition later
269 }
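The distinction the analyzer path above depends on: vectors contain elements but are not aggregates, so isAggregateType() returns false for them. A sketch under the same assumptions:

  llvm::Type *EltTy = llvm::Type::getInt32Ty(Ctx);
  assert(llvm::ArrayType::get(EltTy, 8)->isAggregateType());        // arrays (and structs) qualify
  assert(!llvm::FixedVectorType::get(EltTy, 8)->isAggregateType()); // vectors do not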
270
271 /// Return true if it makes sense to take the size of this type. To get the
272 /// actual size for a particular target, it is reasonable to use the
273 /// DataLayout subsystem to do this.
274 bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
275 // If it's a primitive, it is always sized.
276 if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
277 getTypeID() == PointerTyID || getTypeID() == X86_MMXTyID ||
278 getTypeID() == X86_AMXTyID)
279 return true;
280 // If it is not something that can have a size (e.g. a function or label),
281 // it doesn't have a size.
282 if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && !isVectorTy())
283 return false;
284 // Otherwise we have to try harder to decide.
285 return isSizedDerivedType(Visited);
286 }
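A sketch of the sized/unsized split, with a hypothetical struct name:

  // An opaque (body-less) struct has no layout yet, so it is not sized.
  llvm::StructType *Opaque = llvm::StructType::create(Ctx, "example.opaque");
  assert(!Opaque->isSized());
  // A struct whose members are all sized is itself sized.
  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  assert(llvm::StructType::get(Ctx, {I32, I32})->isSized());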
287
288 /// Return the basic size of this type if it is a primitive type. These are
289 /// fixed by LLVM and are not target-dependent.
290 /// This will return zero if the type does not have a size or is not a
291 /// primitive type.
292 ///
293 /// If this is a scalable vector type, the scalable property will be set and
294 /// the runtime size will be a positive integer multiple of the base size.
295 ///
296 /// Note that this may not reflect the size of memory allocated for an
297 /// instance of the type or the number of bytes that are written when an
298 /// instance of the type is stored to memory. The DataLayout class provides
299 /// additional query functions to provide this information.
300 ///
301 TypeSize getPrimitiveSizeInBits() const LLVM_READONLY;
302
303 /// If this is a vector type, return the getPrimitiveSizeInBits value for the
304 /// element type. Otherwise return the getPrimitiveSizeInBits value for this
305 /// type.
306 unsigned getScalarSizeInBits() const LLVM_READONLY;
307
308 /// Return the width of the mantissa of this type. This is only valid on
309 /// floating-point types. If the FP type does not have a stable mantissa (e.g.
310 /// ppc long double), this method returns -1.
311 int getFPMantissaWidth() const;
312
313 /// Return whether the type is IEEE compatible, as defined by the eponymous
314 /// method in APFloat.
315 bool isIEEE() const { return APFloat::getZero(getFltSemantics()).isIEEE(); }
316
317 /// If this is a vector type, return the element type, otherwise return
318 /// 'this'.
319 inline Type *getScalarType() const {
320 if (isVectorTy())
321 return getContainedType(0);
322 return const_cast<Type *>(this);
323 }
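getScalarType() is what lets the *OrVectorTy predicates above treat scalars and vectors uniformly; a sketch under the same assumptions:

  llvm::Type *I8 = llvm::Type::getInt8Ty(Ctx);
  llvm::Type *V16I8 = llvm::FixedVectorType::get(I8, 16);
  assert(V16I8->getScalarType() == I8); // a vector yields its element type
  assert(I8->getScalarType() == I8);    // anything else yields itself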
324
325 //===--------------------------------------------------------------------===//
326 // Type Iteration support.
327 //
328 using subtype_iterator = Type * const *;
329
330 subtype_iterator subtype_begin() const { return ContainedTys; }
331 subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
332 ArrayRef<Type*> subtypes() const {
333 return makeArrayRef(subtype_begin(), subtype_end());
334 }
335
336 using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>;
337
338 subtype_reverse_iterator subtype_rbegin() const {
339 return subtype_reverse_iterator(subtype_end());
340 }
341 subtype_reverse_iterator subtype_rend() const {
342 return subtype_reverse_iterator(subtype_begin());
343 }
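A sketch of walking the contained types of a struct via subtypes(), additionally assuming llvm/Support/raw_ostream.h for errs():

  llvm::StructType *ST = llvm::StructType::get(
      Ctx, {llvm::Type::getInt32Ty(Ctx), llvm::Type::getDoubleTy(Ctx)});
  for (llvm::Type *Elt : ST->subtypes())
    Elt->print(llvm::errs()); // prints i32, then double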
344
345 /// This method is used to implement the type iterator (defined at the end of
346 /// the file). For derived types, this returns the types 'contained' in the
347 /// derived type.
348 Type *getContainedType(unsigned i) const {
349 assert(i < NumContainedTys && "Index out of range!");
350 return ContainedTys[i];
351 }
352
353 /// Return the number of types in the derived type.
354 unsigned getNumContainedTypes() const { return NumContainedTys; }
355
356 //===--------------------------------------------------------------------===//
357 // Helper methods corresponding to subclass methods. This forces a cast to
358 // the specified subclass and calls its accessor. "getArrayNumElements" (for
359 // example) is shorthand for cast<ArrayType>(Ty)->getNumElements(). This is
360 // only intended to cover the core methods that are frequently used; helper
361 // methods should not be added here.
362
363 inline unsigned getIntegerBitWidth() const;
364
365 inline Type *getFunctionParamType(unsigned i) const;
366 inline unsigned getFunctionNumParams() const;
367 inline bool isFunctionVarArg() const;
368
369 inline StringRef getStructName() const;
370 inline unsigned getStructNumElements() const;
371 inline Type *getStructElementType(unsigned N) const;
372
373 inline uint64_t getArrayNumElements() const;
374
375 Type *getArrayElementType() const {
376 assert(getTypeID() == ArrayTyID);
377 return ContainedTys[0];
378 }
379
380 Type *getPointerElementType() const {
381 assert(getTypeID() == PointerTyID);
382 return ContainedTys[0];
383 }
384
385 /// Given a vector type, change the element type
386 /// while keeping the old number of elements.
387 /// For non-vectors, simply returns \p EltTy.
388 inline Type *getWithNewType(Type *EltTy) const;
389
390 /// Given an integer or vector type, change the lane bitwidth to NewBitWidth,
391 /// while keeping the old number of lanes.
392 inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
393
394 /// Given a scalar/vector integer type, returns a type with elements twice as
395 /// wide as in the original type. For vectors, preserves the element count.
396 inline Type *getExtendedType() const;
397
398 /// Get the address space of this pointer or pointer vector type.
399 inline unsigned getPointerAddressSpace() const;
400
401 //===--------------------------------------------------------------------===//
402 // Static members exported by the Type class itself. Useful for getting
403 // instances of Type.
404 //
405
406 /// Return a type based on an identifier.
407 static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
408
409 //===--------------------------------------------------------------------===//
410 // These are the builtin types that are always available.
411 //
412 static Type *getVoidTy(LLVMContext &C);
413 static Type *getLabelTy(LLVMContext &C);
414 static Type *getHalfTy(LLVMContext &C);
415 static Type *getBFloatTy(LLVMContext &C);
416 static Type *getFloatTy(LLVMContext &C);
417 static Type *getDoubleTy(LLVMContext &C);
418 static Type *getMetadataTy(LLVMContext &C);
419 static Type *getX86_FP80Ty(LLVMContext &C);
420 static Type *getFP128Ty(LLVMContext &C);
421 static Type *getPPC_FP128Ty(LLVMContext &C);
422 static Type *getX86_MMXTy(LLVMContext &C);
423 static Type *getX86_AMXTy(LLVMContext &C);
424 static Type *getTokenTy(LLVMContext &C);
425 static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
426 static IntegerType *getInt1Ty(LLVMContext &C);
427 static IntegerType *getInt8Ty(LLVMContext &C);
428 static IntegerType *getInt16Ty(LLVMContext &C);
429 static IntegerType *getInt32Ty(LLVMContext &C);
430 static IntegerType *getInt64Ty(LLVMContext &C);
431 static IntegerType *getInt128Ty(LLVMContext &C);
432 template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) {
433 int noOfBits = sizeof(ScalarTy) * CHAR_BIT;
434 if (std::is_integral<ScalarTy>::value) {
435 return (Type*) Type::getIntNTy(C, noOfBits);
436 } else if (std::is_floating_point<ScalarTy>::value) {
437 switch (noOfBits) {
438 case 32:
439 return Type::getFloatTy(C);
440 case 64:
441 return Type::getDoubleTy(C);
442 }
443 }
444 llvm_unreachable("Unsupported type in Type::getScalarTy");
445 }
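A sketch of mapping host C++ scalar types onto IR types with getScalarTy; note the integral case takes the host's width, and the float case assumes a host where sizeof(float) == 4:

  llvm::Type *F32 = llvm::Type::getScalarTy<float>(Ctx);   // -> 'float'
  llvm::Type *I64 = llvm::Type::getScalarTy<int64_t>(Ctx); // -> 'i64'
  assert(F32 == llvm::Type::getFloatTy(Ctx));
  assert(I64 == llvm::Type::getInt64Ty(Ctx));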
446 static Type *getFloatingPointTy(LLVMContext &C, const fltSemantics &S) {
447 Type *Ty;
448 if (&S == &APFloat::IEEEhalf())
449 Ty = Type::getHalfTy(C);
450 else if (&S == &APFloat::BFloat())
451 Ty = Type::getBFloatTy(C);
452 else if (&S == &APFloat::IEEEsingle())
453 Ty = Type::getFloatTy(C);
454 else if (&S == &APFloat::IEEEdouble())
455 Ty = Type::getDoubleTy(C);
456 else if (&S == &APFloat::x87DoubleExtended())
457 Ty = Type::getX86_FP80Ty(C);
458 else if (&S == &APFloat::IEEEquad())
459 Ty = Type::getFP128Ty(C);
460 else {
461 assert(&S == &APFloat::PPCDoubleDouble() && "Unknown FP format");
462 Ty = Type::getPPC_FP128Ty(C);
463 }
464 return Ty;
465 }
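getFloatingPointTy() is the inverse of getFltSemantics() above: semantics in, type out. A sketch under the same assumptions:

  llvm::Type *Quad =
      llvm::Type::getFloatingPointTy(Ctx, llvm::APFloat::IEEEquad());
  assert(Quad->isFP128Ty());
  assert(&Quad->getFltSemantics() == &llvm::APFloat::IEEEquad());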
466
467 //===--------------------------------------------------------------------===//
468 // Convenience methods for getting pointer types with one of the above builtin
469 // types as pointee.
470 //
471 static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
472 static PointerType *getBFloatPtrTy(LLVMContext &C, unsigned AS = 0);
473 static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
474 static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
475 static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
476 static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
477 static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
478 static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
479 static PointerType *getX86_AMXPtrTy(LLVMContext &C, unsigned AS = 0);
480 static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
481 static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
482 static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
483 static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
484 static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
485 static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
486
487 /// Return a pointer to the current type. This is equivalent to
488 /// PointerType::get(Foo, AddrSpace).
489 /// TODO: Remove this after opaque pointer transition is complete.
490 PointerType *getPointerTo(unsigned AddrSpace = 0) const;
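Since pointer types are uniqued per context, getPointerTo() and the convenience getters above agree; a sketch:

  llvm::PointerType *P = llvm::Type::getInt8Ty(Ctx)->getPointerTo();
  assert(P == llvm::Type::getInt8PtrTy(Ctx)); // same uniqued i8* type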
491
492private:
493 /// Derived types like structures and arrays are sized iff all of the members
494 /// of the type are sized as well. Since asking for their size is relatively
495 /// uncommon, move this operation out-of-line.
496 bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
497};
498
499// Printing of types.
500inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) {
501 T.print(OS);
502 return OS;
503}
504
505// allow isa<PointerType>(x) to work without DerivedTypes.h included.
506template <> struct isa_impl<PointerType, Type> {
507 static inline bool doit(const Type &Ty) {
508 return Ty.getTypeID() == Type::PointerTyID;
509 }
510};
511
512// Create wrappers for C Binding types (see CBindingWrapping.h).
513 DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)
514
515/* Specialized opaque type conversions.
516 */
517inline Type **unwrap(LLVMTypeRef* Tys) {
518 return reinterpret_cast<Type**>(Tys);
519}
520
521inline LLVMTypeRef *wrap(Type **Tys) {
522 return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
523}
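A sketch of the C-API round-trip these wrappers provide, additionally assuming llvm-c/Types.h for LLVMTypeRef:

  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  LLVMTypeRef Ref = llvm::wrap(I32); // C++ object -> opaque C handle
  assert(llvm::unwrap(Ref) == I32);  // and back, losslessly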
524
525} // end namespace llvm
526
527#endif // LLVM_IR_TYPE_H