Bug Summary

File: llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
Warning: line 758, column 29
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name MemCpyOptimizer.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem 
/usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-21-164211-33944-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp

1//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs various transformations related to eliminating memcpy
10// calls, or transforming sets of stores into memset's.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/None.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/ADT/Statistic.h"
20#include "llvm/ADT/iterator_range.h"
21#include "llvm/Analysis/AliasAnalysis.h"
22#include "llvm/Analysis/AssumptionCache.h"
23#include "llvm/Analysis/GlobalsModRef.h"
24#include "llvm/Analysis/Loads.h"
25#include "llvm/Analysis/MemoryDependenceAnalysis.h"
26#include "llvm/Analysis/MemoryLocation.h"
27#include "llvm/Analysis/MemorySSA.h"
28#include "llvm/Analysis/MemorySSAUpdater.h"
29#include "llvm/Analysis/TargetLibraryInfo.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/IR/Argument.h"
32#include "llvm/IR/BasicBlock.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Dominators.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InstrTypes.h"
42#include "llvm/IR/Instruction.h"
43#include "llvm/IR/Instructions.h"
44#include "llvm/IR/IntrinsicInst.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/LLVMContext.h"
47#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/IR/PassManager.h"
50#include "llvm/IR/Type.h"
51#include "llvm/IR/User.h"
52#include "llvm/IR/Value.h"
53#include "llvm/InitializePasses.h"
54#include "llvm/Pass.h"
55#include "llvm/Support/Casting.h"
56#include "llvm/Support/Debug.h"
57#include "llvm/Support/MathExtras.h"
58#include "llvm/Support/raw_ostream.h"
59#include "llvm/Transforms/Scalar.h"
60#include "llvm/Transforms/Utils/Local.h"
61#include <algorithm>
62#include <cassert>
63#include <cstdint>
64#include <utility>
65
66using namespace llvm;
67
68#define DEBUG_TYPE"memcpyopt" "memcpyopt"
69
70static cl::opt<bool>
71 EnableMemorySSA("enable-memcpyopt-memoryssa", cl::init(true), cl::Hidden,
72 cl::desc("Use MemorySSA-backed MemCpyOpt."));
73
74STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted")static llvm::Statistic NumMemCpyInstr = {"memcpyopt", "NumMemCpyInstr"
, "Number of memcpy instructions deleted"}
;
75STATISTIC(NumMemSetInfer, "Number of memsets inferred")static llvm::Statistic NumMemSetInfer = {"memcpyopt", "NumMemSetInfer"
, "Number of memsets inferred"}
;
76STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy")static llvm::Statistic NumMoveToCpy = {"memcpyopt", "NumMoveToCpy"
, "Number of memmoves converted to memcpy"}
;
77STATISTIC(NumCpyToSet, "Number of memcpys converted to memset")static llvm::Statistic NumCpyToSet = {"memcpyopt", "NumCpyToSet"
, "Number of memcpys converted to memset"}
;
78STATISTIC(NumCallSlot, "Number of call slot optimizations performed")static llvm::Statistic NumCallSlot = {"memcpyopt", "NumCallSlot"
, "Number of call slot optimizations performed"}
;
79
80namespace {
81
82/// Represents a range of memset'd bytes with the ByteVal value.
83/// This allows us to analyze stores like:
84/// store 0 -> P+1
85/// store 0 -> P+0
86/// store 0 -> P+3
87/// store 0 -> P+2
88/// which sometimes happens with stores to arrays of structs etc. When we see
89/// the first store, we make a range [1, 2). The second store extends the range
90/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
91/// two ranges into [0, 3) which is memset'able.
92struct MemsetRange {
93 // Start/End - A semi range that describes the span that this range covers.
94 // The range is closed at the start and open at the end: [Start, End).
95 int64_t Start, End;
96
97 /// StartPtr - The getelementptr instruction that points to the start of the
98 /// range.
99 Value *StartPtr;
100
101 /// Alignment - The known alignment of the first store.
102 unsigned Alignment;
103
104 /// TheStores - The actual stores that make up this range.
105 SmallVector<Instruction*, 16> TheStores;
106
107 bool isProfitableToUseMemset(const DataLayout &DL) const;
108};
109
110} // end anonymous namespace
111
112bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
113 // If we found more than 4 stores to merge or 16 bytes, use memset.
114 if (TheStores.size() >= 4 || End-Start >= 16) return true;
115
116 // If there is nothing to merge, don't do anything.
117 if (TheStores.size() < 2) return false;
118
119 // If any of the stores are a memset, then it is always good to extend the
120 // memset.
121 for (Instruction *SI : TheStores)
122 if (!isa<StoreInst>(SI))
123 return true;
124
125 // Assume that the code generator is capable of merging pairs of stores
126 // together if it wants to.
127 if (TheStores.size() == 2) return false;
128
129 // If we have fewer than 8 stores, it can still be worthwhile to do this.
130 // For example, merging 4 i8 stores into an i32 store is useful almost always.
131 // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
132 // memset will be split into 2 32-bit stores anyway) and doing so can
133 // pessimize the llvm optimizer.
134 //
135 // Since we don't have perfect knowledge here, make some assumptions: assume
136 // the maximum GPR width is the same size as the largest legal integer
137 // size. If so, check to see whether we will end up actually reducing the
138 // number of stores used.
139 unsigned Bytes = unsigned(End-Start);
140 unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
141 if (MaxIntSize == 0)
142 MaxIntSize = 1;
143 unsigned NumPointerStores = Bytes / MaxIntSize;
144
145 // Assume the remaining bytes if any are done a byte at a time.
146 unsigned NumByteStores = Bytes % MaxIntSize;
147
148 // If we will reduce the # stores (according to this heuristic), do the
149 // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
150 // etc.
151 return TheStores.size() > NumPointerStores+NumByteStores;
152}
153
154namespace {
155
156class MemsetRanges {
157 using range_iterator = SmallVectorImpl<MemsetRange>::iterator;
158
159 /// A sorted list of the memset ranges.
160 SmallVector<MemsetRange, 8> Ranges;
161
162 const DataLayout &DL;
163
164public:
165 MemsetRanges(const DataLayout &DL) : DL(DL) {}
166
167 using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;
168
169 const_iterator begin() const { return Ranges.begin(); }
170 const_iterator end() const { return Ranges.end(); }
171 bool empty() const { return Ranges.empty(); }
172
173 void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
174 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
175 addStore(OffsetFromFirst, SI);
176 else
177 addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
178 }
179
180 void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
181 int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
182
183 addRange(OffsetFromFirst, StoreSize, SI->getPointerOperand(),
184 SI->getAlign().value(), SI);
185 }
186
187 void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
188 int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
189 addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
190 }
191
192 void addRange(int64_t Start, int64_t Size, Value *Ptr,
193 unsigned Alignment, Instruction *Inst);
194};
195
196} // end anonymous namespace
197
198/// Add a new store to the MemsetRanges data structure. This adds a
199/// new range for the specified store at the specified offset, merging into
200/// existing ranges as appropriate.
201void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
202 unsigned Alignment, Instruction *Inst) {
203 int64_t End = Start+Size;
204
205 range_iterator I = partition_point(
206 Ranges, [=](const MemsetRange &O) { return O.End < Start; });
207
208 // We now know that I == E, in which case we didn't find anything to merge
209 // with, or that Start <= I->End. If End < I->Start or I == E, then we need
210 // to insert a new range. Handle this now.
211 if (I == Ranges.end() || End < I->Start) {
212 MemsetRange &R = *Ranges.insert(I, MemsetRange());
213 R.Start = Start;
214 R.End = End;
215 R.StartPtr = Ptr;
216 R.Alignment = Alignment;
217 R.TheStores.push_back(Inst);
218 return;
219 }
220
221 // This store overlaps with I, add it.
222 I->TheStores.push_back(Inst);
223
224 // At this point, we may have an interval that completely contains our store.
225 // If so, just add it to the interval and return.
226 if (I->Start <= Start && I->End >= End)
227 return;
228
229 // Now we know that Start <= I->End and End >= I->Start so the range overlaps
230 // but is not entirely contained within the range.
231
232 // See if the range extends the start of the range. In this case, it couldn't
233 // possibly cause it to join the prior range, because otherwise we would have
234 // stopped on *it*.
235 if (Start < I->Start) {
236 I->Start = Start;
237 I->StartPtr = Ptr;
238 I->Alignment = Alignment;
239 }
240
241 // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
242 // is in or right at the end of I), and that End >= I->Start. Extend I out to
243 // End.
244 if (End > I->End) {
245 I->End = End;
246 range_iterator NextI = I;
247 while (++NextI != Ranges.end() && End >= NextI->Start) {
248 // Merge the range in.
249 I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
250 if (NextI->End > I->End)
251 I->End = NextI->End;
252 Ranges.erase(NextI);
253 NextI = I;
254 }
255 }
256}
257
258//===----------------------------------------------------------------------===//
259// MemCpyOptLegacyPass Pass
260//===----------------------------------------------------------------------===//
261
262namespace {
263
264class MemCpyOptLegacyPass : public FunctionPass {
265 MemCpyOptPass Impl;
266
267public:
268 static char ID; // Pass identification, replacement for typeid
269
270 MemCpyOptLegacyPass() : FunctionPass(ID) {
271 initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
272 }
273
274 bool runOnFunction(Function &F) override;
275
276private:
277 // This transformation requires dominator postdominator info
278 void getAnalysisUsage(AnalysisUsage &AU) const override {
279 AU.setPreservesCFG();
280 AU.addRequired<AssumptionCacheTracker>();
281 AU.addRequired<DominatorTreeWrapperPass>();
282 AU.addPreserved<DominatorTreeWrapperPass>();
283 AU.addPreserved<GlobalsAAWrapperPass>();
284 AU.addRequired<TargetLibraryInfoWrapperPass>();
285 if (!EnableMemorySSA)
286 AU.addRequired<MemoryDependenceWrapperPass>();
287 AU.addPreserved<MemoryDependenceWrapperPass>();
288 AU.addRequired<AAResultsWrapperPass>();
289 AU.addPreserved<AAResultsWrapperPass>();
290 if (EnableMemorySSA)
291 AU.addRequired<MemorySSAWrapperPass>();
292 AU.addPreserved<MemorySSAWrapperPass>();
293 }
294};
295
296} // end anonymous namespace
297
298char MemCpyOptLegacyPass::ID = 0;
299
300/// The public interface to this file...
301FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }
302
303INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",static void *initializeMemCpyOptLegacyPassPassOnce(PassRegistry
&Registry) {
304 false, false)static void *initializeMemCpyOptLegacyPassPassOnce(PassRegistry
&Registry) {
305INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)initializeAssumptionCacheTrackerPass(Registry);
306INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry);
307INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)initializeMemoryDependenceWrapperPassPass(Registry);
308INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
309INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)initializeAAResultsWrapperPassPass(Registry);
310INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)initializeGlobalsAAWrapperPassPass(Registry);
311INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)initializeMemorySSAWrapperPassPass(Registry);
312INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",PassInfo *PI = new PassInfo( "MemCpy Optimization", "memcpyopt"
, &MemCpyOptLegacyPass::ID, PassInfo::NormalCtor_t(callDefaultCtor
<MemCpyOptLegacyPass>), false, false); Registry.registerPass
(*PI, true); return PI; } static llvm::once_flag InitializeMemCpyOptLegacyPassPassFlag
; void llvm::initializeMemCpyOptLegacyPassPass(PassRegistry &
Registry) { llvm::call_once(InitializeMemCpyOptLegacyPassPassFlag
, initializeMemCpyOptLegacyPassPassOnce, std::ref(Registry));
}
313 false, false)PassInfo *PI = new PassInfo( "MemCpy Optimization", "memcpyopt"
, &MemCpyOptLegacyPass::ID, PassInfo::NormalCtor_t(callDefaultCtor
<MemCpyOptLegacyPass>), false, false); Registry.registerPass
(*PI, true); return PI; } static llvm::once_flag InitializeMemCpyOptLegacyPassPassFlag
; void llvm::initializeMemCpyOptLegacyPassPass(PassRegistry &
Registry) { llvm::call_once(InitializeMemCpyOptLegacyPassPassFlag
, initializeMemCpyOptLegacyPassPassOnce, std::ref(Registry));
}
314
315// Check that V is either not accessible by the caller, or unwinding cannot
316// occur between Start and End.
317static bool mayBeVisibleThroughUnwinding(Value *V, Instruction *Start,
318 Instruction *End) {
319 assert(Start->getParent() == End->getParent() && "Must be in same block")(static_cast <bool> (Start->getParent() == End->getParent
() && "Must be in same block") ? void (0) : __assert_fail
("Start->getParent() == End->getParent() && \"Must be in same block\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp"
, 319, __extension__ __PRETTY_FUNCTION__))
;
320 if (!Start->getFunction()->doesNotThrow() &&
321 !isa<AllocaInst>(getUnderlyingObject(V))) {
322 for (const Instruction &I :
323 make_range(Start->getIterator(), End->getIterator())) {
324 if (I.mayThrow())
325 return true;
326 }
327 }
328 return false;
329}
330
331void MemCpyOptPass::eraseInstruction(Instruction *I) {
332 if (MSSAU)
333 MSSAU->removeMemoryAccess(I);
334 if (MD)
335 MD->removeInstruction(I);
336 I->eraseFromParent();
337}
338
339// Check for mod or ref of Loc between Start and End, excluding both boundaries.
340// Start and End must be in the same block
341static bool accessedBetween(AliasAnalysis &AA, MemoryLocation Loc,
342 const MemoryUseOrDef *Start,
343 const MemoryUseOrDef *End) {
344 assert(Start->getBlock() == End->getBlock() && "Only local supported")(static_cast <bool> (Start->getBlock() == End->getBlock
() && "Only local supported") ? void (0) : __assert_fail
("Start->getBlock() == End->getBlock() && \"Only local supported\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp"
, 344, __extension__ __PRETTY_FUNCTION__))
;
345 for (const MemoryAccess &MA :
346 make_range(++Start->getIterator(), End->getIterator())) {
347 if (isModOrRefSet(AA.getModRefInfo(cast<MemoryUseOrDef>(MA).getMemoryInst(),
348 Loc)))
349 return true;
350 }
351 return false;
352}
353
354// Check for mod of Loc between Start and End, excluding both boundaries.
355// Start and End can be in different blocks.
356static bool writtenBetween(MemorySSA *MSSA, MemoryLocation Loc,
357 const MemoryUseOrDef *Start,
358 const MemoryUseOrDef *End) {
359 // TODO: Only walk until we hit Start.
360 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
361 End->getDefiningAccess(), Loc);
362 return !MSSA->dominates(Clobber, Start);
363}
364
365/// When scanning forward over instructions, we look for some other patterns to
366/// fold away. In particular, this looks for stores to neighboring locations of
367/// memory. If it sees enough consecutive ones, it attempts to merge them
368/// together into a memcpy/memset.
369Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
370 Value *StartPtr,
371 Value *ByteVal) {
372 const DataLayout &DL = StartInst->getModule()->getDataLayout();
373
374 // Okay, so we now have a single store that can be splatable. Scan to find
375 // all subsequent stores of the same value to offset from the same pointer.
376 // Join these together into ranges, so we can decide whether contiguous blocks
377 // are stored.
378 MemsetRanges Ranges(DL);
379
380 BasicBlock::iterator BI(StartInst);
381
382 // Keeps track of the last memory use or def before the insertion point for
383 // the new memset. The new MemoryDef for the inserted memsets will be inserted
384 // after MemInsertPoint. It points to either LastMemDef or to the last user
385 // before the insertion point of the memset, if there are any such users.
386 MemoryUseOrDef *MemInsertPoint = nullptr;
387 // Keeps track of the last MemoryDef between StartInst and the insertion point
388 // for the new memset. This will become the defining access of the inserted
389 // memsets.
390 MemoryDef *LastMemDef = nullptr;
391 for (++BI; !BI->isTerminator(); ++BI) {
392 if (MSSAU) {
393 auto *CurrentAcc = cast_or_null<MemoryUseOrDef>(
394 MSSAU->getMemorySSA()->getMemoryAccess(&*BI));
395 if (CurrentAcc) {
396 MemInsertPoint = CurrentAcc;
397 if (auto *CurrentDef = dyn_cast<MemoryDef>(CurrentAcc))
398 LastMemDef = CurrentDef;
399 }
400 }
401
402 // Calls that only access inaccessible memory do not block merging
403 // accessible stores.
404 if (auto *CB = dyn_cast<CallBase>(BI)) {
405 if (CB->onlyAccessesInaccessibleMemory())
406 continue;
407 }
408
409 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
410 // If the instruction is readnone, ignore it, otherwise bail out. We
411 // don't even allow readonly here because we don't want something like:
412 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
413 if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
414 break;
415 continue;
416 }
417
418 if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
419 // If this is a store, see if we can merge it in.
420 if (!NextStore->isSimple()) break;
421
422 Value *StoredVal = NextStore->getValueOperand();
423
424 // Don't convert stores of non-integral pointer types to memsets (which
425 // stores integers).
426 if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
427 break;
428
429 // Check to see if this stored value is of the same byte-splattable value.
430 Value *StoredByte = isBytewiseValue(StoredVal, DL);
431 if (isa<UndefValue>(ByteVal) && StoredByte)
432 ByteVal = StoredByte;
433 if (ByteVal != StoredByte)
434 break;
435
436 // Check to see if this store is to a constant offset from the start ptr.
437 Optional<int64_t> Offset =
438 isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
439 if (!Offset)
440 break;
441
442 Ranges.addStore(*Offset, NextStore);
443 } else {
444 MemSetInst *MSI = cast<MemSetInst>(BI);
445
446 if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
447 !isa<ConstantInt>(MSI->getLength()))
448 break;
449
450 // Check to see if this store is to a constant offset from the start ptr.
451 Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
452 if (!Offset)
453 break;
454
455 Ranges.addMemSet(*Offset, MSI);
456 }
457 }
458
459 // If we have no ranges, then we just had a single store with nothing that
460 // could be merged in. This is a very common case of course.
461 if (Ranges.empty())
462 return nullptr;
463
464 // If we had at least one store that could be merged in, add the starting
465 // store as well. We try to avoid this unless there is at least something
466 // interesting as a small compile-time optimization.
467 Ranges.addInst(0, StartInst);
468
469 // If we create any memsets, we put it right before the first instruction that
470 // isn't part of the memset block. This ensure that the memset is dominated
471 // by any addressing instruction needed by the start of the block.
472 IRBuilder<> Builder(&*BI);
473
474 // Now that we have full information about ranges, loop over the ranges and
475 // emit memset's for anything big enough to be worthwhile.
476 Instruction *AMemSet = nullptr;
477 for (const MemsetRange &Range : Ranges) {
478 if (Range.TheStores.size() == 1) continue;
479
480 // If it is profitable to lower this range to memset, do so now.
481 if (!Range.isProfitableToUseMemset(DL))
482 continue;
483
484 // Otherwise, we do want to transform this! Create a new memset.
485 // Get the starting pointer of the block.
486 StartPtr = Range.StartPtr;
487
488 AMemSet = Builder.CreateMemSet(StartPtr, ByteVal, Range.End - Range.Start,
489 MaybeAlign(Range.Alignment));
490 LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SIdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Replace stores:\n"; for (Instruction
*SI : Range.TheStores) dbgs() << *SI << '\n'; dbgs
() << "With: " << *AMemSet << '\n'; } } while
(false)
491 : Range.TheStores) dbgs()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Replace stores:\n"; for (Instruction
*SI : Range.TheStores) dbgs() << *SI << '\n'; dbgs
() << "With: " << *AMemSet << '\n'; } } while
(false)
492 << *SI << '\n';do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Replace stores:\n"; for (Instruction
*SI : Range.TheStores) dbgs() << *SI << '\n'; dbgs
() << "With: " << *AMemSet << '\n'; } } while
(false)
493 dbgs() << "With: " << *AMemSet << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Replace stores:\n"; for (Instruction
*SI : Range.TheStores) dbgs() << *SI << '\n'; dbgs
() << "With: " << *AMemSet << '\n'; } } while
(false)
;
494 if (!Range.TheStores.empty())
495 AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
496
497 if (MSSAU) {
498 assert(LastMemDef && MemInsertPoint &&(static_cast <bool> (LastMemDef && MemInsertPoint
&& "Both LastMemDef and MemInsertPoint need to be set"
) ? void (0) : __assert_fail ("LastMemDef && MemInsertPoint && \"Both LastMemDef and MemInsertPoint need to be set\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp"
, 499, __extension__ __PRETTY_FUNCTION__))
499 "Both LastMemDef and MemInsertPoint need to be set")(static_cast <bool> (LastMemDef && MemInsertPoint
&& "Both LastMemDef and MemInsertPoint need to be set"
) ? void (0) : __assert_fail ("LastMemDef && MemInsertPoint && \"Both LastMemDef and MemInsertPoint need to be set\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp"
, 499, __extension__ __PRETTY_FUNCTION__))
;
500 auto *NewDef =
501 cast<MemoryDef>(MemInsertPoint->getMemoryInst() == &*BI
502 ? MSSAU->createMemoryAccessBefore(
503 AMemSet, LastMemDef, MemInsertPoint)
504 : MSSAU->createMemoryAccessAfter(
505 AMemSet, LastMemDef, MemInsertPoint));
506 MSSAU->insertDef(NewDef, /*RenameUses=*/true);
507 LastMemDef = NewDef;
508 MemInsertPoint = NewDef;
509 }
510
511 // Zap all the stores.
512 for (Instruction *SI : Range.TheStores)
513 eraseInstruction(SI);
514
515 ++NumMemSetInfer;
516 }
517
518 return AMemSet;
519}
520
521// This method try to lift a store instruction before position P.
522// It will lift the store and its argument + that anything that
523// may alias with these.
524// The method returns true if it was successful.
525bool MemCpyOptPass::moveUp(StoreInst *SI, Instruction *P, const LoadInst *LI) {
526 // If the store alias this position, early bail out.
527 MemoryLocation StoreLoc = MemoryLocation::get(SI);
528 if (isModOrRefSet(AA->getModRefInfo(P, StoreLoc)))
529 return false;
530
531 // Keep track of the arguments of all instruction we plan to lift
532 // so we can make sure to lift them as well if appropriate.
533 DenseSet<Instruction*> Args;
534 if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
535 if (Ptr->getParent() == SI->getParent())
536 Args.insert(Ptr);
537
538 // Instruction to lift before P.
539 SmallVector<Instruction *, 8> ToLift{SI};
540
541 // Memory locations of lifted instructions.
542 SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
543
544 // Lifted calls.
545 SmallVector<const CallBase *, 8> Calls;
546
547 const MemoryLocation LoadLoc = MemoryLocation::get(LI);
548
549 for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
550 auto *C = &*I;
551
552 // Make sure hoisting does not perform a store that was not guaranteed to
553 // happen.
554 if (!isGuaranteedToTransferExecutionToSuccessor(C))
555 return false;
556
557 bool MayAlias = isModOrRefSet(AA->getModRefInfo(C, None));
558
559 bool NeedLift = false;
560 if (Args.erase(C))
561 NeedLift = true;
562 else if (MayAlias) {
563 NeedLift = llvm::any_of(MemLocs, [C, this](const MemoryLocation &ML) {
564 return isModOrRefSet(AA->getModRefInfo(C, ML));
565 });
566
567 if (!NeedLift)
568 NeedLift = llvm::any_of(Calls, [C, this](const CallBase *Call) {
569 return isModOrRefSet(AA->getModRefInfo(C, Call));
570 });
571 }
572
573 if (!NeedLift)
574 continue;
575
576 if (MayAlias) {
577 // Since LI is implicitly moved downwards past the lifted instructions,
578 // none of them may modify its source.
579 if (isModSet(AA->getModRefInfo(C, LoadLoc)))
580 return false;
581 else if (const auto *Call = dyn_cast<CallBase>(C)) {
582 // If we can't lift this before P, it's game over.
583 if (isModOrRefSet(AA->getModRefInfo(P, Call)))
584 return false;
585
586 Calls.push_back(Call);
587 } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
588 // If we can't lift this before P, it's game over.
589 auto ML = MemoryLocation::get(C);
590 if (isModOrRefSet(AA->getModRefInfo(P, ML)))
591 return false;
592
593 MemLocs.push_back(ML);
594 } else
595 // We don't know how to lift this instruction.
596 return false;
597 }
598
599 ToLift.push_back(C);
600 for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
601 if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
602 if (A->getParent() == SI->getParent()) {
603 // Cannot hoist user of P above P
604 if(A == P) return false;
605 Args.insert(A);
606 }
607 }
608 }
609
610 // Find MSSA insertion point. Normally P will always have a corresponding
611 // memory access before which we can insert. However, with non-standard AA
612 // pipelines, there may be a mismatch between AA and MSSA, in which case we
613 // will scan for a memory access before P. In either case, we know for sure
614 // that at least the load will have a memory access.
615 // TODO: Simplify this once P will be determined by MSSA, in which case the
616 // discrepancy can no longer occur.
617 MemoryUseOrDef *MemInsertPoint = nullptr;
618 if (MSSAU) {
619 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(P)) {
620 MemInsertPoint = cast<MemoryUseOrDef>(--MA->getIterator());
621 } else {
622 const Instruction *ConstP = P;
623 for (const Instruction &I : make_range(++ConstP->getReverseIterator(),
624 ++LI->getReverseIterator())) {
625 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
626 MemInsertPoint = MA;
627 break;
628 }
629 }
630 }
631 }
632
633 // We made it, we need to lift.
634 for (auto *I : llvm::reverse(ToLift)) {
635 LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Lifting " << *I <<
" before " << *P << "\n"; } } while (false)
;
636 I->moveBefore(P);
637 if (MSSAU) {
638 assert(MemInsertPoint && "Must have found insert point")(static_cast <bool> (MemInsertPoint && "Must have found insert point"
) ? void (0) : __assert_fail ("MemInsertPoint && \"Must have found insert point\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp"
, 638, __extension__ __PRETTY_FUNCTION__))
;
639 if (MemoryUseOrDef *MA = MSSAU->getMemorySSA()->getMemoryAccess(I)) {
640 MSSAU->moveAfter(MA, MemInsertPoint);
641 MemInsertPoint = MA;
642 }
643 }
644 }
645
646 return true;
647}
648
649bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
650 if (!SI->isSimple()) return false;
23
Calling 'StoreInst::isSimple'
27
Returning from 'StoreInst::isSimple'
28
Taking false branch
651
652 // Avoid merging nontemporal stores since the resulting
653 // memcpy/memset would not be able to preserve the nontemporal hint.
654 // In theory we could teach how to propagate the !nontemporal metadata to
655 // memset calls. However, that change would force the backend to
656 // conservatively expand !nontemporal memset calls back to sequences of
657 // store instructions (effectively undoing the merging).
658 if (SI->getMetadata(LLVMContext::MD_nontemporal))
29
Calling 'Instruction::getMetadata'
33
Returning from 'Instruction::getMetadata'
34
Taking false branch
659 return false;
660
661 const DataLayout &DL = SI->getModule()->getDataLayout();
662
663 Value *StoredVal = SI->getValueOperand();
664
665 // Not all the transforms below are correct for non-integral pointers, bail
666 // until we've audited the individual pieces.
667 if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()))
35
Calling 'DataLayout::isNonIntegralPointerType'
38
Returning from 'DataLayout::isNonIntegralPointerType'
39
Taking false branch
668 return false;
669
670 // Load to store forwarding can be interpreted as memcpy.
671 if (LoadInst *LI
40.1
'LI' is non-null
40.1
'LI' is non-null
40.1
'LI' is non-null
40.1
'LI' is non-null
40.1
'LI' is non-null
40.1
'LI' is non-null
40.1
'LI' is non-null
= dyn_cast<LoadInst>(StoredVal)) {
40
Assuming 'StoredVal' is a 'LoadInst'
41
Taking true branch
672 if (LI->isSimple() && LI->hasOneUse() &&
42
Calling 'LoadInst::isSimple'
46
Returning from 'LoadInst::isSimple'
47
Calling 'Value::hasOneUse'
53
Returning from 'Value::hasOneUse'
54
Assuming the condition is true
56
Taking true branch
673 LI->getParent() == SI->getParent()) {
55
Assuming the condition is true
674
675 auto *T = LI->getType();
676 if (T->isAggregateType()) {
57
Calling 'Type::isAggregateType'
61
Returning from 'Type::isAggregateType'
62
Taking false branch
677 MemoryLocation LoadLoc = MemoryLocation::get(LI);
678
679 // We use alias analysis to check if an instruction may store to
680 // the memory we load from in between the load and the store. If
681 // such an instruction is found, we try to promote there instead
682 // of at the store position.
683 // TODO: Can use MSSA for this.
684 Instruction *P = SI;
685 for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
686 if (isModSet(AA->getModRefInfo(&I, LoadLoc))) {
687 P = &I;
688 break;
689 }
690 }
691
692 // We found an instruction that may write to the loaded memory.
693 // We can try to promote at this position instead of the store
694 // position if nothing alias the store memory after this and the store
695 // destination is not in the range.
696 if (P && P != SI) {
697 if (!moveUp(SI, P, LI))
698 P = nullptr;
699 }
700
701 // If a valid insertion position is found, then we can promote
702 // the load/store pair to a memcpy.
703 if (P) {
704 // If we load from memory that may alias the memory we store to,
705 // memmove must be used to preserve semantic. If not, memcpy can
706 // be used.
707 bool UseMemMove = false;
708 if (!AA->isNoAlias(MemoryLocation::get(SI), LoadLoc))
709 UseMemMove = true;
710
711 uint64_t Size = DL.getTypeStoreSize(T);
712
713 IRBuilder<> Builder(P);
714 Instruction *M;
715 if (UseMemMove)
716 M = Builder.CreateMemMove(
717 SI->getPointerOperand(), SI->getAlign(),
718 LI->getPointerOperand(), LI->getAlign(), Size);
719 else
720 M = Builder.CreateMemCpy(
721 SI->getPointerOperand(), SI->getAlign(),
722 LI->getPointerOperand(), LI->getAlign(), Size);
723
724 LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Promoting " << *LI <<
" to " << *SI << " => " << *M << "\n"
; } } while (false)
725 << *M << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Promoting " << *LI <<
" to " << *SI << " => " << *M << "\n"
; } } while (false)
;
726
727 if (MSSAU) {
728 auto *LastDef =
729 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
730 auto *NewAccess =
731 MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
732 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
733 }
734
735 eraseInstruction(SI);
736 eraseInstruction(LI);
737 ++NumMemCpyInstr;
738
739 // Make sure we do not invalidate the iterator.
740 BBI = M->getIterator();
741 return true;
742 }
743 }
744
745 // Detect cases where we're performing call slot forwarding, but
746 // happen to be using a load-store pair to implement it, rather than
747 // a memcpy.
748 CallInst *C = nullptr;
749 if (EnableMemorySSA) {
63
Assuming the condition is false
64
Taking false branch
750 if (auto *LoadClobber = dyn_cast<MemoryUseOrDef>(
751 MSSA->getWalker()->getClobberingMemoryAccess(LI))) {
752 // The load most post-dom the call. Limit to the same block for now.
753 // TODO: Support non-local call-slot optimization?
754 if (LoadClobber->getBlock() == SI->getParent())
755 C = dyn_cast_or_null<CallInst>(LoadClobber->getMemoryInst());
756 }
757 } else {
758 MemDepResult ldep = MD->getDependency(LI);
65
Called C++ object pointer is null
759 if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
760 C = dyn_cast<CallInst>(ldep.getInst());
761 }
762
763 if (C) {
764 // Check that nothing touches the dest of the "copy" between
765 // the call and the store.
766 MemoryLocation StoreLoc = MemoryLocation::get(SI);
767 if (EnableMemorySSA) {
768 if (accessedBetween(*AA, StoreLoc, MSSA->getMemoryAccess(C),
769 MSSA->getMemoryAccess(SI)))
770 C = nullptr;
771 } else {
772 for (BasicBlock::iterator I = --SI->getIterator(),
773 E = C->getIterator();
774 I != E; --I) {
775 if (isModOrRefSet(AA->getModRefInfo(&*I, StoreLoc))) {
776 C = nullptr;
777 break;
778 }
779 }
780 }
781 }
782
783 if (C) {
784 bool changed = performCallSlotOptzn(
785 LI, SI, SI->getPointerOperand()->stripPointerCasts(),
786 LI->getPointerOperand()->stripPointerCasts(),
787 DL.getTypeStoreSize(SI->getOperand(0)->getType()),
788 commonAlignment(SI->getAlign(), LI->getAlign()), C);
789 if (changed) {
790 eraseInstruction(SI);
791 eraseInstruction(LI);
792 ++NumMemCpyInstr;
793 return true;
794 }
795 }
796 }
797 }
798
799 // There are two cases that are interesting for this code to handle: memcpy
800 // and memset. Right now we only handle memset.
801
802 // Ensure that the value being stored is something that can be memset'able a
803 // byte at a time like "0" or "-1" or any width, as well as things like
804 // 0xA0A0A0A0 and 0.0.
805 auto *V = SI->getOperand(0);
806 if (Value *ByteVal = isBytewiseValue(V, DL)) {
807 if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
808 ByteVal)) {
809 BBI = I->getIterator(); // Don't invalidate iterator.
810 return true;
811 }
812
813 // If we have an aggregate, we try to promote it to memset regardless
814 // of opportunity for merging as it can expose optimization opportunities
815 // in subsequent passes.
816 auto *T = V->getType();
817 if (T->isAggregateType()) {
818 uint64_t Size = DL.getTypeStoreSize(T);
819 IRBuilder<> Builder(SI);
820 auto *M = Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size,
821 SI->getAlign());
822
823 LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "Promoting " << *SI <<
" to " << *M << "\n"; } } while (false)
;
824
825 if (MSSAU) {
826 assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI)))(static_cast <bool> (isa<MemoryDef>(MSSAU->getMemorySSA
()->getMemoryAccess(SI))) ? void (0) : __assert_fail ("isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI))"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp"
, 826, __extension__ __PRETTY_FUNCTION__))
;
827 auto *LastDef =
828 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(SI));
829 auto *NewAccess = MSSAU->createMemoryAccessAfter(M, LastDef, LastDef);
830 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
831 }
832
833 eraseInstruction(SI);
834 NumMemSetInfer++;
835
836 // Make sure we do not invalidate the iterator.
837 BBI = M->getIterator();
838 return true;
839 }
840 }
841
842 return false;
843}
844
845bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
846 // See if there is another memset or store neighboring this memset which
847 // allows us to widen out the memset to do a single larger store.
848 if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
849 if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
850 MSI->getValue())) {
851 BBI = I->getIterator(); // Don't invalidate iterator.
852 return true;
853 }
854 return false;
855}
856
857/// Takes a memcpy and a call that it depends on,
858/// and checks for the possibility of a call slot optimization by having
859/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
                                         Instruction *cpyStore, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         Align cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Only statically-sized allocas can be reasoned about byte-for-byte.
  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  // Total size of the source alloca in bytes: element size times array count.
  const DataLayout &DL = cpyLoad->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  // The copy must cover the whole alloca, otherwise the call could write
  // bytes that the copy would not forward to dest.
  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (!isDereferenceableAndAlignedPointer(cpyDest, Align(1), APInt(64, cpyLen),
                                          DL, C, DT))
    return false;

  // Make sure that nothing can observe cpyDest being written early. There are
  // a number of cases to consider:
  //  1. cpyDest cannot be accessed between C and cpyStore as a precondition of
  //     the transform.
  //  2. C itself may not access cpyDest (prior to the transform). This is
  //     checked further below.
  //  3. If cpyDest is accessible to the caller of this function (potentially
  //     captured and not based on an alloca), we need to ensure that we cannot
  //     unwind between C and cpyStore. This is checked here.
  //  4. If cpyDest is potentially captured, there may be accesses to it from
  //     another thread. In this case, we need to check that cpyStore is
  //     guaranteed to be executed if C is. As it is a non-atomic access, it
  //     renders accesses from other threads undefined.
  //     TODO: This is currently not checked.
  if (mayBeVisibleThroughUnwinding(cpyDest, C, cpyStore))
    return false;

  // Check that dest points to memory that is at least as aligned as src.
  Align srcAlign = srcAlloca->getAlign();
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  // Worklist walk through casts and all-zero GEPs, which pin the same address.
  SmallVector<User *, 8> srcUseList(srcAlloca->users());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      append_range(srcUseList, U->users());
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      append_range(srcUseList, U->users());
      continue;
    }
    // Lifetime markers don't count as real accesses.
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpyLoad)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned ArgI = 0, E = C->arg_size(); ArgI != E; ++ArgI)
    if (C->getArgOperand(ArgI) == cpySrc && !C->doesNotCapture(ArgI))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  if (!DT->dominates(cpyDest, C)) {
    // Support moving a constant index GEP before the call.
    // NOTE: this mutates the IR (moves the GEP) before all checks are done;
    // every later bail-out below must remain correct despite the move.
    auto *GEP = dyn_cast<GetElementPtrInst>(cpyDest);
    if (GEP && GEP->hasAllConstantIndices() &&
        DT->dominates(GEP->getPointerOperand(), C))
      GEP->moveBefore(C);
    else
      return false;
  }

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  ModRefInfo MR = AA->getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA->callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
            C->getArgOperand(ArgI)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  // Rewrite every argument that points at src to point at dest instead,
  // inserting pointer casts as needed.
  bool changedArgument = false;
  for (unsigned ArgI = 0; ArgI < C->arg_size(); ++ArgI)
    if (C->getArgOperand(ArgI)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (C->getArgOperand(ArgI)->getType() == Dest->getType())
        C->setArgOperand(ArgI, Dest);
      else
        C->setArgOperand(ArgI, CastInst::CreatePointerCast(
                                   Dest, C->getArgOperand(ArgI)->getType(),
                                   Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  if (MD)
    MD->removeInstruction(C);

  // Update AA metadata
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpyLoad, KnownIDs, true);

  ++NumCallSlot;
  return true;
}
1040
1041/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
1042/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transforms memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction. Just
  // ignore the input and let someone else zap MDep. This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  if (MDep->getLength() != M->getLength()) {
    // Identical Value* lengths were handled above; fall back to comparing
    // constant sizes, bailing when either length is non-constant.
    ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
    ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
    if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
      return false;
  }

  // Verify that the copied-from memory doesn't change in between the two
  // transfers.  For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  if (EnableMemorySSA) {
    // TODO: It would be sufficient to check the MDep source up to the memcpy
    // size of M, rather than MDep.
    if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                       MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(M)))
      return false;
  } else {
    // NOTE: This is conservative, it will stop on any read from the source loc,
    // not just the defining memcpy.
    MemDepResult SourceDep =
        MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                     M->getIterator(), M->getParent());
    if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
      return false;
  }

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA->isNoAlias(MemoryLocation::getForDest(M),
                     MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  // The replacement copies straight from MDep's source, bypassing MDep's
  // destination buffer entirely.
  IRBuilder<> Builder(M);
  Instruction *NewM;
  if (UseMemMove)
    NewM = Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
                                 MDep->getRawSource(), MDep->getSourceAlign(),
                                 M->getLength(), M->isVolatile());
  else
    NewM = Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
                                MDep->getRawSource(), MDep->getSourceAlign(),
                                M->getLength(), M->isVolatile());

  if (MSSAU) {
    // The new copy is inserted immediately before M, so it can reuse M's
    // memory access position: create the new def right after M's def, then
    // let insertDef fix up uses before M is erased below.
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M)));
    auto *LastDef = cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
    auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  // Remove the instruction we're replacing.
  eraseInstruction(M);
  ++NumMemCpyInstr;
  return true;
}
1129
1130/// We've found that the (upward scanning) memory dependence of \p MemCpy is
1131/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
1132/// weren't copied over by \p MemCpy.
1133///
1134/// In other words, transform:
1135/// \code
1136/// memset(dst, c, dst_size);
1137/// memcpy(dst, src, src_size);
1138/// \endcode
1139/// into:
1140/// \code
1141/// memcpy(dst, src, src_size);
1142/// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1143/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (!AA->isMustAlias(MemSet->getDest(), MemCpy->getDest()))
    return false;

  // Check that src and dst of the memcpy aren't the same. While memcpy
  // operands cannot partially overlap, exact equality is allowed.
  if (!AA->isNoAlias(MemoryLocation(MemCpy->getSource(),
                                    LocationSize::precise(1)),
                     MemoryLocation(MemCpy->getDest(),
                                    LocationSize::precise(1))))
    return false;

  if (EnableMemorySSA) {
    // We know that dst up to src_size is not written. We now need to make sure
    // that dst up to dst_size is not accessed. (If we did not move the memset,
    // checking for reads would be sufficient.)
    if (accessedBetween(*AA, MemoryLocation::getForDest(MemSet),
                        MSSA->getMemoryAccess(MemSet),
                        MSSA->getMemoryAccess(MemCpy))) {
      return false;
    }
  } else {
    // We have already checked that dst up to src_size is not accessed. We
    // need to make sure that there are no accesses up to dst_size either.
    MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
        MemoryLocation::getForDest(MemSet), false, MemCpy->getIterator(),
        MemCpy->getParent());
    if (DstDepInfo.getInst() != MemSet)
      return false;
  }

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  if (mayBeVisibleThroughUnwinding(Dest, MemSet, MemCpy))
    return false;

  // If the sizes are the same, simply drop the memset instead of generating
  // a replacement with zero size.
  if (DestSize == SrcSize) {
    eraseInstruction(MemSet);
    return true;
  }

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  // New memset covers dst + src_size .. dst + dst_size; length is clamped to
  // zero via the select when dst_size <= src_size (Sub would wrap otherwise).
  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Instruction *NewMemSet = Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, MaybeAlign(Align));

  if (MSSAU) {
    assert(isa<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy)) &&
           "MemCpy must be a MemoryDef");
    // The new memset is inserted after the memcpy, but it is known that its
    // defining access is the memset about to be removed which immediately
    // precedes the memcpy.
    auto *LastDef =
        cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
    auto *NewAccess = MSSAU->createMemoryAccessBefore(
        NewMemSet, LastDef->getDefiningAccess(), LastDef);
    MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
  }

  eraseInstruction(MemSet);
  return true;
}
1238
1239/// Determine whether the instruction has undefined content for the given Size,
1240/// either because it was freshly alloca'd or started its lifetime.
1241static bool hasUndefContents(Instruction *I, Value *Size) {
1242 if (isa<AllocaInst>(I))
1243 return true;
1244
1245 if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
1246 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1247 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1248 if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1249 if (LTSize->getZExtValue() >= CSize->getZExtValue())
1250 return true;
1251 }
1252
1253 return false;
1254}
1255
1256static bool hasUndefContentsMSSA(MemorySSA *MSSA, AliasAnalysis *AA, Value *V,
1257 MemoryDef *Def, Value *Size) {
1258 if (MSSA->isLiveOnEntryDef(Def))
1259 return isa<AllocaInst>(getUnderlyingObject(V));
1260
1261 if (IntrinsicInst *II =
1262 dyn_cast_or_null<IntrinsicInst>(Def->getMemoryInst())) {
1263 if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
1264 ConstantInt *LTSize = cast<ConstantInt>(II->getArgOperand(0));
1265
1266 if (ConstantInt *CSize = dyn_cast<ConstantInt>(Size)) {
1267 if (AA->isMustAlias(V, II->getArgOperand(1)) &&
1268 LTSize->getZExtValue() >= CSize->getZExtValue())
1269 return true;
1270 }
1271
1272 // If the lifetime.start covers a whole alloca (as it almost always
1273 // does) and we're querying a pointer based on that alloca, then we know
1274 // the memory is definitely undef, regardless of how exactly we alias.
1275 // The size also doesn't matter, as an out-of-bounds access would be UB.
1276 AllocaInst *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(V));
1277 if (getUnderlyingObject(II->getArgOperand(1)) == Alloca) {
1278 const DataLayout &DL = Alloca->getModule()->getDataLayout();
1279 if (Optional<TypeSize> AllocaSize = Alloca->getAllocationSizeInBits(DL))
1280 if (*AllocaSize == LTSize->getValue() * 8)
1281 return true;
1282 }
1283 }
1284 }
1285
1286 return false;
1287}
1288
1289/// Transform memcpy to memset when its source was just memset.
1290/// In other words, turn:
1291/// \code
1292/// memset(dst1, c, dst1_size);
1293/// memcpy(dst2, dst1, dst2_size);
1294/// \endcode
1295/// into:
1296/// \code
1297/// memset(dst1, c, dst1_size);
1298/// memset(dst2, c, dst2_size);
1299/// \endcode
1300/// When dst2_size <= dst1_size.
1301bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1302 MemSetInst *MemSet) {
1303 // Make sure that memcpy(..., memset(...), ...), that is we are memsetting and
1304 // memcpying from the same address. Otherwise it is hard to reason about.
1305 if (!AA->isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
1306 return false;
1307
1308 Value *MemSetSize = MemSet->getLength();
1309 Value *CopySize = MemCpy->getLength();
1310
1311 if (MemSetSize != CopySize) {
1312 // Make sure the memcpy doesn't read any more than what the memset wrote.
1313 // Don't worry about sizes larger than i64.
1314
1315 // A known memset size is required.
1316 ConstantInt *CMemSetSize = dyn_cast<ConstantInt>(MemSetSize);
1317 if (!CMemSetSize)
1318 return false;
1319
1320 // A known memcpy size is also required.
1321 ConstantInt *CCopySize = dyn_cast<ConstantInt>(CopySize);
1322 if (!CCopySize)
1323 return false;
1324 if (CCopySize->getZExtValue() > CMemSetSize->getZExtValue()) {
1325 // If the memcpy is larger than the memset, but the memory was undef prior
1326 // to the memset, we can just ignore the tail. Technically we're only
1327 // interested in the bytes from MemSetSize..CopySize here, but as we can't
1328 // easily represent this location, we use the full 0..CopySize range.
1329 MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
1330 bool CanReduceSize = false;
1331 if (EnableMemorySSA) {
1332 MemoryUseOrDef *MemSetAccess = MSSA->getMemoryAccess(MemSet);
1333 MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
1334 MemSetAccess->getDefiningAccess(), MemCpyLoc);
1335 if (auto *MD = dyn_cast<MemoryDef>(Clobber))
1336 if (hasUndefContentsMSSA(MSSA, AA, MemCpy->getSource(), MD, CopySize))
1337 CanReduceSize = true;
1338 } else {
1339 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1340 MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
1341 if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
1342 CanReduceSize = true;
1343 }
1344
1345 if (!CanReduceSize)
1346 return false;
1347 CopySize = MemSetSize;
1348 }
1349 }
1350
1351 IRBuilder<> Builder(MemCpy);
1352 Instruction *NewM =
1353 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
1354 CopySize, MaybeAlign(MemCpy->getDestAlignment()));
1355 if (MSSAU) {
1356 auto *LastDef =
1357 cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(MemCpy));
1358 auto *NewAccess = MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
1359 MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
1360 }
1361
1362 return true;
1363}
1364
/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
///
/// Returns true if any change was made; \p BBI is advanced past the erased
/// instruction when the memcpy is removed.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M, BasicBlock::iterator &BBI) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    ++BBI;
    eraseInstruction(M);
    return true;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
                                           M->getModule()->getDataLayout())) {
        IRBuilder<> Builder(M);
        Instruction *NewM =
            Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                                 MaybeAlign(M->getDestAlignment()), false);
        if (MSSAU) {
          // Keep MemorySSA in sync: the replacement memset becomes a new
          // MemoryDef immediately after the memcpy's access.
          auto *LastDef =
              cast<MemoryDef>(MSSAU->getMemorySSA()->getMemoryAccess(M));
          auto *NewAccess =
              MSSAU->createMemoryAccessAfter(NewM, LastDef, LastDef);
          MSSAU->insertDef(cast<MemoryDef>(NewAccess), /*RenameUses=*/true);
        }

        eraseInstruction(M);
        ++NumCpyToSet;
        return true;
      }

  if (EnableMemorySSA) {
    MemoryUseOrDef *MA = MSSA->getMemoryAccess(M);
    MemoryAccess *AnyClobber = MSSA->getWalker()->getClobberingMemoryAccess(MA);
    MemoryLocation DestLoc = MemoryLocation::getForDest(M);
    const MemoryAccess *DestClobber =
        MSSA->getWalker()->getClobberingMemoryAccess(AnyClobber, DestLoc);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset. We don't need the memcpy size for this.
    // The memcpy must post-dom the memset, so limit this to the same basic
    // block. A non-local generalization is likely not worthwhile.
    if (auto *MD = dyn_cast<MemoryDef>(DestClobber))
      if (auto *MDep = dyn_cast_or_null<MemSetInst>(MD->getMemoryInst()))
        if (DestClobber->getBlock() == M->getParent())
          if (processMemSetMemCpyDependence(M, MDep))
            return true;

    MemoryAccess *SrcClobber = MSSA->getWalker()->getClobberingMemoryAccess(
        AnyClobber, MemoryLocation::getForSource(M));

    // There are four possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundance for DSE.
    //   b) call-memcpy xform for return slot optimization.
    //   c) memcpy from freshly alloca'd space or space that has just started
    //      its lifetime copies undefined data, and we can therefore eliminate
    //      the memcpy in favor of the data that was already at the destination.
    //   d) memcpy from a just-memset'd source can be turned into memset.
    if (auto *MD = dyn_cast<MemoryDef>(SrcClobber)) {
      if (Instruction *MI = MD->getMemoryInst()) {
        if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
          if (auto *C = dyn_cast<CallInst>(MI)) {
            // The memcpy must post-dom the call. Limit to the same block for
            // now. Additionally, we need to ensure that there are no accesses
            // to dest between the call and the memcpy. Accesses to src will be
            // checked by performCallSlotOptzn().
            // TODO: Support non-local call-slot optimization?
            if (C->getParent() == M->getParent() &&
                !accessedBetween(*AA, DestLoc, MD, MA)) {
              // FIXME: Can we pass in either of dest/src alignment here instead
              // of conservatively taking the minimum?
              Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                         M->getSourceAlign().valueOrOne());
              if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                       CopySize->getZExtValue(), Alignment,
                                       C)) {
                LLVM_DEBUG(dbgs() << "Performed call slot optimization:\n"
                                  << "    call: " << *C << "\n"
                                  << "    memcpy: " << *M << "\n");
                eraseInstruction(M);
                ++NumMemCpyInstr;
                return true;
              }
            }
          }
        }
        if (auto *MDep = dyn_cast<MemCpyInst>(MI))
          return processMemCpyMemCpyDependence(M, MDep);
        if (auto *MDep = dyn_cast<MemSetInst>(MI)) {
          if (performMemCpyToMemSetOptzn(M, MDep)) {
            LLVM_DEBUG(dbgs() << "Converted memcpy to memset\n");
            eraseInstruction(M);
            ++NumCpyToSet;
            return true;
          }
        }
      }

      // Case (c): the clobbering def shows the source bytes were never
      // written, so the copy reads undef and can be dropped entirely.
      if (hasUndefContentsMSSA(MSSA, AA, M->getSource(), MD, M->getLength())) {
        LLVM_DEBUG(dbgs() << "Removed memcpy from undef\n");
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }
  } else {
    // Legacy MemoryDependenceAnalysis path.
    // NOTE(review): MD is dereferenced unconditionally here; callers are
    // expected to supply it whenever EnableMemorySSA is off — confirm with
    // runImpl()/runOnFunction().
    MemDepResult DepInfo = MD->getDependency(M);

    // Try to turn a partially redundant memset + memcpy into
    // memcpy + smaller memset. We don't need the memcpy size for this.
    if (DepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
        if (processMemSetMemCpyDependence(M, MDep))
          return true;

    // There are four possible optimizations we can do for memcpy:
    //   a) memcpy-memcpy xform which exposes redundance for DSE.
    //   b) call-memcpy xform for return slot optimization.
    //   c) memcpy from freshly alloca'd space or space that has just started
    //      its lifetime copies undefined data, and we can therefore eliminate
    //      the memcpy in favor of the data that was already at the destination.
    //   d) memcpy from a just-memset'd source can be turned into memset.
    if (ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength())) {
      if (DepInfo.isClobber()) {
        if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
          // FIXME: Can we pass in either of dest/src alignment here instead
          // of conservatively taking the minimum?
          Align Alignment = std::min(M->getDestAlign().valueOrOne(),
                                     M->getSourceAlign().valueOrOne());
          if (performCallSlotOptzn(M, M, M->getDest(), M->getSource(),
                                   CopySize->getZExtValue(), Alignment, C)) {
            eraseInstruction(M);
            ++NumMemCpyInstr;
            return true;
          }
        }
      }
    }

    MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
    MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
        SrcLoc, true, M->getIterator(), M->getParent());

    if (SrcDepInfo.isClobber()) {
      if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
        return processMemCpyMemCpyDependence(M, MDep);
    } else if (SrcDepInfo.isDef()) {
      // Case (c): a def (e.g. alloca / lifetime start) means the source
      // holds undefined data, so the copy is removable.
      if (hasUndefContents(SrcDepInfo.getInst(), M->getLength())) {
        eraseInstruction(M);
        ++NumMemCpyInstr;
        return true;
      }
    }

    if (SrcDepInfo.isClobber())
      if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
        if (performMemCpyToMemSetOptzn(M, MDep)) {
          eraseInstruction(M);
          ++NumCpyToSet;
          return true;
        }
  }

  return false;
}
1537
1538/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1539/// not to alias.
1540bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1541 if (!TLI->has(LibFunc_memmove))
1542 return false;
1543
1544 // See if the pointers alias.
1545 if (!AA->isNoAlias(MemoryLocation::getForDest(M),
1546 MemoryLocation::getForSource(M)))
1547 return false;
1548
1549 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *Mdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: "
<< *M << "\n"; } } while (false)
1550 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("memcpyopt")) { dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: "
<< *M << "\n"; } } while (false)
;
1551
1552 // If not, then we know we can transform this.
1553 Type *ArgTys[3] = { M->getRawDest()->getType(),
1554 M->getRawSource()->getType(),
1555 M->getLength()->getType() };
1556 M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1557 Intrinsic::memcpy, ArgTys));
1558
1559 // For MemorySSA nothing really changes (except that memcpy may imply stricter
1560 // aliasing guarantees).
1561
1562 // MemDep may have over conservative information about this instruction, just
1563 // conservatively flush it from the cache.
1564 if (MD)
1565 MD->removeInstruction(M);
1566
1567 ++NumMoveToCpy;
1568 return true;
1569}
1570
/// This is called on every byval argument in call sites.
///
/// If the byval pointer is the destination of an earlier memcpy, try to make
/// the call read directly from the memcpy's source so later passes can delete
/// the copy. Returns true if the call's argument was rewritten.
bool MemCpyOptPass::processByValArgument(CallBase &CB, unsigned ArgNo) {
  const DataLayout &DL = CB.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CB.getArgOperand(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemoryLocation Loc(ByValArg, LocationSize::precise(ByValSize));
  MemCpyInst *MDep = nullptr;
  if (EnableMemorySSA) {
    MemoryUseOrDef *CallAccess = MSSA->getMemoryAccess(&CB);
    if (!CallAccess)
      return false;
    MemoryAccess *Clobber = MSSA->getWalker()->getClobberingMemoryAccess(
        CallAccess->getDefiningAccess(), Loc);
    if (auto *MD = dyn_cast<MemoryDef>(Clobber))
      MDep = dyn_cast_or_null<MemCpyInst>(MD->getMemoryInst());
  } else {
    // NOTE(review): this branch assumes the MemoryDependenceResults member MD
    // is non-null whenever EnableMemorySSA is off — confirm with runImpl().
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        Loc, true, CB.getIterator(), CB.getParent());
    if (!DepInfo.isClobber())
      return false;
    MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  }

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  MaybeAlign ByValAlign = CB.getParamAlign(ArgNo);
  if (!ByValAlign) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  MaybeAlign MemDepAlign = MDep->getSourceAlign();
  if ((!MemDepAlign || *MemDepAlign < *ByValAlign) &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL, &CB, AC,
                                 DT) < *ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  if (EnableMemorySSA) {
    if (writtenBetween(MSSA, MemoryLocation::getForSource(MDep),
                       MSSA->getMemoryAccess(MDep), MSSA->getMemoryAccess(&CB)))
      return false;
  } else {
    // NOTE: This is conservative, it will stop on any read from the source loc,
    // not just the defining memcpy.
    MemDepResult SourceDep = MD->getPointerDependencyFrom(
        MemoryLocation::getForSource(MDep), false,
        CB.getIterator(), MDep->getParent());
    if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
      return false;
  }

  // Insert a bitcast when the memcpy source's pointer type differs from the
  // byval argument's type, so the call operand stays well-typed.
  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType()) {
    BitCastInst *TmpBitCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                                              "tmpcast", &CB);
    // Set the tmpcast's DebugLoc to MDep's
    TmpBitCast->setDebugLoc(MDep->getDebugLoc());
    TmpCast = TmpBitCast;
  }

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << CB << "\n");

  // Otherwise we're good! Update the byval argument.
  CB.setArgOperand(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}
1664
/// Executes one iteration of MemCpyOptPass.
///
/// Returns true if any instruction was simplified or removed during this
/// sweep over the function.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instruction in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT->isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      // Dispatch order matters: the specific memory-intrinsic handlers must
      // run before the generic CallBase case below.
      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M, BI);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto *CB = dyn_cast<CallBase>(I)) {
        // Try to forward memcpy'd data straight into byval arguments.
        for (unsigned i = 0, e = CB->arg_size(); i != e; ++i)
          if (CB->isByValArgument(i))
            MadeChange |= processByValArgument(*CB, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}
1709
/// New pass manager entry point.
///
/// Requests the memory analysis selected by EnableMemorySSA as a hard result
/// and only picks up the other one if it happens to be cached, then forwards
/// everything to runImpl().
PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
  auto *MD = !EnableMemorySSA ? &AM.getResult<MemoryDependenceAnalysis>(F)
                              : AM.getCachedResult<MemoryDependenceAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto *AA = &AM.getResult<AAManager>(F);
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *MSSA = EnableMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F)
                               : AM.getCachedResult<MemorySSAAnalysis>(F);

  bool MadeChange =
      runImpl(F, MD, &TLI, AA, AC, DT, MSSA ? &MSSA->getMSSA() : nullptr);
  if (!MadeChange)
    return PreservedAnalyses::all();

  // Only the analyses we actually kept up to date survive the pass.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  if (MD)
    PA.preserve<MemoryDependenceAnalysis>();
  if (MSSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
1733
1734bool MemCpyOptPass::runImpl(Function &F, MemoryDependenceResults *MD_,
1735 TargetLibraryInfo *TLI_, AliasAnalysis *AA_,
1736 AssumptionCache *AC_, DominatorTree *DT_,
1737 MemorySSA *MSSA_) {
1738 bool MadeChange = false;
1739 MD = MD_;
12
Null pointer value stored to field 'MD'
1740 TLI = TLI_;
1741 AA = AA_;
1742 AC = AC_;
1743 DT = DT_;
1744 MSSA = MSSA_;
1745 MemorySSAUpdater MSSAU_(MSSA_);
1746 MSSAU = MSSA_
12.1
'MSSA_' is null
12.1
'MSSA_' is null
12.1
'MSSA_' is null
12.1
'MSSA_' is null
12.1
'MSSA_' is null
12.1
'MSSA_' is null
12.1
'MSSA_' is null
? &MSSAU_ : nullptr;
13
'?' condition is false
1747 // If we don't have at least memset and memcpy, there is little point of doing
1748 // anything here. These are required by a freestanding implementation, so if
1749 // even they are disabled, there is no point in trying hard.
1750 if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
14
Taking false branch
1751 return false;
1752
1753 while (true) {
15
Loop condition is true. Entering loop body
1754 if (!iterateOnFunction(F))
16
Calling 'MemCpyOptPass::iterateOnFunction'
1755 break;
1756 MadeChange = true;
1757 }
1758
1759 if (MSSA_ && VerifyMemorySSA)
1760 MSSA_->verifyMemorySSA();
1761
1762 MD = nullptr;
1763 return MadeChange;
1764}
1765
1766/// This is the main transformation entry point for a function.
1767bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1768 if (skipFunction(F))
1
Assuming the condition is false
2
Taking false branch
1769 return false;
1770
1771 auto *MDWP = !EnableMemorySSA
3
Assuming the condition is false
4
'?' condition is false
1772 ? &getAnalysis<MemoryDependenceWrapperPass>()
1773 : getAnalysisIfAvailable<MemoryDependenceWrapperPass>();
1774 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1775 auto *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
1776 auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1777 auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1778 auto *MSSAWP = EnableMemorySSA
5
Assuming the condition is false
6
'?' condition is false
1779 ? &getAnalysis<MemorySSAWrapperPass>()
1780 : getAnalysisIfAvailable<MemorySSAWrapperPass>();
1781
1782 return Impl.runImpl(F, MDWP ? & MDWP->getMemDep() : nullptr, TLI, AA, AC, DT,
7
Assuming 'MDWP' is null
8
'?' condition is false
10
Passing null pointer value via 2nd parameter 'MD_'
11
Calling 'MemCpyOptPass::runImpl'
1783 MSSAWP
8.1
'MSSAWP' is null
8.1
'MSSAWP' is null
8.1
'MSSAWP' is null
8.1
'MSSAWP' is null
8.1
'MSSAWP' is null
8.1
'MSSAWP' is null
8.1
'MSSAWP' is null
? &MSSAWP->getMSSA() : nullptr)
;
9
'?' condition is false
1784}

/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  // Element type the alloca reserves space for.
  Type *AllocatedType;

  // Packed subclass-data layout: alignment, inalloca flag, swifterror flag.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment (stored log2-encoded in the bitfield).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  // FIXME: Remove this one transition to Align is over.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Packed subclass-data layout: volatile flag, alignment, atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment (stored log2-encoded in the bitfield).
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Returns true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Returns true if this load is non-volatile and its ordering is no
  /// stronger than Unordered.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
302/// An instruction for storing to memory.
303class StoreInst : public Instruction {
304 using VolatileField = BoolBitfieldElementT<0>;
305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
307 static_assert(
308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309 "Bitfields must be contiguous");
310
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
330 Instruction *InsertBefore = nullptr);
331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333
334 // allocate space for exactly two operands
335 void *operator new(size_t s) {
336 return User::operator new(s, 2);
337 }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
24
Assuming the condition is true
25
Assuming the condition is true
26
Returning the value 1, which participates in a condition later
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<StoreInst>::op_begin(const_cast
<StoreInst*>(this))[i_nocapture].get()); } void StoreInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<StoreInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned StoreInst::getNumOperands() const { return OperandTraits
<StoreInst>::operands(this); } template <int Idx_nocapture
> Use &StoreInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
StoreInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t s) {
466 return User::operator new(s, 0);
467 }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t s) {
550 return User::operator new(s, 3);
551 }
552
553 using VolatileField = BoolBitfieldElementT<0>;
554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
555 using SuccessOrderingField =
556 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
557 using FailureOrderingField =
558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
559 using AlignmentField =
560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
561 static_assert(
562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
563 FailureOrderingField, AlignmentField>(),
564 "Bitfields must be contiguous");
565
566 /// Return the alignment of the memory that is being allocated by the
567 /// instruction.
568 Align getAlign() const {
569 return Align(1ULL << getSubclassData<AlignmentField>());
570 }
571
572 void setAlignment(Align Align) {
573 setSubclassData<AlignmentField>(Log2(Align));
574 }
575
576 /// Return true if this is a cmpxchg from a volatile memory
577 /// location.
578 ///
579 bool isVolatile() const { return getSubclassData<VolatileField>(); }
580
581 /// Specify whether this is a volatile cmpxchg.
582 ///
583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584
585 /// Return true if this cmpxchg may spuriously fail.
586 bool isWeak() const { return getSubclassData<WeakField>(); }
587
588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
589
590 /// Transparently provide more efficient getOperand methods.
591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
592
593 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
594 return Ordering != AtomicOrdering::NotAtomic &&
595 Ordering != AtomicOrdering::Unordered;
596 }
597
598 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
599 return Ordering != AtomicOrdering::NotAtomic &&
600 Ordering != AtomicOrdering::Unordered &&
601 Ordering != AtomicOrdering::AcquireRelease &&
602 Ordering != AtomicOrdering::Release;
603 }
604
605 /// Returns the success ordering constraint of this cmpxchg instruction.
606 AtomicOrdering getSuccessOrdering() const {
607 return getSubclassData<SuccessOrderingField>();
608 }
609
610 /// Sets the success ordering constraint of this cmpxchg instruction.
611 void setSuccessOrdering(AtomicOrdering Ordering) {
612 assert(isValidSuccessOrdering(Ordering) &&(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 613, __extension__ __PRETTY_FUNCTION__))
613 "invalid CmpXchg success ordering")(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 613, __extension__ __PRETTY_FUNCTION__))
;
614 setSubclassData<SuccessOrderingField>(Ordering);
615 }
616
617 /// Returns the failure ordering constraint of this cmpxchg instruction.
618 AtomicOrdering getFailureOrdering() const {
619 return getSubclassData<FailureOrderingField>();
620 }
621
622 /// Sets the failure ordering constraint of this cmpxchg instruction.
623 void setFailureOrdering(AtomicOrdering Ordering) {
624 assert(isValidFailureOrdering(Ordering) &&(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 625, __extension__ __PRETTY_FUNCTION__))
625 "invalid CmpXchg failure ordering")(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 625, __extension__ __PRETTY_FUNCTION__))
;
626 setSubclassData<FailureOrderingField>(Ordering);
627 }
628
629 /// Returns a single ordering which is at least as strong as both the
630 /// success and failure orderings for this cmpxchg.
631 AtomicOrdering getMergedOrdering() const {
632 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
633 return AtomicOrdering::SequentiallyConsistent;
634 if (getFailureOrdering() == AtomicOrdering::Acquire) {
635 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
636 return AtomicOrdering::Acquire;
637 if (getSuccessOrdering() == AtomicOrdering::Release)
638 return AtomicOrdering::AcquireRelease;
639 }
640 return getSuccessOrdering();
641 }
642
643 /// Returns the synchronization scope ID of this cmpxchg instruction.
644 SyncScope::ID getSyncScopeID() const {
645 return SSID;
646 }
647
648 /// Sets the synchronization scope ID of this cmpxchg instruction.
649 void setSyncScopeID(SyncScope::ID SSID) {
650 this->SSID = SSID;
651 }
652
653 Value *getPointerOperand() { return getOperand(0); }
654 const Value *getPointerOperand() const { return getOperand(0); }
655 static unsigned getPointerOperandIndex() { return 0U; }
656
657 Value *getCompareOperand() { return getOperand(1); }
658 const Value *getCompareOperand() const { return getOperand(1); }
659
660 Value *getNewValOperand() { return getOperand(2); }
661 const Value *getNewValOperand() const { return getOperand(2); }
662
663 /// Returns the address space of the pointer operand.
664 unsigned getPointerAddressSpace() const {
665 return getPointerOperand()->getType()->getPointerAddressSpace();
666 }
667
668 /// Returns the strongest permitted ordering on failure, given the
669 /// desired ordering on success.
670 ///
671 /// If the comparison in a cmpxchg operation fails, there is no atomic store
672 /// so release semantics cannot be provided. So this function drops explicit
673 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
674 /// operation would remain SequentiallyConsistent.
675 static AtomicOrdering
676 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
677 switch (SuccessOrdering) {
678 default:
679 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 679)
;
680 case AtomicOrdering::Release:
681 case AtomicOrdering::Monotonic:
682 return AtomicOrdering::Monotonic;
683 case AtomicOrdering::AcquireRelease:
684 case AtomicOrdering::Acquire:
685 return AtomicOrdering::Acquire;
686 case AtomicOrdering::SequentiallyConsistent:
687 return AtomicOrdering::SequentiallyConsistent;
688 }
689 }
690
691 // Methods for support type inquiry through isa, cast, and dyn_cast:
692 static bool classof(const Instruction *I) {
693 return I->getOpcode() == Instruction::AtomicCmpXchg;
694 }
695 static bool classof(const Value *V) {
696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
697 }
698
699private:
700 // Shadow Instruction::setInstructionSubclassData with a private forwarding
701 // method so that subclasses cannot accidentally use it.
702 template <typename Bitfield>
703 void setSubclassData(typename Bitfield::Type Value) {
704 Instruction::setSubclassData<Bitfield>(Value);
705 }
706
707 /// The synchronization scope ID of this cmpxchg instruction. Not quite
708 /// enough room in SubClassData for everything, so synchronization scope ID
709 /// gets its own field.
710 SyncScope::ID SSID;
711};
712
713template <>
714struct OperandTraits<AtomicCmpXchgInst> :
715 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
716};
717
718DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 718, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 718, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
719
720//===----------------------------------------------------------------------===//
721// AtomicRMWInst Class
722//===----------------------------------------------------------------------===//
723
724/// an instruction that atomically reads a memory location,
725/// combines it with another value, and then stores the result back. Returns
726/// the old value.
727///
728class AtomicRMWInst : public Instruction {
729protected:
730 // Note: Instruction needs to be a friend here to call cloneImpl.
731 friend class Instruction;
732
733 AtomicRMWInst *cloneImpl() const;
734
735public:
736 /// This enumeration lists the possible modifications atomicrmw can make. In
737 /// the descriptions, 'p' is the pointer to the instruction's memory location,
738 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
739 /// instruction. These instructions always return 'old'.
740 enum BinOp : unsigned {
741 /// *p = v
742 Xchg,
743 /// *p = old + v
744 Add,
745 /// *p = old - v
746 Sub,
747 /// *p = old & v
748 And,
749 /// *p = ~(old & v)
750 Nand,
751 /// *p = old | v
752 Or,
753 /// *p = old ^ v
754 Xor,
755 /// *p = old >signed v ? old : v
756 Max,
757 /// *p = old <signed v ? old : v
758 Min,
759 /// *p = old >unsigned v ? old : v
760 UMax,
761 /// *p = old <unsigned v ? old : v
762 UMin,
763
764 /// *p = old + v
765 FAdd,
766
767 /// *p = old - v
768 FSub,
769
770 FIRST_BINOP = Xchg,
771 LAST_BINOP = FSub,
772 BAD_BINOP
773 };
774
775private:
776 template <unsigned Offset>
777 using AtomicOrderingBitfieldElement =
778 typename Bitfield::Element<AtomicOrdering, Offset, 3,
779 AtomicOrdering::LAST>;
780
781 template <unsigned Offset>
782 using BinOpBitfieldElement =
783 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
784
785public:
786 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
787 AtomicOrdering Ordering, SyncScope::ID SSID,
788 Instruction *InsertBefore = nullptr);
789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790 AtomicOrdering Ordering, SyncScope::ID SSID,
791 BasicBlock *InsertAtEnd);
792
793 // allocate space for exactly two operands
794 void *operator new(size_t s) {
795 return User::operator new(s, 2);
796 }
797
798 using VolatileField = BoolBitfieldElementT<0>;
799 using AtomicOrderingField =
800 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
801 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
802 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
803 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
804 OperationField, AlignmentField>(),
805 "Bitfields must be contiguous");
806
807 BinOp getOperation() const { return getSubclassData<OperationField>(); }
808
809 static StringRef getOperationName(BinOp Op);
810
811 static bool isFPOperation(BinOp Op) {
812 switch (Op) {
813 case AtomicRMWInst::FAdd:
814 case AtomicRMWInst::FSub:
815 return true;
816 default:
817 return false;
818 }
819 }
820
821 void setOperation(BinOp Operation) {
822 setSubclassData<OperationField>(Operation);
823 }
824
825 /// Return the alignment of the memory that is being allocated by the
826 /// instruction.
827 Align getAlign() const {
828 return Align(1ULL << getSubclassData<AlignmentField>());
829 }
830
831 void setAlignment(Align Align) {
832 setSubclassData<AlignmentField>(Log2(Align));
833 }
834
835 /// Return true if this is a RMW on a volatile memory location.
836 ///
837 bool isVolatile() const { return getSubclassData<VolatileField>(); }
838
839 /// Specify whether this is a volatile RMW or not.
840 ///
841 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
842
843 /// Transparently provide more efficient getOperand methods.
844 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
845
846 /// Returns the ordering constraint of this rmw instruction.
847 AtomicOrdering getOrdering() const {
848 return getSubclassData<AtomicOrderingField>();
849 }
850
851 /// Sets the ordering constraint of this rmw instruction.
852 void setOrdering(AtomicOrdering Ordering) {
853 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 854, __extension__ __PRETTY_FUNCTION__))
854 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 854, __extension__ __PRETTY_FUNCTION__))
;
855 setSubclassData<AtomicOrderingField>(Ordering);
856 }
857
858 /// Returns the synchronization scope ID of this rmw instruction.
859 SyncScope::ID getSyncScopeID() const {
860 return SSID;
861 }
862
863 /// Sets the synchronization scope ID of this rmw instruction.
864 void setSyncScopeID(SyncScope::ID SSID) {
865 this->SSID = SSID;
866 }
867
868 Value *getPointerOperand() { return getOperand(0); }
869 const Value *getPointerOperand() const { return getOperand(0); }
870 static unsigned getPointerOperandIndex() { return 0U; }
871
872 Value *getValOperand() { return getOperand(1); }
873 const Value *getValOperand() const { return getOperand(1); }
874
875 /// Returns the address space of the pointer operand.
876 unsigned getPointerAddressSpace() const {
877 return getPointerOperand()->getType()->getPointerAddressSpace();
878 }
879
880 bool isFloatingPointOperation() const {
881 return isFPOperation(getOperation());
882 }
883
884 // Methods for support type inquiry through isa, cast, and dyn_cast:
885 static bool classof(const Instruction *I) {
886 return I->getOpcode() == Instruction::AtomicRMW;
887 }
888 static bool classof(const Value *V) {
889 return isa<Instruction>(V) && classof(cast<Instruction>(V));
890 }
891
892private:
893 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
894 AtomicOrdering Ordering, SyncScope::ID SSID);
895
896 // Shadow Instruction::setInstructionSubclassData with a private forwarding
897 // method so that subclasses cannot accidentally use it.
898 template <typename Bitfield>
899 void setSubclassData(typename Bitfield::Type Value) {
900 Instruction::setSubclassData<Bitfield>(Value);
901 }
902
903 /// The synchronization scope ID of this rmw instruction. Not quite enough
904 /// room in SubClassData for everything, so synchronization scope ID gets its
905 /// own field.
906 SyncScope::ID SSID;
907};
908
909template <>
910struct OperandTraits<AtomicRMWInst>
911 : public FixedNumOperandTraits<AtomicRMWInst,2> {
912};
913
914DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 914, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 914, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
915
916//===----------------------------------------------------------------------===//
917// GetElementPtrInst Class
918//===----------------------------------------------------------------------===//
919
920// checkGEPType - Simple wrapper function to give a better assertion failure
921// message on bad indexes for a gep instruction.
922//
923inline Type *checkGEPType(Type *Ty) {
924 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 924, __extension__ __PRETTY_FUNCTION__))
;
925 return Ty;
926}
927
928/// an instruction for type-safe pointer arithmetic to
929/// access elements of arrays and structs
930///
931class GetElementPtrInst : public Instruction {
932 Type *SourceElementType;
933 Type *ResultElementType;
934
935 GetElementPtrInst(const GetElementPtrInst &GEPI);
936
937 /// Constructors - Create a getelementptr instruction with a base pointer an
938 /// list of indices. The first ctor can optionally insert before an existing
939 /// instruction, the second appends the new instruction to the specified
940 /// BasicBlock.
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, Instruction *InsertBefore);
944 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
945 ArrayRef<Value *> IdxList, unsigned Values,
946 const Twine &NameStr, BasicBlock *InsertAtEnd);
947
948 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
949
950protected:
951 // Note: Instruction needs to be a friend here to call cloneImpl.
952 friend class Instruction;
953
954 GetElementPtrInst *cloneImpl() const;
955
956public:
957 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
958 ArrayRef<Value *> IdxList,
959 const Twine &NameStr = "",
960 Instruction *InsertBefore = nullptr) {
961 unsigned Values = 1 + unsigned(IdxList.size());
962 if (!PointeeType) {
963 PointeeType =
964 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
965 } else {
966 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
967 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
;
968 }
969 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
970 NameStr, InsertBefore);
971 }
972
973 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
974 ArrayRef<Value *> IdxList,
975 const Twine &NameStr,
976 BasicBlock *InsertAtEnd) {
977 unsigned Values = 1 + unsigned(IdxList.size());
978 if (!PointeeType) {
979 PointeeType =
980 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
981 } else {
982 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
983 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
;
984 }
985 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
986 NameStr, InsertAtEnd);
987 }
988
989 /// Create an "inbounds" getelementptr. See the documentation for the
990 /// "inbounds" flag in LangRef.html for details.
991 static GetElementPtrInst *CreateInBounds(Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr = "",
994 Instruction *InsertBefore = nullptr){
995 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
996 }
997
998 static GetElementPtrInst *
999 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
1000 const Twine &NameStr = "",
1001 Instruction *InsertBefore = nullptr) {
1002 GetElementPtrInst *GEP =
1003 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1004 GEP->setIsInBounds(true);
1005 return GEP;
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
1013 }
1014
1015 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1016 ArrayRef<Value *> IdxList,
1017 const Twine &NameStr,
1018 BasicBlock *InsertAtEnd) {
1019 GetElementPtrInst *GEP =
1020 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1021 GEP->setIsInBounds(true);
1022 return GEP;
1023 }
1024
1025 /// Transparently provide more efficient getOperand methods.
1026 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1027
1028 Type *getSourceElementType() const { return SourceElementType; }
1029
1030 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1031 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1032
1033 Type *getResultElementType() const {
1034 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
1035 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
;
1036 return ResultElementType;
1037 }
1038
1039 /// Returns the address space of this instruction's pointer type.
1040 unsigned getAddressSpace() const {
1041 // Note that this is always the same as the pointer operand's address space
1042 // and that is cheaper to compute, so cheat here.
1043 return getPointerAddressSpace();
1044 }
1045
1046 /// Returns the result type of a getelementptr with the given source
1047 /// element type and indexes.
1048 ///
1049 /// Null is returned if the indices are invalid for the specified
1050 /// source element type.
1051 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1052 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1053 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1054
1055 /// Return the type of the element at the given index of an indexable
1056 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1057 ///
1058 /// Returns null if the type can't be indexed, or the given index is not
1059 /// legal for the given type.
1060 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1061 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1062
1063 inline op_iterator idx_begin() { return op_begin()+1; }
1064 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1065 inline op_iterator idx_end() { return op_end(); }
1066 inline const_op_iterator idx_end() const { return op_end(); }
1067
1068 inline iterator_range<op_iterator> indices() {
1069 return make_range(idx_begin(), idx_end());
1070 }
1071
1072 inline iterator_range<const_op_iterator> indices() const {
1073 return make_range(idx_begin(), idx_end());
1074 }
1075
1076 Value *getPointerOperand() {
1077 return getOperand(0);
1078 }
1079 const Value *getPointerOperand() const {
1080 return getOperand(0);
1081 }
1082 static unsigned getPointerOperandIndex() {
1083 return 0U; // get index for modifying correct operand.
1084 }
1085
1086 /// Method to return the pointer operand as a
1087 /// PointerType.
1088 Type *getPointerOperandType() const {
1089 return getPointerOperand()->getType();
1090 }
1091
1092 /// Returns the address space of the pointer operand.
1093 unsigned getPointerAddressSpace() const {
1094 return getPointerOperandType()->getPointerAddressSpace();
1095 }
1096
1097 /// Returns the pointer type returned by the GEP
1098 /// instruction, which may be a vector of pointers.
1099 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1100 ArrayRef<Value *> IdxList) {
1101 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1102 Ptr->getType()->getPointerAddressSpace());
1103 // Vector GEP
1104 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1105 ElementCount EltCount = PtrVTy->getElementCount();
1106 return VectorType::get(PtrTy, EltCount);
1107 }
1108 for (Value *Index : IdxList)
1109 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1110 ElementCount EltCount = IndexVTy->getElementCount();
1111 return VectorType::get(PtrTy, EltCount);
1112 }
1113 // Scalar GEP
1114 return PtrTy;
1115 }
1116
1117 unsigned getNumIndices() const { // Note: always non-negative
1118 return getNumOperands() - 1;
1119 }
1120
1121 bool hasIndices() const {
1122 return getNumOperands() > 1;
1123 }
1124
1125 /// Return true if all of the indices of this GEP are
1126 /// zeros. If so, the result pointer and the first operand have the same
1127 /// value, just potentially different types.
1128 bool hasAllZeroIndices() const;
1129
1130 /// Return true if all of the indices of this GEP are
1131 /// constant integers. If so, the result pointer and the first operand have
1132 /// a constant offset between them.
1133 bool hasAllConstantIndices() const;
1134
1135 /// Set or clear the inbounds flag on this GEP instruction.
1136 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1137 void setIsInBounds(bool b = true);
1138
1139 /// Determine whether the GEP has the inbounds flag.
1140 bool isInBounds() const;
1141
1142 /// Accumulate the constant address offset of this GEP if possible.
1143 ///
1144 /// This routine accepts an APInt into which it will accumulate the constant
1145 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1146 /// all-constant, it returns false and the value of the offset APInt is
1147 /// undefined (it is *not* preserved!). The APInt passed into this routine
1148 /// must be at least as wide as the IntPtr type for the address space of
1149 /// the base GEP pointer.
1150 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1151
1152 // Methods for support type inquiry through isa, cast, and dyn_cast:
1153 static bool classof(const Instruction *I) {
1154 return (I->getOpcode() == Instruction::GetElementPtr);
1155 }
1156 static bool classof(const Value *V) {
1157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1158 }
1159};
1160
1161template <>
1162struct OperandTraits<GetElementPtrInst> :
1163 public VariadicOperandTraits<GetElementPtrInst, 1> {
1164};
1165
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 Instruction *InsertBefore)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertBefore),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
;
1177 init(Ptr, IdxList, NameStr);
1178}
1179
1180GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1181 ArrayRef<Value *> IdxList, unsigned Values,
1182 const Twine &NameStr,
1183 BasicBlock *InsertAtEnd)
1184 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1185 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1186 Values, InsertAtEnd),
1187 SourceElementType(PointeeType),
1188 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1189 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
1190 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
;
1191 init(Ptr, IdxList, NameStr);
1192}
1193
1194DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1194, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1194, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1195
1196//===----------------------------------------------------------------------===//
1197// ICmpInst Class
1198//===----------------------------------------------------------------------===//
1199
1200/// This instruction compares its operands according to the predicate given
1201/// to the constructor. It only operates on integers or pointers. The operands
1202/// must be identical types.
1203/// Represent an integer comparison operator.
1204class ICmpInst: public CmpInst {
1205 void AssertOK() {
1206 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
1207 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
;
1208 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
1209 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
;
1210 // Check that the operands are the right type
1211 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1212 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1213 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
;
1214 }
1215
1216protected:
1217 // Note: Instruction needs to be a friend here to call cloneImpl.
1218 friend class Instruction;
1219
1220 /// Clone an identical ICmpInst
1221 ICmpInst *cloneImpl() const;
1222
1223public:
1224 /// Constructor with insert-before-instruction semantics.
1225 ICmpInst(
1226 Instruction *InsertBefore, ///< Where to insert
1227 Predicate pred, ///< The predicate to use for the comparison
1228 Value *LHS, ///< The left-hand-side of the expression
1229 Value *RHS, ///< The right-hand-side of the expression
1230 const Twine &NameStr = "" ///< Name of the instruction
1231 ) : CmpInst(makeCmpResultType(LHS->getType()),
1232 Instruction::ICmp, pred, LHS, RHS, NameStr,
1233 InsertBefore) {
1234#ifndef NDEBUG
1235 AssertOK();
1236#endif
1237 }
1238
1239 /// Constructor with insert-at-end semantics.
1240 ICmpInst(
1241 BasicBlock &InsertAtEnd, ///< Block to insert into.
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr,
1248 &InsertAtEnd) {
1249#ifndef NDEBUG
1250 AssertOK();
1251#endif
1252 }
1253
1254 /// Constructor with no-insertion semantics
1255 ICmpInst(
1256 Predicate pred, ///< The predicate to use for the comparison
1257 Value *LHS, ///< The left-hand-side of the expression
1258 Value *RHS, ///< The right-hand-side of the expression
1259 const Twine &NameStr = "" ///< Name of the instruction
1260 ) : CmpInst(makeCmpResultType(LHS->getType()),
1261 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1262#ifndef NDEBUG
1263 AssertOK();
1264#endif
1265 }
1266
1267 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1268 /// @returns the predicate that would be the result if the operand were
1269 /// regarded as signed.
1270 /// Return the signed version of the predicate
1271 Predicate getSignedPredicate() const {
1272 return getSignedPredicate(getPredicate());
1273 }
1274
1275 /// This is a static version that you can use without an instruction.
1276 /// Return the signed version of the predicate.
1277 static Predicate getSignedPredicate(Predicate pred);
1278
1279 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1280 /// @returns the predicate that would be the result if the operand were
1281 /// regarded as unsigned.
1282 /// Return the unsigned version of the predicate
1283 Predicate getUnsignedPredicate() const {
1284 return getUnsignedPredicate(getPredicate());
1285 }
1286
1287 /// This is a static version that you can use without an instruction.
1288 /// Return the unsigned version of the predicate.
1289 static Predicate getUnsignedPredicate(Predicate pred);
1290
1291 /// Return true if this predicate is either EQ or NE. This also
1292 /// tests for commutativity.
1293 static bool isEquality(Predicate P) {
1294 return P == ICMP_EQ || P == ICMP_NE;
1295 }
1296
1297 /// Return true if this predicate is either EQ or NE. This also
1298 /// tests for commutativity.
1299 bool isEquality() const {
1300 return isEquality(getPredicate());
1301 }
1302
1303 /// @returns true if the predicate of this ICmpInst is commutative
1304 /// Determine if this relation is commutative.
1305 bool isCommutative() const { return isEquality(); }
1306
1307 /// Return true if the predicate is relational (not EQ or NE).
1308 ///
1309 bool isRelational() const {
1310 return !isEquality();
1311 }
1312
1313 /// Return true if the predicate is relational (not EQ or NE).
1314 ///
1315 static bool isRelational(Predicate P) {
1316 return !isEquality(P);
1317 }
1318
1319 /// Return true if the predicate is SGT or UGT.
1320 ///
1321 static bool isGT(Predicate P) {
1322 return P == ICMP_SGT || P == ICMP_UGT;
1323 }
1324
1325 /// Return true if the predicate is SLT or ULT.
1326 ///
1327 static bool isLT(Predicate P) {
1328 return P == ICMP_SLT || P == ICMP_ULT;
1329 }
1330
1331 /// Return true if the predicate is SGE or UGE.
1332 ///
1333 static bool isGE(Predicate P) {
1334 return P == ICMP_SGE || P == ICMP_UGE;
1335 }
1336
1337 /// Return true if the predicate is SLE or ULE.
1338 ///
1339 static bool isLE(Predicate P) {
1340 return P == ICMP_SLE || P == ICMP_ULE;
1341 }
1342
1343 /// Exchange the two operands to this instruction in such a way that it does
1344 /// not modify the semantics of the instruction. The predicate value may be
1345 /// changed to retain the same result if the predicate is order dependent
1346 /// (e.g. ult).
1347 /// Swap operands and adjust predicate.
1348 void swapOperands() {
1349 setPredicate(getSwappedPredicate());
1350 Op<0>().swap(Op<1>());
1351 }
1352
1353 // Methods for support type inquiry through isa, cast, and dyn_cast:
1354 static bool classof(const Instruction *I) {
1355 return I->getOpcode() == Instruction::ICmp;
1356 }
1357 static bool classof(const Value *V) {
1358 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1359 }
1360};
1361
1362//===----------------------------------------------------------------------===//
1363// FCmpInst Class
1364//===----------------------------------------------------------------------===//
1365
1366/// This instruction compares its operands according to the predicate given
1367/// to the constructor. It only operates on floating point values or packed
1368/// vectors of floating point values. The operands must be identical types.
1369/// Represents a floating point comparison operator.
1370class FCmpInst: public CmpInst {
1371 void AssertOK() {
1372 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1372, __extension__ __PRETTY_FUNCTION__))
;
1373 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
1374 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
;
1375 // Check that the operands are the right type
1376 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
1377 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
;
1378 }
1379
1380protected:
1381 // Note: Instruction needs to be a friend here to call cloneImpl.
1382 friend class Instruction;
1383
1384 /// Clone an identical FCmpInst
1385 FCmpInst *cloneImpl() const;
1386
1387public:
1388 /// Constructor with insert-before-instruction semantics.
1389 FCmpInst(
1390 Instruction *InsertBefore, ///< Where to insert
1391 Predicate pred, ///< The predicate to use for the comparison
1392 Value *LHS, ///< The left-hand-side of the expression
1393 Value *RHS, ///< The right-hand-side of the expression
1394 const Twine &NameStr = "" ///< Name of the instruction
1395 ) : CmpInst(makeCmpResultType(LHS->getType()),
1396 Instruction::FCmp, pred, LHS, RHS, NameStr,
1397 InsertBefore) {
1398 AssertOK();
1399 }
1400
1401 /// Constructor with insert-at-end semantics.
1402 FCmpInst(
1403 BasicBlock &InsertAtEnd, ///< Block to insert into.
1404 Predicate pred, ///< The predicate to use for the comparison
1405 Value *LHS, ///< The left-hand-side of the expression
1406 Value *RHS, ///< The right-hand-side of the expression
1407 const Twine &NameStr = "" ///< Name of the instruction
1408 ) : CmpInst(makeCmpResultType(LHS->getType()),
1409 Instruction::FCmp, pred, LHS, RHS, NameStr,
1410 &InsertAtEnd) {
1411 AssertOK();
1412 }
1413
1414 /// Constructor with no-insertion semantics
1415 FCmpInst(
1416 Predicate Pred, ///< The predicate to use for the comparison
1417 Value *LHS, ///< The left-hand-side of the expression
1418 Value *RHS, ///< The right-hand-side of the expression
1419 const Twine &NameStr = "", ///< Name of the instruction
1420 Instruction *FlagsSource = nullptr
1421 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1422 RHS, NameStr, nullptr, FlagsSource) {
1423 AssertOK();
1424 }
1425
1426 /// @returns true if the predicate of this instruction is EQ or NE.
1427 /// Determine if this is an equality predicate.
1428 static bool isEquality(Predicate Pred) {
1429 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1430 Pred == FCMP_UNE;
1431 }
1432
1433 /// @returns true if the predicate of this instruction is EQ or NE.
1434 /// Determine if this is an equality predicate.
1435 bool isEquality() const { return isEquality(getPredicate()); }
1436
1437 /// @returns true if the predicate of this instruction is commutative.
1438 /// Determine if this is a commutative predicate.
1439 bool isCommutative() const {
1440 return isEquality() ||
1441 getPredicate() == FCMP_FALSE ||
1442 getPredicate() == FCMP_TRUE ||
1443 getPredicate() == FCMP_ORD ||
1444 getPredicate() == FCMP_UNO;
1445 }
1446
1447 /// @returns true if the predicate is relational (not EQ or NE).
1448 /// Determine if this a relational predicate.
1449 bool isRelational() const { return !isEquality(); }
1450
1451 /// Exchange the two operands to this instruction in such a way that it does
1452 /// not modify the semantics of the instruction. The predicate value may be
1453 /// changed to retain the same result if the predicate is order dependent
1454 /// (e.g. ult).
1455 /// Swap operands and adjust predicate.
1456 void swapOperands() {
1457 setPredicate(getSwappedPredicate());
1458 Op<0>().swap(Op<1>());
1459 }
1460
1461 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1462 static bool classof(const Instruction *I) {
1463 return I->getOpcode() == Instruction::FCmp;
1464 }
1465 static bool classof(const Value *V) {
1466 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1467 }
1468};
1469
1470//===----------------------------------------------------------------------===//
1471/// This class represents a function call, abstracting a target
1472/// machine's calling convention. This class uses low bit of the SubClassData
1473/// field to indicate whether or not this is a tail call. The rest of the bits
1474/// hold the calling convention of the call.
1475///
1476class CallInst : public CallBase {
1477 CallInst(const CallInst &CI);
1478
1479 /// Construct a CallInst given a range of arguments.
1480 /// Construct a CallInst from a range of arguments
1481 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1482 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1483 Instruction *InsertBefore);
1484
1485 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1486 const Twine &NameStr, Instruction *InsertBefore)
1487 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1488
1489 /// Construct a CallInst given a range of arguments.
1490 /// Construct a CallInst from a range of arguments
1491 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1492 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1493 BasicBlock *InsertAtEnd);
1494
1495 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1496 Instruction *InsertBefore);
1497
1498 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1499 BasicBlock *InsertAtEnd);
1500
1501 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1502 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1503 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1504
1505 /// Compute the number of operands to allocate.
1506 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1507 // We need one operand for the called function, plus the input operand
1508 // counts provided.
1509 return 1 + NumArgs + NumBundleInputs;
1510 }
1511
1512protected:
1513 // Note: Instruction needs to be a friend here to call cloneImpl.
1514 friend class Instruction;
1515
1516 CallInst *cloneImpl() const;
1517
1518public:
1519 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1520 Instruction *InsertBefore = nullptr) {
1521 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1522 }
1523
1524 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1525 const Twine &NameStr,
1526 Instruction *InsertBefore = nullptr) {
1527 return new (ComputeNumOperands(Args.size()))
1528 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1529 }
1530
1531 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1532 ArrayRef<OperandBundleDef> Bundles = None,
1533 const Twine &NameStr = "",
1534 Instruction *InsertBefore = nullptr) {
1535 const int NumOperands =
1536 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1537 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1538
1539 return new (NumOperands, DescriptorBytes)
1540 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1541 }
1542
1543 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1544 BasicBlock *InsertAtEnd) {
1545 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1546 }
1547
1548 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1549 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1550 return new (ComputeNumOperands(Args.size()))
1551 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1552 }
1553
1554 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1555 ArrayRef<OperandBundleDef> Bundles,
1556 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1557 const int NumOperands =
1558 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1559 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1560
1561 return new (NumOperands, DescriptorBytes)
1562 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1563 }
1564
1565 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1566 Instruction *InsertBefore = nullptr) {
1567 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1568 InsertBefore);
1569 }
1570
1571 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1572 ArrayRef<OperandBundleDef> Bundles = None,
1573 const Twine &NameStr = "",
1574 Instruction *InsertBefore = nullptr) {
1575 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576 NameStr, InsertBefore);
1577 }
1578
1579 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1580 const Twine &NameStr,
1581 Instruction *InsertBefore = nullptr) {
1582 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1583 InsertBefore);
1584 }
1585
1586 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1587 BasicBlock *InsertAtEnd) {
1588 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1589 InsertAtEnd);
1590 }
1591
1592 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1593 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1594 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1595 InsertAtEnd);
1596 }
1597
1598 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1599 ArrayRef<OperandBundleDef> Bundles,
1600 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1601 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1602 NameStr, InsertAtEnd);
1603 }
1604
1605 /// Create a clone of \p CI with a different set of operand bundles and
1606 /// insert it before \p InsertPt.
1607 ///
1608 /// The returned call instruction is identical \p CI in every way except that
1609 /// the operand bundles for the new instruction are set to the operand bundles
1610 /// in \p Bundles.
1611 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1612 Instruction *InsertPt = nullptr);
1613
1614 /// Generate the IR for a call to malloc:
1615 /// 1. Compute the malloc call's argument as the specified type's size,
1616 /// possibly multiplied by the array size if the array size is not
1617 /// constant 1.
1618 /// 2. Call malloc with that argument.
1619 /// 3. Bitcast the result of the malloc call to the specified type.
1620 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1621 Type *AllocTy, Value *AllocSize,
1622 Value *ArraySize = nullptr,
1623 Function *MallocF = nullptr,
1624 const Twine &Name = "");
1625 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1626 Type *AllocTy, Value *AllocSize,
1627 Value *ArraySize = nullptr,
1628 Function *MallocF = nullptr,
1629 const Twine &Name = "");
1630 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1631 Type *AllocTy, Value *AllocSize,
1632 Value *ArraySize = nullptr,
1633 ArrayRef<OperandBundleDef> Bundles = None,
1634 Function *MallocF = nullptr,
1635 const Twine &Name = "");
1636 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1637 Type *AllocTy, Value *AllocSize,
1638 Value *ArraySize = nullptr,
1639 ArrayRef<OperandBundleDef> Bundles = None,
1640 Function *MallocF = nullptr,
1641 const Twine &Name = "");
1642 /// Generate the IR for a call to the builtin free function.
1643 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1644 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1645 static Instruction *CreateFree(Value *Source,
1646 ArrayRef<OperandBundleDef> Bundles,
1647 Instruction *InsertBefore);
1648 static Instruction *CreateFree(Value *Source,
1649 ArrayRef<OperandBundleDef> Bundles,
1650 BasicBlock *InsertAtEnd);
1651
1652 // Note that 'musttail' implies 'tail'.
1653 enum TailCallKind : unsigned {
1654 TCK_None = 0,
1655 TCK_Tail = 1,
1656 TCK_MustTail = 2,
1657 TCK_NoTail = 3,
1658 TCK_LAST = TCK_NoTail
1659 };
1660
1661 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1662 static_assert(
1663 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1664 "Bitfields must be contiguous");
1665
1666 TailCallKind getTailCallKind() const {
1667 return getSubclassData<TailCallKindField>();
1668 }
1669
1670 bool isTailCall() const {
1671 TailCallKind Kind = getTailCallKind();
1672 return Kind == TCK_Tail || Kind == TCK_MustTail;
1673 }
1674
1675 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1676
1677 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1678
1679 void setTailCallKind(TailCallKind TCK) {
1680 setSubclassData<TailCallKindField>(TCK);
1681 }
1682
1683 void setTailCall(bool IsTc = true) {
1684 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1685 }
1686
1687 /// Return true if the call can return twice
1688 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1689 void setCanReturnTwice() {
1690 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1691 }
1692
1693 // Methods for support type inquiry through isa, cast, and dyn_cast:
1694 static bool classof(const Instruction *I) {
1695 return I->getOpcode() == Instruction::Call;
1696 }
1697 static bool classof(const Value *V) {
1698 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1699 }
1700
1701 /// Updates profile metadata by scaling it by \p S / \p T.
1702 void updateProfWeight(uint64_t S, uint64_t T);
1703
1704private:
1705 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1706 // method so that subclasses cannot accidentally use it.
1707 template <typename Bitfield>
1708 void setSubclassData(typename Bitfield::Type Value) {
1709 Instruction::setSubclassData<Bitfield>(Value);
1710 }
1711};
1712
1713CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1714 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1715 BasicBlock *InsertAtEnd)
1716 : CallBase(Ty->getReturnType(), Instruction::Call,
1717 OperandTraits<CallBase>::op_end(this) -
1718 (Args.size() + CountBundleInputs(Bundles) + 1),
1719 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1720 InsertAtEnd) {
1721 init(Ty, Func, Args, Bundles, NameStr);
1722}
1723
1724CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1725 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1726 Instruction *InsertBefore)
1727 : CallBase(Ty->getReturnType(), Instruction::Call,
1728 OperandTraits<CallBase>::op_end(this) -
1729 (Args.size() + CountBundleInputs(Bundles) + 1),
1730 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1731 InsertBefore) {
1732 init(Ty, Func, Args, Bundles, NameStr);
1733}
1734
1735//===----------------------------------------------------------------------===//
1736// SelectInst Class
1737//===----------------------------------------------------------------------===//
1738
1739/// This class represents the LLVM 'select' instruction.
1740///
1741class SelectInst : public Instruction {
1742 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1743 Instruction *InsertBefore)
1744 : Instruction(S1->getType(), Instruction::Select,
1745 &Op<0>(), 3, InsertBefore) {
1746 init(C, S1, S2);
1747 setName(NameStr);
1748 }
1749
1750 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1751 BasicBlock *InsertAtEnd)
1752 : Instruction(S1->getType(), Instruction::Select,
1753 &Op<0>(), 3, InsertAtEnd) {
1754 init(C, S1, S2);
1755 setName(NameStr);
1756 }
1757
1758 void init(Value *C, Value *S1, Value *S2) {
1759 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1759, __extension__ __PRETTY_FUNCTION__))
;
1760 Op<0>() = C;
1761 Op<1>() = S1;
1762 Op<2>() = S2;
1763 }
1764
1765protected:
1766 // Note: Instruction needs to be a friend here to call cloneImpl.
1767 friend class Instruction;
1768
1769 SelectInst *cloneImpl() const;
1770
1771public:
1772 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1773 const Twine &NameStr = "",
1774 Instruction *InsertBefore = nullptr,
1775 Instruction *MDFrom = nullptr) {
1776 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1777 if (MDFrom)
1778 Sel->copyMetadata(*MDFrom);
1779 return Sel;
1780 }
1781
1782 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1783 const Twine &NameStr,
1784 BasicBlock *InsertAtEnd) {
1785 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1786 }
1787
1788 const Value *getCondition() const { return Op<0>(); }
1789 const Value *getTrueValue() const { return Op<1>(); }
1790 const Value *getFalseValue() const { return Op<2>(); }
1791 Value *getCondition() { return Op<0>(); }
1792 Value *getTrueValue() { return Op<1>(); }
1793 Value *getFalseValue() { return Op<2>(); }
1794
1795 void setCondition(Value *V) { Op<0>() = V; }
1796 void setTrueValue(Value *V) { Op<1>() = V; }
1797 void setFalseValue(Value *V) { Op<2>() = V; }
1798
1799 /// Swap the true and false values of the select instruction.
1800 /// This doesn't swap prof metadata.
1801 void swapValues() { Op<1>().swap(Op<2>()); }
1802
1803 /// Return a string if the specified operands are invalid
1804 /// for a select operation, otherwise return null.
1805 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1806
1807 /// Transparently provide more efficient getOperand methods.
1808 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1809
1810 OtherOps getOpcode() const {
1811 return static_cast<OtherOps>(Instruction::getOpcode());
1812 }
1813
1814 // Methods for support type inquiry through isa, cast, and dyn_cast:
1815 static bool classof(const Instruction *I) {
1816 return I->getOpcode() == Instruction::Select;
1817 }
1818 static bool classof(const Value *V) {
1819 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1820 }
1821};
1822
1823template <>
1824struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1825};
1826
1827DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1827, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1827, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1828
1829//===----------------------------------------------------------------------===//
1830// VAArgInst Class
1831//===----------------------------------------------------------------------===//
1832
1833/// This class represents the va_arg llvm instruction, which returns
1834/// an argument of the specified type given a va_list and increments that list
1835///
1836class VAArgInst : public UnaryInstruction {
1837protected:
1838 // Note: Instruction needs to be a friend here to call cloneImpl.
1839 friend class Instruction;
1840
1841 VAArgInst *cloneImpl() const;
1842
1843public:
1844 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1845 Instruction *InsertBefore = nullptr)
1846 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1847 setName(NameStr);
1848 }
1849
1850 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1851 BasicBlock *InsertAtEnd)
1852 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1853 setName(NameStr);
1854 }
1855
1856 Value *getPointerOperand() { return getOperand(0); }
1857 const Value *getPointerOperand() const { return getOperand(0); }
1858 static unsigned getPointerOperandIndex() { return 0U; }
1859
1860 // Methods for support type inquiry through isa, cast, and dyn_cast:
1861 static bool classof(const Instruction *I) {
1862 return I->getOpcode() == VAArg;
1863 }
1864 static bool classof(const Value *V) {
1865 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1866 }
1867};
1868
1869//===----------------------------------------------------------------------===//
1870// ExtractElementInst Class
1871//===----------------------------------------------------------------------===//
1872
1873/// This instruction extracts a single (scalar)
1874/// element from a VectorType value
1875///
1876class ExtractElementInst : public Instruction {
1877 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878 Instruction *InsertBefore = nullptr);
1879 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880 BasicBlock *InsertAtEnd);
1881
1882protected:
1883 // Note: Instruction needs to be a friend here to call cloneImpl.
1884 friend class Instruction;
1885
1886 ExtractElementInst *cloneImpl() const;
1887
1888public:
1889 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890 const Twine &NameStr = "",
1891 Instruction *InsertBefore = nullptr) {
1892 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893 }
1894
1895 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896 const Twine &NameStr,
1897 BasicBlock *InsertAtEnd) {
1898 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899 }
1900
1901 /// Return true if an extractelement instruction can be
1902 /// formed with the specified operands.
1903 static bool isValidOperands(const Value *Vec, const Value *Idx);
1904
1905 Value *getVectorOperand() { return Op<0>(); }
1906 Value *getIndexOperand() { return Op<1>(); }
1907 const Value *getVectorOperand() const { return Op<0>(); }
1908 const Value *getIndexOperand() const { return Op<1>(); }
1909
1910 VectorType *getVectorOperandType() const {
1911 return cast<VectorType>(getVectorOperand()->getType());
1912 }
1913
1914 /// Transparently provide more efficient getOperand methods.
1915 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1916
1917 // Methods for support type inquiry through isa, cast, and dyn_cast:
1918 static bool classof(const Instruction *I) {
1919 return I->getOpcode() == Instruction::ExtractElement;
1920 }
1921 static bool classof(const Value *V) {
1922 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923 }
1924};
1925
1926template <>
1927struct OperandTraits<ExtractElementInst> :
1928 public FixedNumOperandTraits<ExtractElementInst, 2> {
1929};
1930
1931DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1931, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1931, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1932
1933//===----------------------------------------------------------------------===//
1934// InsertElementInst Class
1935//===----------------------------------------------------------------------===//
1936
1937/// This instruction inserts a single (scalar)
1938/// element into a VectorType value
1939///
1940class InsertElementInst : public Instruction {
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942 const Twine &NameStr = "",
1943 Instruction *InsertBefore = nullptr);
1944 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945 BasicBlock *InsertAtEnd);
1946
1947protected:
1948 // Note: Instruction needs to be a friend here to call cloneImpl.
1949 friend class Instruction;
1950
1951 InsertElementInst *cloneImpl() const;
1952
1953public:
1954 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955 const Twine &NameStr = "",
1956 Instruction *InsertBefore = nullptr) {
1957 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958 }
1959
1960 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961 const Twine &NameStr,
1962 BasicBlock *InsertAtEnd) {
1963 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964 }
1965
1966 /// Return true if an insertelement instruction can be
1967 /// formed with the specified operands.
1968 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969 const Value *Idx);
1970
1971 /// Overload to return most specific vector type.
1972 ///
1973 VectorType *getType() const {
1974 return cast<VectorType>(Instruction::getType());
1975 }
1976
1977 /// Transparently provide more efficient getOperand methods.
1978 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1979
1980 // Methods for support type inquiry through isa, cast, and dyn_cast:
1981 static bool classof(const Instruction *I) {
1982 return I->getOpcode() == Instruction::InsertElement;
1983 }
1984 static bool classof(const Value *V) {
1985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986 }
1987};
1988
1989template <>
1990struct OperandTraits<InsertElementInst> :
1991 public FixedNumOperandTraits<InsertElementInst, 3> {
1992};
1993
1994DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1994, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 1994, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
2000constexpr int UndefMaskElem = -1;
2001
2002/// This instruction constructs a fixed permutation of two
2003/// input vectors.
2004///
2005/// For each element of the result vector, the shuffle mask selects an element
2006/// from one of the input vectors to copy to the result. Non-negative elements
2007/// in the mask represent an index into the concatenated pair of input vectors.
2008/// UndefMaskElem (-1) specifies that the result element is undefined.
2009///
2010/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011/// requirement may be relaxed in the future.
2012class ShuffleVectorInst : public Instruction {
2013 SmallVector<int, 4> ShuffleMask;
2014 Constant *ShuffleMaskForBitcode;
2015
2016protected:
2017 // Note: Instruction needs to be a friend here to call cloneImpl.
2018 friend class Instruction;
2019
2020 ShuffleVectorInst *cloneImpl() const;
2021
2022public:
2023 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2024 const Twine &NameStr = "",
2025 Instruction *InsertBefor = nullptr);
2026 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2027 const Twine &NameStr, BasicBlock *InsertAtEnd);
2028 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2029 const Twine &NameStr = "",
2030 Instruction *InsertBefor = nullptr);
2031 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2032 const Twine &NameStr, BasicBlock *InsertAtEnd);
2033
2034 void *operator new(size_t s) { return User::operator new(s, 2); }
2035
2036 /// Swap the operands and adjust the mask to preserve the semantics
2037 /// of the instruction.
2038 void commute();
2039
2040 /// Return true if a shufflevector instruction can be
2041 /// formed with the specified operands.
2042 static bool isValidOperands(const Value *V1, const Value *V2,
2043 const Value *Mask);
2044 static bool isValidOperands(const Value *V1, const Value *V2,
2045 ArrayRef<int> Mask);
2046
2047 /// Overload to return most specific vector type.
2048 ///
2049 VectorType *getType() const {
2050 return cast<VectorType>(Instruction::getType());
2051 }
2052
2053 /// Transparently provide more efficient getOperand methods.
2054 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2055
2056 /// Return the shuffle mask value of this instruction for the given element
2057 /// index. Return UndefMaskElem if the element is undef.
2058 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059
2060 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2061 /// elements of the mask are returned as UndefMaskElem.
2062 static void getShuffleMask(const Constant *Mask,
2063 SmallVectorImpl<int> &Result);
2064
2065 /// Return the mask for this instruction as a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2068 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2069 }
2070
2071 /// Return the mask for this instruction, for use in bitcode.
2072 ///
2073 /// TODO: This is temporary until we decide a new bitcode encoding for
2074 /// shufflevector.
2075 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2076
2077 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2078 Type *ResultTy);
2079
2080 void setShuffleMask(ArrayRef<int> Mask);
2081
2082 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083
2084 /// Return true if this shuffle returns a vector with a different number of
2085 /// elements than its source vectors.
2086 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088 bool changesLength() const {
2089 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090 ->getElementCount()
2091 .getKnownMinValue();
2092 unsigned NumMaskElts = ShuffleMask.size();
2093 return NumSourceElts != NumMaskElts;
2094 }
2095
2096 /// Return true if this shuffle returns a vector with a greater number of
2097 /// elements than its source vectors.
2098 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099 bool increasesLength() const {
2100 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101 ->getElementCount()
2102 .getKnownMinValue();
2103 unsigned NumMaskElts = ShuffleMask.size();
2104 return NumSourceElts < NumMaskElts;
2105 }
2106
2107 /// Return true if this shuffle mask chooses elements from exactly one source
2108 /// vector.
2109 /// Example: <7,5,undef,7>
2110 /// This assumes that vector operands are the same length as the mask.
2111 static bool isSingleSourceMask(ArrayRef<int> Mask);
2112 static bool isSingleSourceMask(const Constant *Mask) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2113, __extension__ __PRETTY_FUNCTION__))
;
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isSingleSourceMask(MaskAsInts);
2117 }
2118
2119 /// Return true if this shuffle chooses elements from exactly one source
2120 /// vector without changing the length of that vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isSingleSource() const {
2124 return !changesLength() && isSingleSourceMask(ShuffleMask);
2125 }
2126
2127 /// Return true if this shuffle mask chooses elements from exactly one source
2128 /// vector without lane crossings. A shuffle using this mask is not
2129 /// necessarily a no-op because it may change the number of elements from its
2130 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131 /// Example: <undef,undef,2,3>
2132 static bool isIdentityMask(ArrayRef<int> Mask);
2133 static bool isIdentityMask(const Constant *Mask) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2134, __extension__ __PRETTY_FUNCTION__))
;
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isIdentityMask(MaskAsInts);
2138 }
2139
2140 /// Return true if this shuffle chooses elements from exactly one source
2141 /// vector without lane crossings and does not change the number of elements
2142 /// from its input vectors.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144 bool isIdentity() const {
2145 return !changesLength() && isIdentityMask(ShuffleMask);
2146 }
2147
2148 /// Return true if this shuffle lengthens exactly one source vector with
2149 /// undefs in the high elements.
2150 bool isIdentityWithPadding() const;
2151
2152 /// Return true if this shuffle extracts the first N elements of exactly one
2153 /// source vector.
2154 bool isIdentityWithExtract() const;
2155
2156 /// Return true if this shuffle concatenates its 2 source vectors. This
2157 /// returns false if either input is undefined. In that case, the shuffle is
2158 /// is better classified as an identity with padding operation.
2159 bool isConcat() const;
2160
2161 /// Return true if this shuffle mask chooses elements from its source vectors
2162 /// without lane crossings. A shuffle using this mask would be
2163 /// equivalent to a vector select with a constant condition operand.
2164 /// Example: <4,1,6,undef>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// This assumes that vector operands are the same length as the mask
2168 /// (a length-changing shuffle can never be equivalent to a vector select).
2169 static bool isSelectMask(ArrayRef<int> Mask);
2170 static bool isSelectMask(const Constant *Mask) {
2171 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2171, __extension__ __PRETTY_FUNCTION__))
;
2172 SmallVector<int, 16> MaskAsInts;
2173 getShuffleMask(Mask, MaskAsInts);
2174 return isSelectMask(MaskAsInts);
2175 }
2176
2177 /// Return true if this shuffle chooses elements from its source vectors
2178 /// without lane crossings and all operands have the same number of elements.
2179 /// In other words, this shuffle is equivalent to a vector select with a
2180 /// constant condition operand.
2181 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182 /// This returns false if the mask does not choose from both input vectors.
2183 /// In that case, the shuffle is better classified as an identity shuffle.
2184 /// TODO: Optionally allow length-changing shuffles.
2185 bool isSelect() const {
2186 return !changesLength() && isSelectMask(ShuffleMask);
2187 }
2188
2189 /// Return true if this shuffle mask swaps the order of elements from exactly
2190 /// one source vector.
2191 /// Example: <7,6,undef,4>
2192 /// This assumes that vector operands are the same length as the mask.
2193 static bool isReverseMask(ArrayRef<int> Mask);
2194 static bool isReverseMask(const Constant *Mask) {
2195 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2195, __extension__ __PRETTY_FUNCTION__))
;
2196 SmallVector<int, 16> MaskAsInts;
2197 getShuffleMask(Mask, MaskAsInts);
2198 return isReverseMask(MaskAsInts);
2199 }
2200
2201 /// Return true if this shuffle swaps the order of elements from exactly
2202 /// one source vector.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204 /// TODO: Optionally allow length-changing shuffles.
2205 bool isReverse() const {
2206 return !changesLength() && isReverseMask(ShuffleMask);
2207 }
2208
2209 /// Return true if this shuffle mask chooses all elements with the same value
2210 /// as the first element of exactly one source vector.
2211 /// Example: <4,undef,undef,4>
2212 /// This assumes that vector operands are the same length as the mask.
2213 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214 static bool isZeroEltSplatMask(const Constant *Mask) {
2215 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2215, __extension__ __PRETTY_FUNCTION__))
;
2216 SmallVector<int, 16> MaskAsInts;
2217 getShuffleMask(Mask, MaskAsInts);
2218 return isZeroEltSplatMask(MaskAsInts);
2219 }
2220
2221 /// Return true if all elements of this shuffle are the same value as the
2222 /// first element of exactly one source vector without changing the length
2223 /// of that vector.
2224 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225 /// TODO: Optionally allow length-changing shuffles.
2226 /// TODO: Optionally allow splats from other elements.
2227 bool isZeroEltSplat() const {
2228 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229 }
2230
2231 /// Return true if this shuffle mask is a transpose mask.
2232 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233 /// even- or odd-numbered vector elements from two n-dimensional source
2234 /// vectors and write each result into consecutive elements of an
2235 /// n-dimensional destination vector. Two shuffles are necessary to complete
2236 /// the transpose, one for the even elements and another for the odd elements.
2237 /// This description closely follows how the TRN1 and TRN2 AArch64
2238 /// instructions operate.
2239 ///
2240 /// For example, a simple 2x2 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b >
2244 /// m1 = < c, d >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249 ///
2250 /// For matrices having greater than n columns, the resulting nx2 transposed
2251 /// matrix is stored in two result vectors such that one vector contains
2252 /// interleaved elements from all the even-numbered rows and the other vector
2253 /// contains interleaved elements from all the odd-numbered rows. For example,
2254 /// a 2x4 matrix can be transposed with:
2255 ///
2256 /// ; Original matrix
2257 /// m0 = < a, b, c, d >
2258 /// m1 = < e, f, g, h >
2259 ///
2260 /// ; Transposed matrix
2261 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263 static bool isTransposeMask(ArrayRef<int> Mask);
2264 static bool isTransposeMask(const Constant *Mask) {
2265 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2265, __extension__ __PRETTY_FUNCTION__))
;
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isTransposeMask(MaskAsInts);
2269 }
2270
2271 /// Return true if this shuffle transposes the elements of its inputs without
2272 /// changing the length of the vectors. This operation may also be known as a
2273 /// merge or interleave. See the description for isTransposeMask() for the
2274 /// exact specification.
2275 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276 bool isTranspose() const {
2277 return !changesLength() && isTransposeMask(ShuffleMask);
2278 }
2279
2280 /// Return true if this shuffle mask is an extract subvector mask.
2281 /// A valid extract subvector mask returns a smaller vector from a single
2282 /// source operand. The base extraction index is returned as well.
2283 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284 int &Index);
2285 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286 int &Index) {
2287 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2287, __extension__ __PRETTY_FUNCTION__))
;
2288 // Not possible to express a shuffle mask for a scalable vector for this
2289 // case.
2290 if (isa<ScalableVectorType>(Mask->getType()))
2291 return false;
2292 SmallVector<int, 16> MaskAsInts;
2293 getShuffleMask(Mask, MaskAsInts);
2294 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295 }
2296
2297 /// Return true if this shuffle mask is an extract subvector mask.
2298 bool isExtractSubvectorMask(int &Index) const {
2299 // Not possible to express a shuffle mask for a scalable vector for this
2300 // case.
2301 if (isa<ScalableVectorType>(getType()))
2302 return false;
2303
2304 int NumSrcElts =
2305 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2306 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2307 }
2308
2309 /// Change values in a shuffle permute mask assuming the two vector operands
2310 /// of length InVecNumElts have swapped position.
2311 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2312 unsigned InVecNumElts) {
2313 for (int &Idx : Mask) {
2314 if (Idx == -1)
2315 continue;
2316 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2317 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
2318 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
;
2319 }
2320 }
2321
2322 // Methods for support type inquiry through isa, cast, and dyn_cast:
2323 static bool classof(const Instruction *I) {
2324 return I->getOpcode() == Instruction::ShuffleVector;
2325 }
2326 static bool classof(const Value *V) {
2327 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2328 }
2329};
2330
2331template <>
2332struct OperandTraits<ShuffleVectorInst>
2333 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2334
2335DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2335, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2335, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2336
2337//===----------------------------------------------------------------------===//
2338// ExtractValueInst Class
2339//===----------------------------------------------------------------------===//
2340
2341/// This instruction extracts a struct member or array
2342/// element value from an aggregate value.
2343///
2344class ExtractValueInst : public UnaryInstruction {
2345 SmallVector<unsigned, 4> Indices;
2346
2347 ExtractValueInst(const ExtractValueInst &EVI);
2348
2349 /// Constructors - Create a extractvalue instruction with a base aggregate
2350 /// value and a list of indices. The first ctor can optionally insert before
2351 /// an existing instruction, the second appends the new instruction to the
2352 /// specified BasicBlock.
2353 inline ExtractValueInst(Value *Agg,
2354 ArrayRef<unsigned> Idxs,
2355 const Twine &NameStr,
2356 Instruction *InsertBefore);
2357 inline ExtractValueInst(Value *Agg,
2358 ArrayRef<unsigned> Idxs,
2359 const Twine &NameStr, BasicBlock *InsertAtEnd);
2360
2361 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2362
2363protected:
2364 // Note: Instruction needs to be a friend here to call cloneImpl.
2365 friend class Instruction;
2366
2367 ExtractValueInst *cloneImpl() const;
2368
2369public:
2370 static ExtractValueInst *Create(Value *Agg,
2371 ArrayRef<unsigned> Idxs,
2372 const Twine &NameStr = "",
2373 Instruction *InsertBefore = nullptr) {
2374 return new
2375 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2376 }
2377
2378 static ExtractValueInst *Create(Value *Agg,
2379 ArrayRef<unsigned> Idxs,
2380 const Twine &NameStr,
2381 BasicBlock *InsertAtEnd) {
2382 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2383 }
2384
2385 /// Returns the type of the element that would be extracted
2386 /// with an extractvalue instruction with the specified parameters.
2387 ///
2388 /// Null is returned if the indices are invalid for the specified type.
2389 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2390
2391 using idx_iterator = const unsigned*;
2392
2393 inline idx_iterator idx_begin() const { return Indices.begin(); }
2394 inline idx_iterator idx_end() const { return Indices.end(); }
2395 inline iterator_range<idx_iterator> indices() const {
2396 return make_range(idx_begin(), idx_end());
2397 }
2398
2399 Value *getAggregateOperand() {
2400 return getOperand(0);
2401 }
2402 const Value *getAggregateOperand() const {
2403 return getOperand(0);
2404 }
2405 static unsigned getAggregateOperandIndex() {
2406 return 0U; // get index for modifying correct operand
2407 }
2408
2409 ArrayRef<unsigned> getIndices() const {
2410 return Indices;
2411 }
2412
2413 unsigned getNumIndices() const {
2414 return (unsigned)Indices.size();
2415 }
2416
2417 bool hasIndices() const {
2418 return true;
2419 }
2420
2421 // Methods for support type inquiry through isa, cast, and dyn_cast:
2422 static bool classof(const Instruction *I) {
2423 return I->getOpcode() == Instruction::ExtractValue;
2424 }
2425 static bool classof(const Value *V) {
2426 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2427 }
2428};
2429
2430ExtractValueInst::ExtractValueInst(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr,
2433 Instruction *InsertBefore)
2434 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2435 ExtractValue, Agg, InsertBefore) {
2436 init(Idxs, NameStr);
2437}
2438
2439ExtractValueInst::ExtractValueInst(Value *Agg,
2440 ArrayRef<unsigned> Idxs,
2441 const Twine &NameStr,
2442 BasicBlock *InsertAtEnd)
2443 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2444 ExtractValue, Agg, InsertAtEnd) {
2445 init(Idxs, NameStr);
2446}
2447
2448//===----------------------------------------------------------------------===//
2449// InsertValueInst Class
2450//===----------------------------------------------------------------------===//
2451
2452/// This instruction inserts a struct field of array element
2453/// value into an aggregate value.
2454///
2455class InsertValueInst : public Instruction {
2456 SmallVector<unsigned, 4> Indices;
2457
2458 InsertValueInst(const InsertValueInst &IVI);
2459
2460 /// Constructors - Create a insertvalue instruction with a base aggregate
2461 /// value, a value to insert, and a list of indices. The first ctor can
2462 /// optionally insert before an existing instruction, the second appends
2463 /// the new instruction to the specified BasicBlock.
2464 inline InsertValueInst(Value *Agg, Value *Val,
2465 ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 Instruction *InsertBefore);
2468 inline InsertValueInst(Value *Agg, Value *Val,
2469 ArrayRef<unsigned> Idxs,
2470 const Twine &NameStr, BasicBlock *InsertAtEnd);
2471
2472 /// Constructors - These two constructors are convenience methods because one
2473 /// and two index insertvalue instructions are so common.
2474 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2475 const Twine &NameStr = "",
2476 Instruction *InsertBefore = nullptr);
2477 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2478 BasicBlock *InsertAtEnd);
2479
2480 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2481 const Twine &NameStr);
2482
2483protected:
2484 // Note: Instruction needs to be a friend here to call cloneImpl.
2485 friend class Instruction;
2486
2487 InsertValueInst *cloneImpl() const;
2488
2489public:
2490 // allocate space for exactly two operands
2491 void *operator new(size_t s) {
2492 return User::operator new(s, 2);
2493 }
2494
2495 static InsertValueInst *Create(Value *Agg, Value *Val,
2496 ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr = "",
2498 Instruction *InsertBefore = nullptr) {
2499 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2500 }
2501
2502 static InsertValueInst *Create(Value *Agg, Value *Val,
2503 ArrayRef<unsigned> Idxs,
2504 const Twine &NameStr,
2505 BasicBlock *InsertAtEnd) {
2506 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2507 }
2508
2509 /// Transparently provide more efficient getOperand methods.
2510 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2511
2512 using idx_iterator = const unsigned*;
2513
2514 inline idx_iterator idx_begin() const { return Indices.begin(); }
2515 inline idx_iterator idx_end() const { return Indices.end(); }
2516 inline iterator_range<idx_iterator> indices() const {
2517 return make_range(idx_begin(), idx_end());
2518 }
2519
2520 Value *getAggregateOperand() {
2521 return getOperand(0);
2522 }
2523 const Value *getAggregateOperand() const {
2524 return getOperand(0);
2525 }
2526 static unsigned getAggregateOperandIndex() {
2527 return 0U; // get index for modifying correct operand
2528 }
2529
2530 Value *getInsertedValueOperand() {
2531 return getOperand(1);
2532 }
2533 const Value *getInsertedValueOperand() const {
2534 return getOperand(1);
2535 }
2536 static unsigned getInsertedValueOperandIndex() {
2537 return 1U; // get index for modifying correct operand
2538 }
2539
2540 ArrayRef<unsigned> getIndices() const {
2541 return Indices;
2542 }
2543
2544 unsigned getNumIndices() const {
2545 return (unsigned)Indices.size();
2546 }
2547
2548 bool hasIndices() const {
2549 return true;
2550 }
2551
2552 // Methods for support type inquiry through isa, cast, and dyn_cast:
2553 static bool classof(const Instruction *I) {
2554 return I->getOpcode() == Instruction::InsertValue;
2555 }
2556 static bool classof(const Value *V) {
2557 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2558 }
2559};
2560
2561template <>
2562struct OperandTraits<InsertValueInst> :
2563 public FixedNumOperandTraits<InsertValueInst, 2> {
2564};
2565
2566InsertValueInst::InsertValueInst(Value *Agg,
2567 Value *Val,
2568 ArrayRef<unsigned> Idxs,
2569 const Twine &NameStr,
2570 Instruction *InsertBefore)
2571 : Instruction(Agg->getType(), InsertValue,
2572 OperandTraits<InsertValueInst>::op_begin(this),
2573 2, InsertBefore) {
2574 init(Agg, Val, Idxs, NameStr);
2575}
2576
2577InsertValueInst::InsertValueInst(Value *Agg,
2578 Value *Val,
2579 ArrayRef<unsigned> Idxs,
2580 const Twine &NameStr,
2581 BasicBlock *InsertAtEnd)
2582 : Instruction(Agg->getType(), InsertValue,
2583 OperandTraits<InsertValueInst>::op_begin(this),
2584 2, InsertAtEnd) {
2585 init(Agg, Val, Idxs, NameStr);
2586}
2587
2588DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2588, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertValueInst>::op_begin
(const_cast<InsertValueInst*>(this))[i_nocapture].get()
); } void InsertValueInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2588, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertValueInst::getNumOperands() const { return
OperandTraits<InsertValueInst>::operands(this); } template
<int Idx_nocapture> Use &InsertValueInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &InsertValueInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2589
2590//===----------------------------------------------------------------------===//
2591// PHINode Class
2592//===----------------------------------------------------------------------===//
2593
2594// PHINode - The PHINode class is used to represent the magical mystical PHI
2595// node, that can not exist in nature, but can be synthesized in a computer
2596// scientist's overactive imagination.
2597//
2598class PHINode : public Instruction {
2599 /// The number of operands actually allocated. NumOperands is
2600 /// the number actually in use.
2601 unsigned ReservedSpace;
2602
2603 PHINode(const PHINode &PN);
2604
2605 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2606 const Twine &NameStr = "",
2607 Instruction *InsertBefore = nullptr)
2608 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2609 ReservedSpace(NumReservedValues) {
2610 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2610, __extension__ __PRETTY_FUNCTION__))
;
2611 setName(NameStr);
2612 allocHungoffUses(ReservedSpace);
2613 }
2614
2615 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2616 BasicBlock *InsertAtEnd)
2617 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2618 ReservedSpace(NumReservedValues) {
2619 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2619, __extension__ __PRETTY_FUNCTION__))
;
2620 setName(NameStr);
2621 allocHungoffUses(ReservedSpace);
2622 }
2623
2624protected:
2625 // Note: Instruction needs to be a friend here to call cloneImpl.
2626 friend class Instruction;
2627
2628 PHINode *cloneImpl() const;
2629
2630 // allocHungoffUses - this is more complicated than the generic
2631 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2632 // values and pointers to the incoming blocks, all in one allocation.
2633 void allocHungoffUses(unsigned N) {
2634 User::allocHungoffUses(N, /* IsPhi */ true);
2635 }
2636
2637public:
2638 /// Constructors - NumReservedValues is a hint for the number of incoming
2639 /// edges that this phi node will have (use 0 if you really have no idea).
2640 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2641 const Twine &NameStr = "",
2642 Instruction *InsertBefore = nullptr) {
2643 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2644 }
2645
2646 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2647 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2648 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2649 }
2650
2651 /// Provide fast operand accessors
2652 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2653
2654 // Block iterator interface. This provides access to the list of incoming
2655 // basic blocks, which parallels the list of incoming values.
2656
2657 using block_iterator = BasicBlock **;
2658 using const_block_iterator = BasicBlock * const *;
2659
2660 block_iterator block_begin() {
2661 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2662 }
2663
2664 const_block_iterator block_begin() const {
2665 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2666 }
2667
2668 block_iterator block_end() {
2669 return block_begin() + getNumOperands();
2670 }
2671
2672 const_block_iterator block_end() const {
2673 return block_begin() + getNumOperands();
2674 }
2675
2676 iterator_range<block_iterator> blocks() {
2677 return make_range(block_begin(), block_end());
2678 }
2679
2680 iterator_range<const_block_iterator> blocks() const {
2681 return make_range(block_begin(), block_end());
2682 }
2683
2684 op_range incoming_values() { return operands(); }
2685
2686 const_op_range incoming_values() const { return operands(); }
2687
2688 /// Return the number of incoming edges
2689 ///
2690 unsigned getNumIncomingValues() const { return getNumOperands(); }
2691
2692 /// Return incoming value number x
2693 ///
2694 Value *getIncomingValue(unsigned i) const {
2695 return getOperand(i);
2696 }
2697 void setIncomingValue(unsigned i, Value *V) {
2698 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2698, __extension__ __PRETTY_FUNCTION__))
;
2699 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
2700 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
;
2701 setOperand(i, V);
2702 }
2703
2704 static unsigned getOperandNumForIncomingValue(unsigned i) {
2705 return i;
2706 }
2707
2708 static unsigned getIncomingValueNumForOperand(unsigned i) {
2709 return i;
2710 }
2711
2712 /// Return incoming basic block number @p i.
2713 ///
2714 BasicBlock *getIncomingBlock(unsigned i) const {
2715 return block_begin()[i];
2716 }
2717
2718 /// Return incoming basic block corresponding
2719 /// to an operand of the PHI.
2720 ///
2721 BasicBlock *getIncomingBlock(const Use &U) const {
2722 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2722, __extension__ __PRETTY_FUNCTION__))
;
2723 return getIncomingBlock(unsigned(&U - op_begin()));
2724 }
2725
2726 /// Return incoming basic block corresponding
2727 /// to value use iterator.
2728 ///
2729 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2730 return getIncomingBlock(I.getUse());
2731 }
2732
2733 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2734 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2734, __extension__ __PRETTY_FUNCTION__))
;
2735 block_begin()[i] = BB;
2736 }
2737
2738 /// Replace every incoming basic block \p Old to basic block \p New.
2739 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2740 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2740, __extension__ __PRETTY_FUNCTION__))
;
2741 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2742 if (getIncomingBlock(Op) == Old)
2743 setIncomingBlock(Op, New);
2744 }
2745
2746 /// Add an incoming value to the end of the PHI list
2747 ///
2748 void addIncoming(Value *V, BasicBlock *BB) {
2749 if (getNumOperands() == ReservedSpace)
2750 growOperands(); // Get more space!
2751 // Initialize some new operands.
2752 setNumHungOffUseOperands(getNumOperands() + 1);
2753 setIncomingValue(getNumOperands() - 1, V);
2754 setIncomingBlock(getNumOperands() - 1, BB);
2755 }
2756
2757 /// Remove an incoming value. This is useful if a
2758 /// predecessor basic block is deleted. The value removed is returned.
2759 ///
2760 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2761 /// is true), the PHI node is destroyed and any uses of it are replaced with
2762 /// dummy values. The only time there should be zero incoming values to a PHI
2763 /// node is when the block is dead, so this strategy is sound.
2764 ///
2765 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2766
2767 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2768 int Idx = getBasicBlockIndex(BB);
2769 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2769, __extension__ __PRETTY_FUNCTION__))
;
2770 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2771 }
2772
2773 /// Return the first index of the specified basic
2774 /// block in the value list for this PHI. Returns -1 if no instance.
2775 ///
2776 int getBasicBlockIndex(const BasicBlock *BB) const {
2777 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2778 if (block_begin()[i] == BB)
2779 return i;
2780 return -1;
2781 }
2782
2783 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2784 int Idx = getBasicBlockIndex(BB);
2785 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2785, __extension__ __PRETTY_FUNCTION__))
;
2786 return getIncomingValue(Idx);
2787 }
2788
2789 /// Set every incoming value(s) for block \p BB to \p V.
2790 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2791 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2791, __extension__ __PRETTY_FUNCTION__))
;
2792 bool Found = false;
2793 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2794 if (getIncomingBlock(Op) == BB) {
2795 Found = true;
2796 setIncomingValue(Op, V);
2797 }
2798 (void)Found;
2799 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2799, __extension__ __PRETTY_FUNCTION__))
;
2800 }
2801
2802 /// If the specified PHI node always merges together the
2803 /// same value, return the value, otherwise return null.
2804 Value *hasConstantValue() const;
2805
2806 /// Whether the specified PHI node always merges
2807 /// together the same value, assuming undefs are equal to a unique
2808 /// non-undef value.
2809 bool hasConstantOrUndefValue() const;
2810
2811 /// If the PHI node is complete which means all of its parent's predecessors
2812 /// have incoming value in this PHI, return true, otherwise return false.
2813 bool isComplete() const {
2814 return llvm::all_of(predecessors(getParent()),
2815 [this](const BasicBlock *Pred) {
2816 return getBasicBlockIndex(Pred) >= 0;
2817 });
2818 }
2819
2820 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2821 static bool classof(const Instruction *I) {
2822 return I->getOpcode() == Instruction::PHI;
2823 }
2824 static bool classof(const Value *V) {
2825 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2826 }
2827
2828private:
2829 void growOperands();
2830};
2831
2832template <>
2833struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2834};
2835
2836DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2836, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<PHINode>::op_begin(const_cast
<PHINode*>(this))[i_nocapture].get()); } void PHINode::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<PHINode>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2836, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
PHINode::getNumOperands() const { return OperandTraits<PHINode
>::operands(this); } template <int Idx_nocapture> Use
&PHINode::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
PHINode::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2837
2838//===----------------------------------------------------------------------===//
2839// LandingPadInst Class
2840//===----------------------------------------------------------------------===//
2841
2842//===---------------------------------------------------------------------------
2843/// The landingpad instruction holds all of the information
2844/// necessary to generate correct exception handling. The landingpad instruction
2845/// cannot be moved from the top of a landing pad block, which itself is
2846/// accessible only from the 'unwind' edge of an invoke. This uses the
2847/// SubclassData field in Value to store whether or not the landingpad is a
2848/// cleanup.
2849///
2850class LandingPadInst : public Instruction {
2851 using CleanupField = BoolBitfieldElementT<0>;
2852
2853 /// The number of operands actually allocated. NumOperands is
2854 /// the number actually in use.
2855 unsigned ReservedSpace;
2856
2857 LandingPadInst(const LandingPadInst &LP);
2858
2859public:
2860 enum ClauseType { Catch, Filter };
2861
2862private:
2863 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2864 const Twine &NameStr, Instruction *InsertBefore);
2865 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2866 const Twine &NameStr, BasicBlock *InsertAtEnd);
2867
2868 // Allocate space for exactly zero operands.
2869 void *operator new(size_t s) {
2870 return User::operator new(s);
2871 }
2872
2873 void growOperands(unsigned Size);
2874 void init(unsigned NumReservedValues, const Twine &NameStr);
2875
2876protected:
2877 // Note: Instruction needs to be a friend here to call cloneImpl.
2878 friend class Instruction;
2879
2880 LandingPadInst *cloneImpl() const;
2881
2882public:
2883 /// Constructors - NumReservedClauses is a hint for the number of incoming
2884 /// clauses that this landingpad will have (use 0 if you really have no idea).
2885 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2886 const Twine &NameStr = "",
2887 Instruction *InsertBefore = nullptr);
2888 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2889 const Twine &NameStr, BasicBlock *InsertAtEnd);
2890
2891 /// Provide fast operand accessors
2892 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2893
2894 /// Return 'true' if this landingpad instruction is a
2895 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2896 /// doesn't catch the exception.
2897 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2898
2899 /// Indicate that this landingpad instruction is a cleanup.
2900 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2901
2902 /// Add a catch or filter clause to the landing pad.
2903 void addClause(Constant *ClauseVal);
2904
2905 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2906 /// determine what type of clause this is.
2907 Constant *getClause(unsigned Idx) const {
2908 return cast<Constant>(getOperandList()[Idx]);
2909 }
2910
2911 /// Return 'true' if the clause and index Idx is a catch clause.
2912 bool isCatch(unsigned Idx) const {
2913 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2914 }
2915
2916 /// Return 'true' if the clause and index Idx is a filter clause.
2917 bool isFilter(unsigned Idx) const {
2918 return isa<ArrayType>(getOperandList()[Idx]->getType());
2919 }
2920
2921 /// Get the number of clauses for this landing pad.
2922 unsigned getNumClauses() const { return getNumOperands(); }
2923
2924 /// Grow the size of the operand list to accommodate the new
2925 /// number of clauses.
2926 void reserveClauses(unsigned Size) { growOperands(Size); }
2927
2928 // Methods for support type inquiry through isa, cast, and dyn_cast:
2929 static bool classof(const Instruction *I) {
2930 return I->getOpcode() == Instruction::LandingPad;
2931 }
2932 static bool classof(const Value *V) {
2933 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2934 }
2935};
2936
2937template <>
2938struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2939};
2940
2941DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2941, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<LandingPadInst>::op_begin(
const_cast<LandingPadInst*>(this))[i_nocapture].get());
} void LandingPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 2941, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2942
2943//===----------------------------------------------------------------------===//
2944// ReturnInst Class
2945//===----------------------------------------------------------------------===//
2946
2947//===---------------------------------------------------------------------------
2948/// Return a value (possibly void), from a function. Execution
2949/// does not continue in this function any longer.
2950///
2951class ReturnInst : public Instruction {
2952 ReturnInst(const ReturnInst &RI);
2953
2954private:
2955 // ReturnInst constructors:
2956 // ReturnInst() - 'ret void' instruction
2957 // ReturnInst( null) - 'ret void' instruction
2958 // ReturnInst(Value* X) - 'ret X' instruction
2959 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2960 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2961 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2962 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2963 //
2964 // NOTE: If the Value* passed is of type void then the constructor behaves as
2965 // if it was passed NULL.
2966 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2967 Instruction *InsertBefore = nullptr);
2968 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2969 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2970
2971protected:
2972 // Note: Instruction needs to be a friend here to call cloneImpl.
2973 friend class Instruction;
2974
2975 ReturnInst *cloneImpl() const;
2976
2977public:
2978 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2979 Instruction *InsertBefore = nullptr) {
2980 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2981 }
2982
2983 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2984 BasicBlock *InsertAtEnd) {
2985 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2986 }
2987
2988 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2989 return new(0) ReturnInst(C, InsertAtEnd);
2990 }
2991
2992 /// Provide fast operand accessors
2993 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2994
2995 /// Convenience accessor. Returns null if there is no return value.
2996 Value *getReturnValue() const {
2997 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2998 }
2999
3000 unsigned getNumSuccessors() const { return 0; }
3001
3002 // Methods for support type inquiry through isa, cast, and dyn_cast:
3003 static bool classof(const Instruction *I) {
3004 return (I->getOpcode() == Instruction::Ret);
3005 }
3006 static bool classof(const Value *V) {
3007 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3008 }
3009
3010private:
3011 BasicBlock *getSuccessor(unsigned idx) const {
3012 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3012)
;
3013 }
3014
3015 void setSuccessor(unsigned idx, BasicBlock *B) {
3016 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3016)
;
3017 }
3018};
3019
3020template <>
3021struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3022};
3023
3024DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3024, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ReturnInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3024, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3025
3026//===----------------------------------------------------------------------===//
3027// BranchInst Class
3028//===----------------------------------------------------------------------===//
3029
3030//===---------------------------------------------------------------------------
3031/// Conditional or Unconditional Branch instruction.
3032///
3033class BranchInst : public Instruction {
3034 /// Ops list - Branches are strange. The operands are ordered:
3035 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3036 /// they don't have to check for cond/uncond branchness. These are mostly
3037 /// accessed relative from op_end().
3038 BranchInst(const BranchInst &BI);
3039 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3040 // BranchInst(BB *B) - 'br B'
3041 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3042 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3043 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3044 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3045 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3046 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3047 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3048 Instruction *InsertBefore = nullptr);
3049 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3050 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3051 BasicBlock *InsertAtEnd);
3052
3053 void AssertOK();
3054
3055protected:
3056 // Note: Instruction needs to be a friend here to call cloneImpl.
3057 friend class Instruction;
3058
3059 BranchInst *cloneImpl() const;
3060
3061public:
3062 /// Iterator type that casts an operand to a basic block.
3063 ///
3064 /// This only makes sense because the successors are stored as adjacent
3065 /// operands for branch instructions.
3066 struct succ_op_iterator
3067 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3068 std::random_access_iterator_tag, BasicBlock *,
3069 ptrdiff_t, BasicBlock *, BasicBlock *> {
3070 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3071
3072 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3073 BasicBlock *operator->() const { return operator*(); }
3074 };
3075
3076 /// The const version of `succ_op_iterator`.
3077 struct const_succ_op_iterator
3078 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3079 std::random_access_iterator_tag,
3080 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3081 const BasicBlock *> {
3082 explicit const_succ_op_iterator(const_value_op_iterator I)
3083 : iterator_adaptor_base(I) {}
3084
3085 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3086 const BasicBlock *operator->() const { return operator*(); }
3087 };
3088
3089 static BranchInst *Create(BasicBlock *IfTrue,
3090 Instruction *InsertBefore = nullptr) {
3091 return new(1) BranchInst(IfTrue, InsertBefore);
3092 }
3093
3094 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3095 Value *Cond, Instruction *InsertBefore = nullptr) {
3096 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3097 }
3098
3099 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3100 return new(1) BranchInst(IfTrue, InsertAtEnd);
3101 }
3102
3103 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3104 Value *Cond, BasicBlock *InsertAtEnd) {
3105 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3106 }
3107
3108 /// Transparently provide more efficient getOperand methods.
3109 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3110
3111 bool isUnconditional() const { return getNumOperands() == 1; }
3112 bool isConditional() const { return getNumOperands() == 3; }
3113
3114 Value *getCondition() const {
3115 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3115, __extension__ __PRETTY_FUNCTION__))
;
3116 return Op<-3>();
3117 }
3118
3119 void setCondition(Value *V) {
3120 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3120, __extension__ __PRETTY_FUNCTION__))
;
3121 Op<-3>() = V;
3122 }
3123
3124 unsigned getNumSuccessors() const { return 1+isConditional(); }
3125
3126 BasicBlock *getSuccessor(unsigned i) const {
3127 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3127, __extension__ __PRETTY_FUNCTION__))
;
3128 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3129 }
3130
3131 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3132 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3132, __extension__ __PRETTY_FUNCTION__))
;
3133 *(&Op<-1>() - idx) = NewSucc;
3134 }
3135
3136 /// Swap the successors of this branch instruction.
3137 ///
3138 /// Swaps the successors of the branch instruction. This also swaps any
3139 /// branch weight metadata associated with the instruction so that it
3140 /// continues to map correctly to each operand.
3141 void swapSuccessors();
3142
3143 iterator_range<succ_op_iterator> successors() {
3144 return make_range(
3145 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3146 succ_op_iterator(value_op_end()));
3147 }
3148
3149 iterator_range<const_succ_op_iterator> successors() const {
3150 return make_range(const_succ_op_iterator(
3151 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3152 const_succ_op_iterator(value_op_end()));
3153 }
3154
3155 // Methods for support type inquiry through isa, cast, and dyn_cast:
3156 static bool classof(const Instruction *I) {
3157 return (I->getOpcode() == Instruction::Br);
3158 }
3159 static bool classof(const Value *V) {
3160 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3161 }
3162};
3163
3164template <>
3165struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3166};
3167
3168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3168, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<BranchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3168, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned BranchInst::getNumOperands() const { return OperandTraits
<BranchInst>::operands(this); } template <int Idx_nocapture
> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3169
3170//===----------------------------------------------------------------------===//
3171// SwitchInst Class
3172//===----------------------------------------------------------------------===//
3173
3174//===---------------------------------------------------------------------------
3175/// Multiway switch
3176///
3177class SwitchInst : public Instruction {
3178 unsigned ReservedSpace;
3179
3180 // Operand[0] = Value to switch on
3181 // Operand[1] = Default basic block destination
3182 // Operand[2n ] = Value to match
3183 // Operand[2n+1] = BasicBlock to go to on match
3184 SwitchInst(const SwitchInst &SI);
3185
3186 /// Create a new switch instruction, specifying a value to switch on and a
3187 /// default destination. The number of additional cases can be specified here
3188 /// to make memory allocation more efficient. This constructor can also
3189 /// auto-insert before another instruction.
3190 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3191 Instruction *InsertBefore);
3192
3193 /// Create a new switch instruction, specifying a value to switch on and a
3194 /// default destination. The number of additional cases can be specified here
3195 /// to make memory allocation more efficient. This constructor also
3196 /// auto-inserts at the end of the specified BasicBlock.
3197 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3198 BasicBlock *InsertAtEnd);
3199
3200 // allocate space for exactly zero operands
3201 void *operator new(size_t s) {
3202 return User::operator new(s);
3203 }
3204
3205 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3206 void growOperands();
3207
3208protected:
3209 // Note: Instruction needs to be a friend here to call cloneImpl.
3210 friend class Instruction;
3211
3212 SwitchInst *cloneImpl() const;
3213
3214public:
3215 // -2
3216 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3217
3218 template <typename CaseHandleT> class CaseIteratorImpl;
3219
3220 /// A handle to a particular switch case. It exposes a convenient interface
3221 /// to both the case value and the successor block.
3222 ///
3223 /// We define this as a template and instantiate it to form both a const and
3224 /// non-const handle.
3225 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3226 class CaseHandleImpl {
3227 // Directly befriend both const and non-const iterators.
3228 friend class SwitchInst::CaseIteratorImpl<
3229 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3230
3231 protected:
3232 // Expose the switch type we're parameterized with to the iterator.
3233 using SwitchInstType = SwitchInstT;
3234
3235 SwitchInstT *SI;
3236 ptrdiff_t Index;
3237
3238 CaseHandleImpl() = default;
3239 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3240
3241 public:
3242 /// Resolves case value for current case.
3243 ConstantIntT *getCaseValue() const {
3244 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3245, __extension__ __PRETTY_FUNCTION__))
3245 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3245, __extension__ __PRETTY_FUNCTION__))
;
3246 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3247 }
3248
3249 /// Resolves successor for current case.
3250 BasicBlockT *getCaseSuccessor() const {
3251 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
3252 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
3253 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
;
3254 return SI->getSuccessor(getSuccessorIndex());
3255 }
3256
3257 /// Returns number of current case.
3258 unsigned getCaseIndex() const { return Index; }
3259
3260 /// Returns successor index for current case successor.
3261 unsigned getSuccessorIndex() const {
3262 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
3263 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
3264 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
;
3265 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3266 }
3267
3268 bool operator==(const CaseHandleImpl &RHS) const {
3269 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3269, __extension__ __PRETTY_FUNCTION__))
;
3270 return Index == RHS.Index;
3271 }
3272 };
3273
3274 using ConstCaseHandle =
3275 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3276
3277 class CaseHandle
3278 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3279 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3280
3281 public:
3282 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3283
3284 /// Sets the new value for current case.
3285 void setValue(ConstantInt *V) {
3286 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3287, __extension__ __PRETTY_FUNCTION__))
3287 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3287, __extension__ __PRETTY_FUNCTION__))
;
3288 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3289 }
3290
3291 /// Sets the new successor for current case.
3292 void setSuccessor(BasicBlock *S) {
3293 SI->setSuccessor(getSuccessorIndex(), S);
3294 }
3295 };
3296
3297 template <typename CaseHandleT>
3298 class CaseIteratorImpl
3299 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3300 std::random_access_iterator_tag,
3301 CaseHandleT> {
3302 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3303
3304 CaseHandleT Case;
3305
3306 public:
3307 /// Default constructed iterator is in an invalid state until assigned to
3308 /// a case for a particular switch.
3309 CaseIteratorImpl() = default;
3310
3311 /// Initializes case iterator for given SwitchInst and for given
3312 /// case number.
3313 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3314
3315 /// Initializes case iterator for given SwitchInst and for given
3316 /// successor index.
3317 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3318 unsigned SuccessorIndex) {
3319 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3320, __extension__ __PRETTY_FUNCTION__))
3320 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3320, __extension__ __PRETTY_FUNCTION__))
;
3321 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3322 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3323 }
3324
3325 /// Support converting to the const variant. This will be a no-op for const
3326 /// variant.
3327 operator CaseIteratorImpl<ConstCaseHandle>() const {
3328 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3329 }
3330
3331 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3332 // Check index correctness after addition.
3333 // Note: Index == getNumCases() means end().
3334 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
3335 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
3336 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
;
3337 Case.Index += N;
3338 return *this;
3339 }
3340 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3341 // Check index correctness after subtraction.
3342 // Note: Case.Index == getNumCases() means end().
3343 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
3344 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
3345 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
;
3346 Case.Index -= N;
3347 return *this;
3348 }
3349 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3350 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3350, __extension__ __PRETTY_FUNCTION__))
;
3351 return Case.Index - RHS.Case.Index;
3352 }
3353 bool operator==(const CaseIteratorImpl &RHS) const {
3354 return Case == RHS.Case;
3355 }
3356 bool operator<(const CaseIteratorImpl &RHS) const {
3357 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3357, __extension__ __PRETTY_FUNCTION__))
;
3358 return Case.Index < RHS.Case.Index;
3359 }
3360 CaseHandleT &operator*() { return Case; }
3361 const CaseHandleT &operator*() const { return Case; }
3362 };
3363
3364 using CaseIt = CaseIteratorImpl<CaseHandle>;
3365 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3366
3367 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3368 unsigned NumCases,
3369 Instruction *InsertBefore = nullptr) {
3370 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3371 }
3372
3373 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3374 unsigned NumCases, BasicBlock *InsertAtEnd) {
3375 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3376 }
3377
3378 /// Provide fast operand accessors
3379 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3380
3381 // Accessor Methods for Switch stmt
3382 Value *getCondition() const { return getOperand(0); }
3383 void setCondition(Value *V) { setOperand(0, V); }
3384
3385 BasicBlock *getDefaultDest() const {
3386 return cast<BasicBlock>(getOperand(1));
3387 }
3388
3389 void setDefaultDest(BasicBlock *DefaultCase) {
3390 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3391 }
3392
3393 /// Return the number of 'cases' in this switch instruction, excluding the
3394 /// default case.
3395 unsigned getNumCases() const {
3396 return getNumOperands()/2 - 1;
3397 }
3398
3399 /// Returns a read/write iterator that points to the first case in the
3400 /// SwitchInst.
3401 CaseIt case_begin() {
3402 return CaseIt(this, 0);
3403 }
3404
3405 /// Returns a read-only iterator that points to the first case in the
3406 /// SwitchInst.
3407 ConstCaseIt case_begin() const {
3408 return ConstCaseIt(this, 0);
3409 }
3410
3411 /// Returns a read/write iterator that points one past the last in the
3412 /// SwitchInst.
3413 CaseIt case_end() {
3414 return CaseIt(this, getNumCases());
3415 }
3416
3417 /// Returns a read-only iterator that points one past the last in the
3418 /// SwitchInst.
3419 ConstCaseIt case_end() const {
3420 return ConstCaseIt(this, getNumCases());
3421 }
3422
3423 /// Iteration adapter for range-for loops.
3424 iterator_range<CaseIt> cases() {
3425 return make_range(case_begin(), case_end());
3426 }
3427
3428 /// Constant iteration adapter for range-for loops.
3429 iterator_range<ConstCaseIt> cases() const {
3430 return make_range(case_begin(), case_end());
3431 }
3432
3433 /// Returns an iterator that points to the default case.
3434 /// Note: this iterator allows to resolve successor only. Attempt
3435 /// to resolve case value causes an assertion.
3436 /// Also note, that increment and decrement also causes an assertion and
3437 /// makes iterator invalid.
3438 CaseIt case_default() {
3439 return CaseIt(this, DefaultPseudoIndex);
3440 }
3441 ConstCaseIt case_default() const {
3442 return ConstCaseIt(this, DefaultPseudoIndex);
3443 }
3444
3445 /// Search all of the case values for the specified constant. If it is
3446 /// explicitly handled, return the case iterator of it, otherwise return
3447 /// default case iterator to indicate that it is handled by the default
3448 /// handler.
3449 CaseIt findCaseValue(const ConstantInt *C) {
3450 CaseIt I = llvm::find_if(
3451 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3452 if (I != case_end())
3453 return I;
3454
3455 return case_default();
3456 }
3457 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3458 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3459 return Case.getCaseValue() == C;
3460 });
3461 if (I != case_end())
3462 return I;
3463
3464 return case_default();
3465 }
3466
3467 /// Finds the unique case value for a given successor. Returns null if the
3468 /// successor is not found, not unique, or is the default case.
3469 ConstantInt *findCaseDest(BasicBlock *BB) {
3470 if (BB == getDefaultDest())
3471 return nullptr;
3472
3473 ConstantInt *CI = nullptr;
3474 for (auto Case : cases()) {
3475 if (Case.getCaseSuccessor() != BB)
3476 continue;
3477
3478 if (CI)
3479 return nullptr; // Multiple cases lead to BB.
3480
3481 CI = Case.getCaseValue();
3482 }
3483
3484 return CI;
3485 }
3486
3487 /// Add an entry to the switch instruction.
3488 /// Note:
3489 /// This action invalidates case_end(). Old case_end() iterator will
3490 /// point to the added case.
3491 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3492
3493 /// This method removes the specified case and its successor from the switch
3494 /// instruction. Note that this operation may reorder the remaining cases at
3495 /// index idx and above.
3496 /// Note:
3497 /// This action invalidates iterators for all cases following the one removed,
3498 /// including the case_end() iterator. It returns an iterator for the next
3499 /// case.
3500 CaseIt removeCase(CaseIt I);
3501
3502 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3503 BasicBlock *getSuccessor(unsigned idx) const {
3504 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3504, __extension__ __PRETTY_FUNCTION__))
;
3505 return cast<BasicBlock>(getOperand(idx*2+1));
3506 }
3507 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3508 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3508, __extension__ __PRETTY_FUNCTION__))
;
3509 setOperand(idx * 2 + 1, NewSucc);
3510 }
3511
3512 // Methods for support type inquiry through isa, cast, and dyn_cast:
3513 static bool classof(const Instruction *I) {
3514 return I->getOpcode() == Instruction::Switch;
3515 }
3516 static bool classof(const Value *V) {
3517 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3518 }
3519};
3520
3521/// A wrapper class to simplify modification of SwitchInst cases along with
3522/// their prof branch_weights metadata.
3523class SwitchInstProfUpdateWrapper {
3524 SwitchInst &SI;
3525 Optional<SmallVector<uint32_t, 8> > Weights = None;
3526 bool Changed = false;
3527
3528protected:
3529 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3530
3531 MDNode *buildProfBranchWeightsMD();
3532
3533 void init();
3534
3535public:
3536 using CaseWeightOpt = Optional<uint32_t>;
3537 SwitchInst *operator->() { return &SI; }
3538 SwitchInst &operator*() { return SI; }
3539 operator SwitchInst *() { return &SI; }
3540
3541 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3542
3543 ~SwitchInstProfUpdateWrapper() {
3544 if (Changed)
3545 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3546 }
3547
3548 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3549 /// correspondent branch weight.
3550 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3551
3552 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3553 /// specified branch weight for the added case.
3554 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3555
3556 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3557 /// this object to not touch the underlying SwitchInst in destructor.
3558 SymbolTableList<Instruction>::iterator eraseFromParent();
3559
3560 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3561 CaseWeightOpt getSuccessorWeight(unsigned idx);
3562
3563 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3564};
3565
3566template <>
3567struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3568};
3569
3570DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3570, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SwitchInst>::op_begin(const_cast
<SwitchInst*>(this))[i_nocapture].get()); } void SwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SwitchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210621111111+acefe0eaaf82/llvm/include/llvm/IR/Instructions.h"
, 3570, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SwitchInst::getNumOperands() const { return OperandTraits
<SwitchInst>::operands(this); } template <int Idx_nocapture
> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3571
3572//===----------------------------------------------------------------------===//
3573// IndirectBrInst Class
3574//===----------------------------------------------------------------------===//
3575
3576//===---------------------------------------------------------------------------
3577/// Indirect Branch Instruction.
3578///
3579class IndirectBrInst : public Instruction {
3580 unsigned ReservedSpace;
3581
3582 // Operand[0] = Address to jump to
3583 // Operand[n+1] = n-th destination
3584 IndirectBrInst(const IndirectBrInst &IBI);
3585
3586 /// Create a new indirectbr instruction, specifying an
3587 /// Address to jump to. The number of expected destinations can be specified
3588 /// here to make memory allocation more efficient. This constructor can also
3589 /// autoinsert before another instruction.
3590 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3591
3592 /// Create a new indirectbr instruction, specifying an
3593 /// Address to jump to. The number of expected destinations can be specified
3594 /// here to make memory allocation more efficient. This constructor also
3595 /// autoinserts at the end of the specified BasicBlock.
3596 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3597
3598 // allocate space for exactly zero operands
3599 void *operator new(size_t s) {
3600 return User::operator new(s);
3601 }
3602
3603 void init(Value *Address, unsigned NumDests);
3604 void growOperands();
3605
3606protected:
3607 // Note: Instruction needs to be a friend here to call cloneImpl.
3608 friend class Instruction;
3609
3610 IndirectBrInst *cloneImpl() const;
3611
3612public:
3613 /// Iterator type that casts an operand to a basic block.
3614 ///
3615 /// This only makes sense because the successors are stored as adjacent
3616 /// operands for indirectbr instructions.
3617 struct succ_op_