Bug Summary

File: llvm/lib/Analysis/LazyValueInfo.cpp
Warning: line 1094, column 14
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name LazyValueInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Analysis -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Analysis -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Analysis/LazyValueInfo.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Analysis/LazyValueInfo.cpp

1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/LazyValueInfo.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/Optional.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/Analysis/AssumptionCache.h"
19#include "llvm/Analysis/ConstantFolding.h"
20#include "llvm/Analysis/InstructionSimplify.h"
21#include "llvm/Analysis/TargetLibraryInfo.h"
22#include "llvm/Analysis/ValueLattice.h"
23#include "llvm/Analysis/ValueTracking.h"
24#include "llvm/IR/AssemblyAnnotationWriter.h"
25#include "llvm/IR/CFG.h"
26#include "llvm/IR/ConstantRange.h"
27#include "llvm/IR/Constants.h"
28#include "llvm/IR/DataLayout.h"
29#include "llvm/IR/Dominators.h"
30#include "llvm/IR/Instructions.h"
31#include "llvm/IR/IntrinsicInst.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/LLVMContext.h"
34#include "llvm/IR/PatternMatch.h"
35#include "llvm/IR/ValueHandle.h"
36#include "llvm/InitializePasses.h"
37#include "llvm/Support/Debug.h"
38#include "llvm/Support/FormattedStream.h"
39#include "llvm/Support/KnownBits.h"
40#include "llvm/Support/raw_ostream.h"
41#include <map>
42using namespace llvm;
43using namespace PatternMatch;
44
45#define DEBUG_TYPE "lazy-value-info"
46
47// This is the number of worklist items we will process to try to discover an
48// answer for a given value.
49static const unsigned MaxProcessedPerValue = 500;
50
51char LazyValueInfoWrapperPass::ID = 0;
52LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
53 initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
54}
55INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",static void *initializeLazyValueInfoWrapperPassPassOnce(PassRegistry
&Registry) {
56 "Lazy Value Information Analysis", false, true)static void *initializeLazyValueInfoWrapperPassPassOnce(PassRegistry
&Registry) {
57INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)initializeAssumptionCacheTrackerPass(Registry);
58INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)initializeTargetLibraryInfoWrapperPassPass(Registry);
59INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",PassInfo *PI = new PassInfo( "Lazy Value Information Analysis"
, "lazy-value-info", &LazyValueInfoWrapperPass::ID, PassInfo
::NormalCtor_t(callDefaultCtor<LazyValueInfoWrapperPass>
), false, true); Registry.registerPass(*PI, true); return PI;
} static llvm::once_flag InitializeLazyValueInfoWrapperPassPassFlag
; void llvm::initializeLazyValueInfoWrapperPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLazyValueInfoWrapperPassPassFlag
, initializeLazyValueInfoWrapperPassPassOnce, std::ref(Registry
)); }
60 "Lazy Value Information Analysis", false, true)PassInfo *PI = new PassInfo( "Lazy Value Information Analysis"
, "lazy-value-info", &LazyValueInfoWrapperPass::ID, PassInfo
::NormalCtor_t(callDefaultCtor<LazyValueInfoWrapperPass>
), false, true); Registry.registerPass(*PI, true); return PI;
} static llvm::once_flag InitializeLazyValueInfoWrapperPassPassFlag
; void llvm::initializeLazyValueInfoWrapperPassPass(PassRegistry
&Registry) { llvm::call_once(InitializeLazyValueInfoWrapperPassPassFlag
, initializeLazyValueInfoWrapperPassPassOnce, std::ref(Registry
)); }
61
62namespace llvm {
63 FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
64}
65
66AnalysisKey LazyValueAnalysis::Key;
67
68/// Returns true if this lattice value represents at most one possible value.
69/// This is as precise as any lattice value can get while still representing
70/// reachable code.
71static bool hasSingleValue(const ValueLatticeElement &Val) {
72 if (Val.isConstantRange() &&
73 Val.getConstantRange().isSingleElement())
74 // Integer constants are single element ranges
75 return true;
76 if (Val.isConstant())
77 // Non integer constants
78 return true;
79 return false;
80}
81
82/// Combine two sets of facts about the same value into a single set of
83/// facts. Note that this method is not suitable for merging facts along
84/// different paths in a CFG; that's what the mergeIn function is for. This
85/// is for merging facts gathered about the same value at the same location
86/// through two independent means.
87/// Notes:
88/// * This method does not promise to return the most precise possible lattice
89/// value implied by A and B. It is allowed to return any lattice element
90/// which is at least as strong as *either* A or B (unless our facts
91/// conflict, see below).
92/// * Due to unreachable code, the intersection of two lattice values could be
93/// contradictory. If this happens, we return some valid lattice value so as
94/// not confuse the rest of LVI. Ideally, we'd always return Undefined, but
95/// we do not make this guarantee. TODO: This would be a useful enhancement.
96static ValueLatticeElement intersect(const ValueLatticeElement &A,
97 const ValueLatticeElement &B) {
98 // Undefined is the strongest state. It means the value is known to be along
99 // an unreachable path.
100 if (A.isUnknown())
101 return A;
102 if (B.isUnknown())
103 return B;
104
105 // If we gave up for one, but got a useable fact from the other, use it.
106 if (A.isOverdefined())
107 return B;
108 if (B.isOverdefined())
109 return A;
110
111 // Can't get any more precise than constants.
112 if (hasSingleValue(A))
113 return A;
114 if (hasSingleValue(B))
115 return B;
116
117 // Could be either constant range or not constant here.
118 if (!A.isConstantRange() || !B.isConstantRange()) {
119 // TODO: Arbitrary choice, could be improved
120 return A;
121 }
122
123 // Intersect two constant ranges
124 ConstantRange Range =
125 A.getConstantRange().intersectWith(B.getConstantRange());
126 // Note: An empty range is implicitly converted to unknown or undef depending
127 // on MayIncludeUndef internally.
128 return ValueLatticeElement::getRange(
129 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
130 B.isConstantRangeIncludingUndef());
131}
132
133//===----------------------------------------------------------------------===//
134// LazyValueInfoCache Decl
135//===----------------------------------------------------------------------===//
136
137namespace {
138 /// A callback value handle updates the cache when values are erased.
139 class LazyValueInfoCache;
140 struct LVIValueHandle final : public CallbackVH {
141 LazyValueInfoCache *Parent;
142
143 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
144 : CallbackVH(V), Parent(P) { }
145
146 void deleted() override;
147 void allUsesReplacedWith(Value *V) override {
148 deleted();
149 }
150 };
151} // end anonymous namespace
152
153namespace {
154 using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
155
156 /// This is the cache kept by LazyValueInfo which
157 /// maintains information about queries across the clients' queries.
158 class LazyValueInfoCache {
159 /// This is all of the cached information for one basic block. It contains
160 /// the per-value lattice elements, as well as a separate set for
161 /// overdefined values to reduce memory usage. Additionally pointers
162 /// dereferenced in the block are cached for nullability queries.
163 struct BlockCacheEntry {
164 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
165 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
166 // None indicates that the nonnull pointers for this basic block
167 // block have not been computed yet.
168 Optional<NonNullPointerSet> NonNullPointers;
169 };
170
171 /// Cached information per basic block.
172 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
173 BlockCache;
174 /// Set of value handles used to erase values from the cache on deletion.
175 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
176
177 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
178 auto It = BlockCache.find_as(BB);
179 if (It == BlockCache.end())
180 return nullptr;
181 return It->second.get();
182 }
183
184 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
185 auto It = BlockCache.find_as(BB);
186 if (It == BlockCache.end())
187 It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
188 .first;
189
190 return It->second.get();
191 }
192
193 void addValueHandle(Value *Val) {
194 auto HandleIt = ValueHandles.find_as(Val);
195 if (HandleIt == ValueHandles.end())
196 ValueHandles.insert({ Val, this });
197 }
198
199 public:
200 void insertResult(Value *Val, BasicBlock *BB,
201 const ValueLatticeElement &Result) {
202 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
203
204 // Insert over-defined values into their own cache to reduce memory
205 // overhead.
206 if (Result.isOverdefined())
207 Entry->OverDefined.insert(Val);
208 else
209 Entry->LatticeElements.insert({ Val, Result });
210
211 addValueHandle(Val);
212 }
213
214 Optional<ValueLatticeElement> getCachedValueInfo(Value *V,
215 BasicBlock *BB) const {
216 const BlockCacheEntry *Entry = getBlockEntry(BB);
217 if (!Entry)
218 return None;
219
220 if (Entry->OverDefined.count(V))
221 return ValueLatticeElement::getOverdefined();
222
223 auto LatticeIt = Entry->LatticeElements.find_as(V);
224 if (LatticeIt == Entry->LatticeElements.end())
225 return None;
226
227 return LatticeIt->second;
228 }
229
230 bool isNonNullAtEndOfBlock(
231 Value *V, BasicBlock *BB,
232 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
233 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
234 if (!Entry->NonNullPointers) {
235 Entry->NonNullPointers = InitFn(BB);
236 for (Value *V : *Entry->NonNullPointers)
237 addValueHandle(V);
238 }
239
240 return Entry->NonNullPointers->count(V);
241 }
242
243 /// clear - Empty the cache.
244 void clear() {
245 BlockCache.clear();
246 ValueHandles.clear();
247 }
248
249 /// Inform the cache that a given value has been deleted.
250 void eraseValue(Value *V);
251
252 /// This is part of the update interface to inform the cache
253 /// that a block has been deleted.
254 void eraseBlock(BasicBlock *BB);
255
256 /// Updates the cache to remove any influence an overdefined value in
257 /// OldSucc might have (unless also overdefined in NewSucc). This just
258 /// flushes elements from the cache and does not add any.
259 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
260 };
261}
262
263void LazyValueInfoCache::eraseValue(Value *V) {
264 for (auto &Pair : BlockCache) {
265 Pair.second->LatticeElements.erase(V);
266 Pair.second->OverDefined.erase(V);
267 if (Pair.second->NonNullPointers)
268 Pair.second->NonNullPointers->erase(V);
269 }
270
271 auto HandleIt = ValueHandles.find_as(V);
272 if (HandleIt != ValueHandles.end())
273 ValueHandles.erase(HandleIt);
274}
275
276void LVIValueHandle::deleted() {
277 // This erasure deallocates *this, so it MUST happen after we're done
278 // using any and all members of *this.
279 Parent->eraseValue(*this);
280}
281
282void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
283 BlockCache.erase(BB);
284}
285
286void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
287 BasicBlock *NewSucc) {
288 // When an edge in the graph has been threaded, values that we could not
289 // determine a value for before (i.e. were marked overdefined) may be
290 // possible to solve now. We do NOT try to proactively update these values.
291 // Instead, we clear their entries from the cache, and allow lazy updating to
292 // recompute them when needed.
293
294 // The updating process is fairly simple: we need to drop cached info
295 // for all values that were marked overdefined in OldSucc, and for those same
296 // values in any successor of OldSucc (except NewSucc) in which they were
297 // also marked overdefined.
298 std::vector<BasicBlock*> worklist;
299 worklist.push_back(OldSucc);
300
301 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
302 if (!Entry || Entry->OverDefined.empty())
303 return; // Nothing to process here.
304 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
305 Entry->OverDefined.end());
306
307 // Use a worklist to perform a depth-first search of OldSucc's successors.
308 // NOTE: We do not need a visited list since any blocks we have already
309 // visited will have had their overdefined markers cleared already, and we
310 // thus won't loop to their successors.
311 while (!worklist.empty()) {
312 BasicBlock *ToUpdate = worklist.back();
313 worklist.pop_back();
314
315 // Skip blocks only accessible through NewSucc.
316 if (ToUpdate == NewSucc) continue;
317
318 // If a value was marked overdefined in OldSucc, and is here too...
319 auto OI = BlockCache.find_as(ToUpdate);
320 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
321 continue;
322 auto &ValueSet = OI->second->OverDefined;
323
324 bool changed = false;
325 for (Value *V : ValsToClear) {
326 if (!ValueSet.erase(V))
327 continue;
328
329 // If we removed anything, then we potentially need to update
330 // blocks successors too.
331 changed = true;
332 }
333
334 if (!changed) continue;
335
336 llvm::append_range(worklist, successors(ToUpdate));
337 }
338}
339
340
341namespace {
342/// An assembly annotator class to print LazyValueCache information in
343/// comments.
344class LazyValueInfoImpl;
345class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
346 LazyValueInfoImpl *LVIImpl;
347 // While analyzing which blocks we can solve values for, we need the dominator
348 // information.
349 DominatorTree &DT;
350
351public:
352 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
353 : LVIImpl(L), DT(DTree) {}
354
355 void emitBasicBlockStartAnnot(const BasicBlock *BB,
356 formatted_raw_ostream &OS) override;
357
358 void emitInstructionAnnot(const Instruction *I,
359 formatted_raw_ostream &OS) override;
360};
361}
362namespace {
363// The actual implementation of the lazy analysis and update. Note that the
364// inheritance from LazyValueInfoCache is intended to be temporary while
365// splitting the code and then transitioning to a has-a relationship.
366class LazyValueInfoImpl {
367
368 /// Cached results from previous queries
369 LazyValueInfoCache TheCache;
370
371 /// This stack holds the state of the value solver during a query.
372 /// It basically emulates the callstack of the naive
373 /// recursive value lookup process.
374 SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
375
376 /// Keeps track of which block-value pairs are in BlockValueStack.
377 DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
378
379 /// Push BV onto BlockValueStack unless it's already in there.
380 /// Returns true on success.
381 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
382 if (!BlockValueSet.insert(BV).second)
383 return false; // It's already in the stack.
384
385 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "PUSH: " << *BV.
second << " in " << BV.first->getName() <<
"\n"; } } while (false)
386 << BV.first->getName() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "PUSH: " << *BV.
second << " in " << BV.first->getName() <<
"\n"; } } while (false)
;
387 BlockValueStack.push_back(BV);
388 return true;
389 }
390
391 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
392 const DataLayout &DL; ///< A mandatory DataLayout
393
394 /// Declaration of the llvm.experimental.guard() intrinsic,
395 /// if it exists in the module.
396 Function *GuardDecl;
397
398 Optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB);
399 Optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
400 BasicBlock *T, Instruction *CxtI = nullptr);
401
402 // These methods process one work item and may add more. A false value
403 // returned means that the work item was not completely processed and must
404 // be revisited after going through the new items.
405 bool solveBlockValue(Value *Val, BasicBlock *BB);
406 Optional<ValueLatticeElement> solveBlockValueImpl(Value *Val, BasicBlock *BB);
407 Optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
408 BasicBlock *BB);
409 Optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
410 BasicBlock *BB);
411 Optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
412 BasicBlock *BB);
413 Optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
414 BasicBlock *BB);
415 Optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
416 Instruction *I, BasicBlock *BB,
417 std::function<ConstantRange(const ConstantRange &,
418 const ConstantRange &)> OpFn);
419 Optional<ValueLatticeElement> solveBlockValueBinaryOp(BinaryOperator *BBI,
420 BasicBlock *BB);
421 Optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
422 BasicBlock *BB);
423 Optional<ValueLatticeElement> solveBlockValueOverflowIntrinsic(
424 WithOverflowInst *WO, BasicBlock *BB);
425 Optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
426 BasicBlock *BB);
427 Optional<ValueLatticeElement> solveBlockValueExtractValue(
428 ExtractValueInst *EVI, BasicBlock *BB);
429 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
430 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
431 ValueLatticeElement &BBLV,
432 Instruction *BBI);
433
434 void solve();
435
436public:
437 /// This is the query interface to determine the lattice value for the
438 /// specified Value* at the context instruction (if specified) or at the
439 /// start of the block.
440 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
441 Instruction *CxtI = nullptr);
442
443 /// This is the query interface to determine the lattice value for the
444 /// specified Value* at the specified instruction using only information
445 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
446 /// recursive query is performed.
447 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
448
449 /// This is the query interface to determine the lattice
450 /// value for the specified Value* that is true on the specified edge.
451 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
452 BasicBlock *ToBB,
453 Instruction *CxtI = nullptr);
454
455 /// Complete flush all previously computed values
456 void clear() {
457 TheCache.clear();
458 }
459
460 /// Printing the LazyValueInfo Analysis.
461 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
462 LazyValueInfoAnnotatedWriter Writer(this, DTree);
463 F.print(OS, &Writer);
464 }
465
466 /// This is part of the update interface to inform the cache
467 /// that a block has been deleted.
468 void eraseBlock(BasicBlock *BB) {
469 TheCache.eraseBlock(BB);
470 }
471
472 /// This is the update interface to inform the cache that an edge from
473 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
474 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
475
476 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
477 Function *GuardDecl)
478 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
479};
480} // end anonymous namespace
481
482
483void LazyValueInfoImpl::solve() {
484 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
485 BlockValueStack.begin(), BlockValueStack.end());
486
487 unsigned processedCount = 0;
488 while (!BlockValueStack.empty()) {
489 processedCount++;
490 // Abort if we have to process too many values to get a result for this one.
491 // Because of the design of the overdefined cache currently being per-block
492 // to avoid naming-related issues (IE it wants to try to give different
493 // results for the same name in different blocks), overdefined results don't
494 // get cached globally, which in turn means we will often try to rediscover
495 // the same overdefined result again and again. Once something like
496 // PredicateInfo is used in LVI or CVP, we should be able to make the
497 // overdefined cache global, and remove this throttle.
498 if (processedCount > MaxProcessedPerValue) {
499 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "Giving up on stack because we are getting too deep\n"
; } } while (false)
500 dbgs() << "Giving up on stack because we are getting too deep\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "Giving up on stack because we are getting too deep\n"
; } } while (false)
;
501 // Fill in the original values
502 while (!StartingStack.empty()) {
503 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
504 TheCache.insertResult(e.second, e.first,
505 ValueLatticeElement::getOverdefined());
506 StartingStack.pop_back();
507 }
508 BlockValueSet.clear();
509 BlockValueStack.clear();
510 return;
511 }
512 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
513 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!")(static_cast <bool> (BlockValueSet.count(e) && "Stack value should be in BlockValueSet!"
) ? void (0) : __assert_fail ("BlockValueSet.count(e) && \"Stack value should be in BlockValueSet!\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 513, __extension__ __PRETTY_FUNCTION__
))
;
514
515 if (solveBlockValue(e.second, e.first)) {
516 // The work item was completely processed.
517 assert(BlockValueStack.back() == e && "Nothing should have been pushed!")(static_cast <bool> (BlockValueStack.back() == e &&
"Nothing should have been pushed!") ? void (0) : __assert_fail
("BlockValueStack.back() == e && \"Nothing should have been pushed!\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 517, __extension__ __PRETTY_FUNCTION__
))
;
518#ifndef NDEBUG
519 Optional<ValueLatticeElement> BBLV =
520 TheCache.getCachedValueInfo(e.second, e.first);
521 assert(BBLV && "Result should be in cache!")(static_cast <bool> (BBLV && "Result should be in cache!"
) ? void (0) : __assert_fail ("BBLV && \"Result should be in cache!\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 521, __extension__ __PRETTY_FUNCTION__
))
;
522 LLVM_DEBUG(do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "POP " << *e.second
<< " in " << e.first->getName() << " = "
<< *BBLV << "\n"; } } while (false)
523 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "POP " << *e.second
<< " in " << e.first->getName() << " = "
<< *BBLV << "\n"; } } while (false)
524 << *BBLV << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << "POP " << *e.second
<< " in " << e.first->getName() << " = "
<< *BBLV << "\n"; } } while (false)
;
525#endif
526
527 BlockValueStack.pop_back();
528 BlockValueSet.erase(e);
529 } else {
530 // More work needs to be done before revisiting.
531 assert(BlockValueStack.back() != e && "Stack should have been pushed!")(static_cast <bool> (BlockValueStack.back() != e &&
"Stack should have been pushed!") ? void (0) : __assert_fail
("BlockValueStack.back() != e && \"Stack should have been pushed!\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 531, __extension__ __PRETTY_FUNCTION__
))
;
532 }
533 }
534}
535
536Optional<ValueLatticeElement> LazyValueInfoImpl::getBlockValue(Value *Val,
537 BasicBlock *BB) {
538 // If already a constant, there is nothing to compute.
539 if (Constant *VC = dyn_cast<Constant>(Val))
540 return ValueLatticeElement::get(VC);
541
542 if (Optional<ValueLatticeElement> OptLatticeVal =
543 TheCache.getCachedValueInfo(Val, BB))
544 return OptLatticeVal;
545
546 // We have hit a cycle, assume overdefined.
547 if (!pushBlockValue({ BB, Val }))
548 return ValueLatticeElement::getOverdefined();
549
550 // Yet to be resolved.
551 return None;
552}
553
554static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
555 switch (BBI->getOpcode()) {
556 default: break;
557 case Instruction::Load:
558 case Instruction::Call:
559 case Instruction::Invoke:
560 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
561 if (isa<IntegerType>(BBI->getType())) {
562 return ValueLatticeElement::getRange(
563 getConstantRangeFromMetadata(*Ranges));
564 }
565 break;
566 };
567 // Nothing known - will be intersected with other facts
568 return ValueLatticeElement::getOverdefined();
569}
570
571bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
572 assert(!isa<Constant>(Val) && "Value should not be constant")(static_cast <bool> (!isa<Constant>(Val) &&
"Value should not be constant") ? void (0) : __assert_fail (
"!isa<Constant>(Val) && \"Value should not be constant\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 572, __extension__ __PRETTY_FUNCTION__
))
;
573 assert(!TheCache.getCachedValueInfo(Val, BB) &&(static_cast <bool> (!TheCache.getCachedValueInfo(Val, BB
) && "Value should not be in cache") ? void (0) : __assert_fail
("!TheCache.getCachedValueInfo(Val, BB) && \"Value should not be in cache\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 574, __extension__ __PRETTY_FUNCTION__
))
574 "Value should not be in cache")(static_cast <bool> (!TheCache.getCachedValueInfo(Val, BB
) && "Value should not be in cache") ? void (0) : __assert_fail
("!TheCache.getCachedValueInfo(Val, BB) && \"Value should not be in cache\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 574, __extension__ __PRETTY_FUNCTION__
))
;
575
576 // Hold off inserting this value into the Cache in case we have to return
577 // false and come back later.
578 Optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
579 if (!Res)
580 // Work pushed, will revisit
581 return false;
582
583 TheCache.insertResult(Val, BB, *Res);
584 return true;
585}
586
587Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueImpl(
588 Value *Val, BasicBlock *BB) {
589 Instruction *BBI = dyn_cast<Instruction>(Val);
590 if (!BBI || BBI->getParent() != BB)
591 return solveBlockValueNonLocal(Val, BB);
592
593 if (PHINode *PN = dyn_cast<PHINode>(BBI))
594 return solveBlockValuePHINode(PN, BB);
595
596 if (auto *SI = dyn_cast<SelectInst>(BBI))
597 return solveBlockValueSelect(SI, BB);
598
599 // If this value is a nonnull pointer, record it's range and bailout. Note
600 // that for all other pointer typed values, we terminate the search at the
601 // definition. We could easily extend this to look through geps, bitcasts,
602 // and the like to prove non-nullness, but it's not clear that's worth it
603 // compile time wise. The context-insensitive value walk done inside
604 // isKnownNonZero gets most of the profitable cases at much less expense.
605 // This does mean that we have a sensitivity to where the defining
606 // instruction is placed, even if it could legally be hoisted much higher.
607 // That is unfortunate.
608 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
609 if (PT && isKnownNonZero(BBI, DL))
610 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
611
612 if (BBI->getType()->isIntegerTy()) {
613 if (auto *CI = dyn_cast<CastInst>(BBI))
614 return solveBlockValueCast(CI, BB);
615
616 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
617 return solveBlockValueBinaryOp(BO, BB);
618
619 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
620 return solveBlockValueExtractValue(EVI, BB);
621
622 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
623 return solveBlockValueIntrinsic(II, BB);
624 }
625
626 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - unknown inst def found.\n"; }
} while (false)
627 << "' - unknown inst def found.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - unknown inst def found.\n"; }
} while (false)
;
628 return getFromRangeMetadata(BBI);
629}
630
631static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
632 // TODO: Use NullPointerIsDefined instead.
633 if (Ptr->getType()->getPointerAddressSpace() == 0)
634 PtrSet.insert(getUnderlyingObject(Ptr));
635}
636
637static void AddNonNullPointersByInstruction(
638 Instruction *I, NonNullPointerSet &PtrSet) {
639 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
640 AddNonNullPointer(L->getPointerOperand(), PtrSet);
641 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
642 AddNonNullPointer(S->getPointerOperand(), PtrSet);
643 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
644 if (MI->isVolatile()) return;
645
646 // FIXME: check whether it has a valuerange that excludes zero?
647 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
648 if (!Len || Len->isZero()) return;
649
650 AddNonNullPointer(MI->getRawDest(), PtrSet);
651 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
652 AddNonNullPointer(MTI->getRawSource(), PtrSet);
653 }
654}
655
656bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
657 if (NullPointerIsDefined(BB->getParent(),
658 Val->getType()->getPointerAddressSpace()))
659 return false;
660
661 Val = Val->stripInBoundsOffsets();
662 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
663 NonNullPointerSet NonNullPointers;
664 for (Instruction &I : *BB)
665 AddNonNullPointersByInstruction(&I, NonNullPointers);
666 return NonNullPointers;
667 });
668}
669
670Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueNonLocal(
671 Value *Val, BasicBlock *BB) {
672 ValueLatticeElement Result; // Start Undefined.
673
674 // If this is the entry block, we must be asking about an argument. The
675 // value is overdefined.
676 if (BB->isEntryBlock()) {
677 assert(isa<Argument>(Val) && "Unknown live-in to the entry block")(static_cast <bool> (isa<Argument>(Val) &&
"Unknown live-in to the entry block") ? void (0) : __assert_fail
("isa<Argument>(Val) && \"Unknown live-in to the entry block\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 677, __extension__ __PRETTY_FUNCTION__
))
;
678 return ValueLatticeElement::getOverdefined();
679 }
680
681 // Loop over all of our predecessors, merging what we know from them into
682 // result. If we encounter an unexplored predecessor, we eagerly explore it
683 // in a depth first manner. In practice, this has the effect of discovering
684 // paths we can't analyze eagerly without spending compile times analyzing
685 // other paths. This heuristic benefits from the fact that predecessors are
686 // frequently arranged such that dominating ones come first and we quickly
687 // find a path to function entry. TODO: We should consider explicitly
688 // canonicalizing to make this true rather than relying on this happy
689 // accident.
690 for (BasicBlock *Pred : predecessors(BB)) {
691 Optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
692 if (!EdgeResult)
693 // Explore that input, then return here
694 return None;
695
696 Result.mergeIn(*EdgeResult);
697
698 // If we hit overdefined, exit early. The BlockVals entry is already set
699 // to overdefined.
700 if (Result.isOverdefined()) {
701 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined because of pred (non local).\n"
; } } while (false)
702 << "' - overdefined because of pred (non local).\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined because of pred (non local).\n"
; } } while (false)
;
703 return Result;
704 }
705 }
706
707 // Return the merged value, which is more precise than 'overdefined'.
708 assert(!Result.isOverdefined())(static_cast <bool> (!Result.isOverdefined()) ? void (0
) : __assert_fail ("!Result.isOverdefined()", "llvm/lib/Analysis/LazyValueInfo.cpp"
, 708, __extension__ __PRETTY_FUNCTION__))
;
709 return Result;
710}
711
712Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValuePHINode(
713 PHINode *PN, BasicBlock *BB) {
714 ValueLatticeElement Result; // Start Undefined.
715
716 // Loop over all of our predecessors, merging what we know from them into
717 // result. See the comment about the chosen traversal order in
718 // solveBlockValueNonLocal; the same reasoning applies here.
719 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
720 BasicBlock *PhiBB = PN->getIncomingBlock(i);
721 Value *PhiVal = PN->getIncomingValue(i);
722 // Note that we can provide PN as the context value to getEdgeValue, even
723 // though the results will be cached, because PN is the value being used as
724 // the cache key in the caller.
725 Optional<ValueLatticeElement> EdgeResult =
726 getEdgeValue(PhiVal, PhiBB, BB, PN);
727 if (!EdgeResult)
728 // Explore that input, then return here
729 return None;
730
731 Result.mergeIn(*EdgeResult);
732
733 // If we hit overdefined, exit early. The BlockVals entry is already set
734 // to overdefined.
735 if (Result.isOverdefined()) {
736 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined because of pred (local).\n"
; } } while (false)
737 << "' - overdefined because of pred (local).\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined because of pred (local).\n"
; } } while (false)
;
738
739 return Result;
740 }
741 }
742
743 // Return the merged value, which is more precise than 'overdefined'.
744 assert(!Result.isOverdefined() && "Possible PHI in entry block?")(static_cast <bool> (!Result.isOverdefined() &&
"Possible PHI in entry block?") ? void (0) : __assert_fail (
"!Result.isOverdefined() && \"Possible PHI in entry block?\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 744, __extension__ __PRETTY_FUNCTION__
))
;
745 return Result;
746}
747
748static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
749 bool isTrueDest = true);
750
751// If we can determine a constraint on the value given conditions assumed by
752// the program, intersect those constraints with BBLV
753void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
754 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
755 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
756 if (!BBI)
757 return;
758
759 BasicBlock *BB = BBI->getParent();
760 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
761 if (!AssumeVH)
762 continue;
763
764 // Only check assumes in the block of the context instruction. Other
765 // assumes will have already been taken into account when the value was
766 // propagated from predecessor blocks.
767 auto *I = cast<CallInst>(AssumeVH);
768 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
769 continue;
770
771 BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
772 }
773
774 // If guards are not used in the module, don't spend time looking for them
775 if (GuardDecl && !GuardDecl->use_empty() &&
776 BBI->getIterator() != BB->begin()) {
777 for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()),
778 BB->rend())) {
779 Value *Cond = nullptr;
780 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
781 BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
782 }
783 }
784
785 if (BBLV.isOverdefined()) {
786 // Check whether we're checking at the terminator, and the pointer has
787 // been dereferenced in this block.
788 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
789 if (PTy && BB->getTerminator() == BBI &&
790 isNonNullAtEndOfBlock(Val, BB))
791 BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
792 }
793}
794
795Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueSelect(
796 SelectInst *SI, BasicBlock *BB) {
797 // Recurse on our inputs if needed
798 Optional<ValueLatticeElement> OptTrueVal =
799 getBlockValue(SI->getTrueValue(), BB);
800 if (!OptTrueVal)
801 return None;
802 ValueLatticeElement &TrueVal = *OptTrueVal;
803
804 Optional<ValueLatticeElement> OptFalseVal =
805 getBlockValue(SI->getFalseValue(), BB);
806 if (!OptFalseVal)
807 return None;
808 ValueLatticeElement &FalseVal = *OptFalseVal;
809
810 if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
811 const ConstantRange &TrueCR = TrueVal.getConstantRange();
812 const ConstantRange &FalseCR = FalseVal.getConstantRange();
813 Value *LHS = nullptr;
814 Value *RHS = nullptr;
815 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
816 // Is this a min specifically of our two inputs? (Avoid the risk of
817 // ValueTracking getting smarter looking back past our immediate inputs.)
818 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
819 LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
820 ConstantRange ResultCR = [&]() {
821 switch (SPR.Flavor) {
822 default:
823 llvm_unreachable("unexpected minmax type!")::llvm::llvm_unreachable_internal("unexpected minmax type!", "llvm/lib/Analysis/LazyValueInfo.cpp"
, 823)
;
824 case SPF_SMIN: /// Signed minimum
825 return TrueCR.smin(FalseCR);
826 case SPF_UMIN: /// Unsigned minimum
827 return TrueCR.umin(FalseCR);
828 case SPF_SMAX: /// Signed maximum
829 return TrueCR.smax(FalseCR);
830 case SPF_UMAX: /// Unsigned maximum
831 return TrueCR.umax(FalseCR);
832 };
833 }();
834 return ValueLatticeElement::getRange(
835 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
836 FalseVal.isConstantRangeIncludingUndef());
837 }
838
839 if (SPR.Flavor == SPF_ABS) {
840 if (LHS == SI->getTrueValue())
841 return ValueLatticeElement::getRange(
842 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
843 if (LHS == SI->getFalseValue())
844 return ValueLatticeElement::getRange(
845 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
846 }
847
848 if (SPR.Flavor == SPF_NABS) {
849 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
850 if (LHS == SI->getTrueValue())
851 return ValueLatticeElement::getRange(
852 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
853 if (LHS == SI->getFalseValue())
854 return ValueLatticeElement::getRange(
855 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
856 }
857 }
858
859 // Can we constrain the facts about the true and false values by using the
860 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
861 // TODO: We could potentially refine an overdefined true value above.
862 Value *Cond = SI->getCondition();
863 TrueVal = intersect(TrueVal,
864 getValueFromCondition(SI->getTrueValue(), Cond, true));
865 FalseVal = intersect(FalseVal,
866 getValueFromCondition(SI->getFalseValue(), Cond, false));
867
868 ValueLatticeElement Result = TrueVal;
869 Result.mergeIn(FalseVal);
870 return Result;
871}
872
873Optional<ConstantRange> LazyValueInfoImpl::getRangeFor(Value *V,
874 Instruction *CxtI,
875 BasicBlock *BB) {
876 Optional<ValueLatticeElement> OptVal = getBlockValue(V, BB);
877 if (!OptVal)
878 return None;
879
880 ValueLatticeElement &Val = *OptVal;
881 intersectAssumeOrGuardBlockValueConstantRange(V, Val, CxtI);
882 if (Val.isConstantRange())
883 return Val.getConstantRange();
884
885 const unsigned OperandBitWidth = DL.getTypeSizeInBits(V->getType());
886 return ConstantRange::getFull(OperandBitWidth);
887}
888
889Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueCast(
890 CastInst *CI, BasicBlock *BB) {
891 // Without knowing how wide the input is, we can't analyze it in any useful
892 // way.
893 if (!CI->getOperand(0)->getType()->isSized())
894 return ValueLatticeElement::getOverdefined();
895
896 // Filter out casts we don't know how to reason about before attempting to
897 // recurse on our operand. This can cut a long search short if we know we're
898 // not going to be able to get any useful information anways.
899 switch (CI->getOpcode()) {
900 case Instruction::Trunc:
901 case Instruction::SExt:
902 case Instruction::ZExt:
903 case Instruction::BitCast:
904 break;
905 default:
906 // Unhandled instructions are overdefined.
907 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined (unknown cast).\n"
; } } while (false)
908 << "' - overdefined (unknown cast).\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined (unknown cast).\n"
; } } while (false)
;
909 return ValueLatticeElement::getOverdefined();
910 }
911
912 // Figure out the range of the LHS. If that fails, we still apply the
913 // transfer rule on the full set since we may be able to locally infer
914 // interesting facts.
915 Optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
916 if (!LHSRes.hasValue())
917 // More work to do before applying this transfer rule.
918 return None;
919 const ConstantRange &LHSRange = LHSRes.getValue();
920
921 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
922
923 // NOTE: We're currently limited by the set of operations that ConstantRange
924 // can evaluate symbolically. Enhancing that set will allows us to analyze
925 // more definitions.
926 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
927 ResultBitWidth));
928}
929
930Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
931 Instruction *I, BasicBlock *BB,
932 std::function<ConstantRange(const ConstantRange &,
933 const ConstantRange &)> OpFn) {
934 // Figure out the ranges of the operands. If that fails, use a
935 // conservative range, but apply the transfer rule anyways. This
936 // lets us pick up facts from expressions like "and i32 (call i32
937 // @foo()), 32"
938 Optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
939 Optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
940 if (!LHSRes.hasValue() || !RHSRes.hasValue())
941 // More work to do before applying this transfer rule.
942 return None;
943
944 const ConstantRange &LHSRange = LHSRes.getValue();
945 const ConstantRange &RHSRange = RHSRes.getValue();
946 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
947}
948
949Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueBinaryOp(
950 BinaryOperator *BO, BasicBlock *BB) {
951 assert(BO->getOperand(0)->getType()->isSized() &&(static_cast <bool> (BO->getOperand(0)->getType()
->isSized() && "all operands to binary operators are sized"
) ? void (0) : __assert_fail ("BO->getOperand(0)->getType()->isSized() && \"all operands to binary operators are sized\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 952, __extension__ __PRETTY_FUNCTION__
))
952 "all operands to binary operators are sized")(static_cast <bool> (BO->getOperand(0)->getType()
->isSized() && "all operands to binary operators are sized"
) ? void (0) : __assert_fail ("BO->getOperand(0)->getType()->isSized() && \"all operands to binary operators are sized\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 952, __extension__ __PRETTY_FUNCTION__
))
;
953 if (BO->getOpcode() == Instruction::Xor) {
954 // Xor is the only operation not supported by ConstantRange::binaryOp().
955 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined (unknown binary operator).\n"
; } } while (false)
956 << "' - overdefined (unknown binary operator).\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined (unknown binary operator).\n"
; } } while (false)
;
957 return ValueLatticeElement::getOverdefined();
958 }
959
960 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
961 unsigned NoWrapKind = 0;
962 if (OBO->hasNoUnsignedWrap())
963 NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap;
964 if (OBO->hasNoSignedWrap())
965 NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap;
966
967 return solveBlockValueBinaryOpImpl(
968 BO, BB,
969 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
970 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
971 });
972 }
973
974 return solveBlockValueBinaryOpImpl(
975 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
976 return CR1.binaryOp(BO->getOpcode(), CR2);
977 });
978}
979
980Optional<ValueLatticeElement>
981LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
982 BasicBlock *BB) {
983 return solveBlockValueBinaryOpImpl(
984 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
985 return CR1.binaryOp(WO->getBinaryOp(), CR2);
986 });
987}
988
989Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueIntrinsic(
990 IntrinsicInst *II, BasicBlock *BB) {
991 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
992 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - unknown intrinsic.\n"; } } while
(false)
993 << "' - unknown intrinsic.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - unknown intrinsic.\n"; } } while
(false)
;
994 return getFromRangeMetadata(II);
995 }
996
997 SmallVector<ConstantRange, 2> OpRanges;
998 for (Value *Op : II->args()) {
999 Optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1000 if (!Range)
1001 return None;
1002 OpRanges.push_back(*Range);
1003 }
1004
1005 return ValueLatticeElement::getRange(
1006 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges));
1007}
1008
1009Optional<ValueLatticeElement> LazyValueInfoImpl::solveBlockValueExtractValue(
1010 ExtractValueInst *EVI, BasicBlock *BB) {
1011 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1012 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1013 return solveBlockValueOverflowIntrinsic(WO, BB);
1014
1015 // Handle extractvalue of insertvalue to allow further simplification
1016 // based on replaced with.overflow intrinsics.
1017 if (Value *V = SimplifyExtractValueInst(
1018 EVI->getAggregateOperand(), EVI->getIndices(),
1019 EVI->getModule()->getDataLayout()))
1020 return getBlockValue(V, BB);
1021
1022 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined (unknown extractvalue).\n"
; } } while (false)
1023 << "' - overdefined (unknown extractvalue).\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lazy-value-info")) { dbgs() << " compute BB '" <<
BB->getName() << "' - overdefined (unknown extractvalue).\n"
; } } while (false)
;
1024 return ValueLatticeElement::getOverdefined();
1025}
1026
1027static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1028 ICmpInst::Predicate Pred) {
1029 if (LHS == Val)
1030 return true;
1031
1032 // Handle range checking idiom produced by InstCombine. We will subtract the
1033 // offset from the allowed range for RHS in this case.
1034 const APInt *C;
1035 if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
1036 Offset = *C;
1037 return true;
1038 }
1039
1040 // Handle the symmetric case. This appears in saturation patterns like
1041 // (x == 16) ? 16 : (x + 1).
1042 if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
1043 Offset = -*C;
1044 return true;
1045 }
1046
1047 // If (x | y) < C, then (x < C) && (y < C).
1048 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1049 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1050 return true;
1051
1052 // If (x & y) > C, then (x > C) && (y > C).
1053 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1054 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1055 return true;
1056
1057 return false;
1058}
1059
1060/// Get value range for a "(Val + Offset) Pred RHS" condition.
1061static ValueLatticeElement getValueFromSimpleICmpCondition(
1062 CmpInst::Predicate Pred, Value *RHS, const APInt &Offset) {
1063 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1064 /*isFullSet=*/true);
1065 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
1066 RHSRange = ConstantRange(CI->getValue());
1067 else if (Instruction *I = dyn_cast<Instruction>(RHS))
1068 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1069 RHSRange = getConstantRangeFromMetadata(*Ranges);
1070
1071 ConstantRange TrueValues =
1072 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1073 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1074}
1075
1076static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
1077 bool isTrueDest) {
1078 Value *LHS = ICI->getOperand(0);
1079 Value *RHS = ICI->getOperand(1);
1080
1081 // Get the predicate that must hold along the considered edge.
1082 CmpInst::Predicate EdgePred =
1083 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
7
Assuming 'isTrueDest' is true
8
'?' condition is true
1084
1085 if (isa<Constant>(RHS)) {
9
Assuming 'RHS' is a 'Constant'
1086 if (ICI->isEquality() && LHS == Val) {
10
Calling 'ICmpInst::isEquality'
17
Returning from 'ICmpInst::isEquality'
18
Assuming 'LHS' is equal to 'Val'
19
Taking true branch
1087 if (EdgePred
19.1
'EdgePred' is not equal to ICMP_EQ
19.1
'EdgePred' is not equal to ICMP_EQ
== ICmpInst::ICMP_EQ)
20
Taking false branch
1088 return ValueLatticeElement::get(cast<Constant>(RHS));
1089 else if (!isa<UndefValue>(RHS))
21
Assuming 'RHS' is a 'UndefValue'
22
Taking false branch
1090 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1091 }
1092 }
1093
1094 Type *Ty = Val->getType();
23
Called C++ object pointer is null
1095 if (!Ty->isIntegerTy())
1096 return ValueLatticeElement::getOverdefined();
1097
1098 unsigned BitWidth = Ty->getScalarSizeInBits();
1099 APInt Offset(BitWidth, 0);
1100 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1101 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset);
1102
1103 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1104 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1105 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset);
1106
1107 const APInt *Mask, *C;
1108 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1109 match(RHS, m_APInt(C))) {
1110 // If (Val & Mask) == C then all the masked bits are known and we can
1111 // compute a value range based on that.
1112 if (EdgePred == ICmpInst::ICMP_EQ) {
1113 KnownBits Known;
1114 Known.Zero = ~*C & *Mask;
1115 Known.One = *C & *Mask;
1116 return ValueLatticeElement::getRange(
1117 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1118 }
1119 // If (Val & Mask) != 0 then the value must be larger than the lowest set
1120 // bit of Mask.
1121 if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
1122 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1123 APInt::getOneBitSet(BitWidth, Mask->countTrailingZeros()),
1124 APInt::getZero(BitWidth)));
1125 }
1126 }
1127
1128 // If (X urem Modulus) >= C, then X >= C.
1129 // TODO: An upper bound could be computed as well.
1130 if (match(LHS, m_URem(m_Specific(Val), m_Value())) &&
1131 match(RHS, m_APInt(C))) {
1132 // Use the icmp region so we don't have to deal with different predicates.
1133 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1134 if (!CR.isEmptySet())
1135 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1136 CR.getUnsignedMin(), APInt(BitWidth, 0)));
1137 }
1138
1139 return ValueLatticeElement::getOverdefined();
1140}
1141
1142// Handle conditions of the form
1143// extractvalue(op.with.overflow(%x, C), 1).
1144static ValueLatticeElement getValueFromOverflowCondition(
1145 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1146 // TODO: This only works with a constant RHS for now. We could also compute
1147 // the range of the RHS, but this doesn't fit into the current structure of
1148 // the edge value calculation.
1149 const APInt *C;
1150 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1151 return ValueLatticeElement::getOverdefined();
1152
1153 // Calculate the possible values of %x for which no overflow occurs.
1154 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1155 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1156
1157 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1158 // constrained to it's inverse (all values that might cause overflow).
1159 if (IsTrueDest)
1160 NWR = NWR.inverse();
1161 return ValueLatticeElement::getRange(NWR);
1162}
1163
1164static Optional<ValueLatticeElement>
1165getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
1166 bool isRevisit,
1167 SmallDenseMap<Value *, ValueLatticeElement> &Visited,
1168 SmallVectorImpl<Value *> &Worklist) {
1169 if (!isRevisit) {
1
Assuming 'isRevisit' is false
2
Taking true branch
1170 if (ICmpInst *ICI
3.1
'ICI' is non-null
3.1
'ICI' is non-null
= dyn_cast<ICmpInst>(Cond))
3
Assuming 'Cond' is a 'ICmpInst'
4
Taking true branch
1171 return getValueFromICmpCondition(Val, ICI, isTrueDest);
5
Passing value via 1st parameter 'Val'
6
Calling 'getValueFromICmpCondition'
1172
1173 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1174 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1175 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1176 return getValueFromOverflowCondition(Val, WO, isTrueDest);
1177 }
1178
1179 Value *L, *R;
1180 bool IsAnd;
1181 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1182 IsAnd = true;
1183 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1184 IsAnd = false;
1185 else
1186 return ValueLatticeElement::getOverdefined();
1187
1188 auto LV = Visited.find(L);
1189 auto RV = Visited.find(R);
1190
1191 // if (L && R) -> intersect L and R
1192 // if (!(L || R)) -> intersect L and R
1193 // if (L || R) -> union L and R
1194 // if (!(L && R)) -> union L and R
1195 if ((isTrueDest ^ IsAnd) && (LV != Visited.end())) {
1196 ValueLatticeElement V = LV->second;
1197 if (V.isOverdefined())
1198 return V;
1199 if (RV != Visited.end()) {
1200 V.mergeIn(RV->second);
1201 return V;
1202 }
1203 }
1204
1205 if (LV == Visited.end() || RV == Visited.end()) {
1206 assert(!isRevisit)(static_cast <bool> (!isRevisit) ? void (0) : __assert_fail
("!isRevisit", "llvm/lib/Analysis/LazyValueInfo.cpp", 1206, __extension__
__PRETTY_FUNCTION__))
;
1207 if (LV == Visited.end())
1208 Worklist.push_back(L);
1209 if (RV == Visited.end())
1210 Worklist.push_back(R);
1211 return None;
1212 }
1213
1214 return intersect(LV->second, RV->second);
1215}
1216
1217ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
1218 bool isTrueDest) {
1219 assert(Cond && "precondition")(static_cast <bool> (Cond && "precondition") ? void
(0) : __assert_fail ("Cond && \"precondition\"", "llvm/lib/Analysis/LazyValueInfo.cpp"
, 1219, __extension__ __PRETTY_FUNCTION__))
;
1220 SmallDenseMap<Value*, ValueLatticeElement> Visited;
1221 SmallVector<Value *> Worklist;
1222
1223 Worklist.push_back(Cond);
1224 do {
1225 Value *CurrentCond = Worklist.back();
1226 // Insert an Overdefined placeholder into the set to prevent
1227 // infinite recursion if there exists IRs that use not
1228 // dominated by its def as in this example:
1229 // "%tmp3 = or i1 undef, %tmp4"
1230 // "%tmp4 = or i1 undef, %tmp3"
1231 auto Iter =
1232 Visited.try_emplace(CurrentCond, ValueLatticeElement::getOverdefined());
1233 bool isRevisit = !Iter.second;
1234 Optional<ValueLatticeElement> Result = getValueFromConditionImpl(
1235 Val, CurrentCond, isTrueDest, isRevisit, Visited, Worklist);
1236 if (Result) {
1237 Visited[CurrentCond] = *Result;
1238 Worklist.pop_back();
1239 }
1240 } while (!Worklist.empty());
1241
1242 auto Result = Visited.find(Cond);
1243 assert(Result != Visited.end())(static_cast <bool> (Result != Visited.end()) ? void (0
) : __assert_fail ("Result != Visited.end()", "llvm/lib/Analysis/LazyValueInfo.cpp"
, 1243, __extension__ __PRETTY_FUNCTION__))
;
1244 return Result->second;
1245}
1246
1247// Return true if Usr has Op as an operand, otherwise false.
1248static bool usesOperand(User *Usr, Value *Op) {
1249 return is_contained(Usr->operands(), Op);
1250}
1251
1252// Return true if the instruction type of Val is supported by
1253// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1254// Call this before calling constantFoldUser() to find out if it's even worth
1255// attempting to call it.
1256static bool isOperationFoldable(User *Usr) {
1257 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1258}
1259
1260// Check if Usr can be simplified to an integer constant when the value of one
1261// of its operands Op is an integer constant OpConstVal. If so, return it as an
1262// lattice value range with a single element or otherwise return an overdefined
1263// lattice value.
1264static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1265 const APInt &OpConstVal,
1266 const DataLayout &DL) {
1267 assert(isOperationFoldable(Usr) && "Precondition")(static_cast <bool> (isOperationFoldable(Usr) &&
"Precondition") ? void (0) : __assert_fail ("isOperationFoldable(Usr) && \"Precondition\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 1267, __extension__ __PRETTY_FUNCTION__
))
;
1268 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1269 // Check if Usr can be simplified to a constant.
1270 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1271 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op")(static_cast <bool> (CI->getOperand(0) == Op &&
"Operand 0 isn't Op") ? void (0) : __assert_fail ("CI->getOperand(0) == Op && \"Operand 0 isn't Op\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 1271, __extension__ __PRETTY_FUNCTION__
))
;
1272 if (auto *C = dyn_cast_or_null<ConstantInt>(
1273 SimplifyCastInst(CI->getOpcode(), OpConst,
1274 CI->getDestTy(), DL))) {
1275 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1276 }
1277 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1278 bool Op0Match = BO->getOperand(0) == Op;
1279 bool Op1Match = BO->getOperand(1) == Op;
1280 assert((Op0Match || Op1Match) &&(static_cast <bool> ((Op0Match || Op1Match) && "Operand 0 nor Operand 1 isn't a match"
) ? void (0) : __assert_fail ("(Op0Match || Op1Match) && \"Operand 0 nor Operand 1 isn't a match\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 1281, __extension__ __PRETTY_FUNCTION__
))
1281 "Operand 0 nor Operand 1 isn't a match")(static_cast <bool> ((Op0Match || Op1Match) && "Operand 0 nor Operand 1 isn't a match"
) ? void (0) : __assert_fail ("(Op0Match || Op1Match) && \"Operand 0 nor Operand 1 isn't a match\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 1281, __extension__ __PRETTY_FUNCTION__
))
;
1282 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1283 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1284 if (auto *C = dyn_cast_or_null<ConstantInt>(
1285 SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1286 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1287 }
1288 } else if (isa<FreezeInst>(Usr)) {
1289 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op")(static_cast <bool> (cast<FreezeInst>(Usr)->getOperand
(0) == Op && "Operand 0 isn't Op") ? void (0) : __assert_fail
("cast<FreezeInst>(Usr)->getOperand(0) == Op && \"Operand 0 isn't Op\""
, "llvm/lib/Analysis/LazyValueInfo.cpp", 1289, __extension__ __PRETTY_FUNCTION__
))
;
1290 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1291 }
1292 return ValueLatticeElement::getOverdefined();
1293}
1294
/// Compute the value of Val on the edge BBFrom -> BBTo. Returns None if
/// Val is not constrained on the edge. Result is unspecified if return value
/// is false.
static Optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
                                                       BasicBlock *BBFrom,
                                                       BasicBlock *BBTo) {
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
  // know that v != 0.
  if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
    // If this is a conditional branch and only one successor goes to BBTo, then
    // we may be able to infer something from the condition.
    if (BI->isConditional() &&
        BI->getSuccessor(0) != BI->getSuccessor(1)) {
      bool isTrueDest = BI->getSuccessor(0) == BBTo;
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
             "BBTo isn't a successor of BBFrom");
      Value *Condition = BI->getCondition();

      // If V is the condition of the branch itself, then we know exactly what
      // it is.
      if (Condition == Val)
        return ValueLatticeElement::get(ConstantInt::get(
                              Type::getInt1Ty(Val->getContext()), isTrueDest));

      // If the condition of the branch is an equality comparison, we may be
      // able to infer the value.
      ValueLatticeElement Result = getValueFromCondition(Val, Condition,
                                                         isTrueDest);
      if (!Result.isOverdefined())
        return Result;

      // The condition did not directly constrain Val; try to fold Val through
      // one of its operands instead.
      if (User *Usr = dyn_cast<User>(Val)) {
        assert(Result.isOverdefined() && "Result isn't overdefined");
        // Check with isOperationFoldable() first to avoid linearly iterating
        // over the operands unnecessarily which can be expensive for
        // instructions with many operands.
        if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
          const DataLayout &DL = BBTo->getModule()->getDataLayout();
          if (usesOperand(Usr, Condition)) {
            // If Val has Condition as an operand and Val can be folded into a
            // constant with either Condition == true or Condition == false,
            // propagate the constant.
            // eg.
            //   ; %Val is true on the edge to %then.
            //   %Val = and i1 %Condition, true.
            //   br %Condition, label %then, label %else
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
            Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
          } else {
            // If one of Val's operand has an inferred value, we may be able to
            // infer the value of Val.
            // eg.
            //    ; %Val is 94 on the edge to %then.
            //    %Val = add i8 %Op, 1
            //    %Condition = icmp eq i8 %Op, 93
            //    br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              ValueLatticeElement OpLatticeVal =
                  getValueFromCondition(Op, Condition, isTrueDest);
              if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL);
                break;
              }
            }
          }
        }
      }
      if (!Result.isOverdefined())
        return Result;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val->getType()))
      return None;
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
            usesOperand(Usr, Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return None;
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Condition != Val nor Val doesn't use Condition");

    // For the default destination we start from the full set and subtract
    // covered case values; for a case destination we start empty and union in
    // the values routed to BBTo.
    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getModule()->getDataLayout();
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Condition, CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return None;
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo. In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue). For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(EdgeVal);
    }
    return ValueLatticeElement::getRange(std::move(EdgesVals));
  }
  return None;
}
1418
1419/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1420/// the basic block if the edge does not constrain Val.
1421Optional<ValueLatticeElement> LazyValueInfoImpl::getEdgeValue(
1422 Value *Val, BasicBlock *BBFrom, BasicBlock *BBTo, Instruction *CxtI) {
1423 // If already a constant, there is nothing to compute.
1424 if (Constant *VC = dyn_cast<Constant>(Val))
1425 return ValueLatticeElement::get(VC);
1426
1427 ValueLatticeElement LocalResult = getEdgeValueLocal(Val, BBFrom, BBTo)
1428 .getValueOr(ValueLatticeElement::getOverdefined());
1429 if (hasSingleValue(LocalResult))
1430 // Can't get any more precise here
1431 return LocalResult;
1432
1433 Optional<ValueLatticeElement> OptInBlock = getBlockValue(Val, BBFrom);
1434 if (!OptInBlock)
1435 return None;
1436 ValueLatticeElement &InBlock = *OptInBlock;
1437
1438 // Try to intersect ranges of the BB and the constraint on the edge.
1439 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock,
1440 BBFrom->getTerminator());
1441 // We can use the context instruction (generically the ultimate instruction
1442 // the calling pass is trying to simplify) here, even though the result of
1443 // this function is generally cached when called from the solve* functions
1444 // (and that cached result might be used with queries using a different
1445 // context instruction), because when this function is called from the solve*
1446 // functions, the context instruction is not provided. When called from
1447 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1448 // but then the result is not cached.
1449 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1450
1451 return intersect(LocalResult, InBlock);
1452}
1453
/// Query the lattice value of \p V at the end of block \p BB, solving lazily
/// on a cache miss and refining the result with assumes/guards at \p CxtI.
ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
                                                       Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
                    << BB->getName() << "'\n");

  // Top-level queries must start with an empty work stack/set.
  assert(BlockValueStack.empty() && BlockValueSet.empty());
  Optional<ValueLatticeElement> OptResult = getBlockValue(V, BB);
  if (!OptResult) {
    // Cache miss: run the lazy solver, after which the value must be known.
    solve();
    OptResult = getBlockValue(V, BB);
    assert(OptResult && "Value not available after solving");
  }
  ValueLatticeElement Result = *OptResult;
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);

  LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}
1472
/// Query the lattice value of \p V at the location of \p CxtI, using only
/// information local to that point (!range metadata plus assumes/guards) —
/// no block-level data flow is solved here.
ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
                    << "'\n");

  if (auto *C = dyn_cast<Constant>(V))
    return ValueLatticeElement::get(C);

  // Start from !range metadata when available, otherwise overdefined, then
  // narrow using assume/guard facts at CxtI.
  ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
  if (auto *I = dyn_cast<Instruction>(V))
    Result = getFromRangeMetadata(I);
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);

  LLVM_DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}
1488
/// Public entry point for edge queries: compute the value of \p V on the edge
/// FromBB -> ToBB, solving lazily if the first attempt misses the cache.
ValueLatticeElement LazyValueInfoImpl::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
               Instruction *CxtI) {
  LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
                    << FromBB->getName() << "' to '" << ToBB->getName()
                    << "'\n");

  Optional<ValueLatticeElement> Result = getEdgeValue(V, FromBB, ToBB, CxtI);
  if (!Result) {
    // A miss means some block values still need solving; solve and re-query.
    solve();
    Result = getEdgeValue(V, FromBB, ToBB, CxtI);
    assert(Result && "More work to do after problem solved?");
  }

  LLVM_DEBUG(dbgs() << "  Result = " << *Result << "\n");
  return *Result;
}
1506
/// Notify the analysis that edge PredBB -> OldSucc was threaded to NewSucc.
/// Only the cache needs updating; PredBB itself is not consulted here.
void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}
1511
1512//===----------------------------------------------------------------------===//
1513// LazyValueInfo Impl
1514//===----------------------------------------------------------------------===//
1515
/// This lazily constructs the LazyValueInfoImpl.
/// \p M may be null only when \p PImpl already points at a constructed impl
/// (see LazyValueInfo::releaseMemory); it is dereferenced solely on first
/// construction.
static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
                                  const Module *M) {
  if (!PImpl) {
    assert(M && "getCache() called with a null Module");
    const DataLayout &DL = M->getDataLayout();
    // Cache the llvm.experimental.guard declaration (if any) so the impl can
    // recognize guard intrinsic calls cheaply.
    Function *GuardDecl = M->getFunction(
        Intrinsic::getName(Intrinsic::experimental_guard));
    PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
  }
  return *static_cast<LazyValueInfoImpl*>(PImpl);
}
1528
bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

  // Drop any results cached for a previously analyzed function; no eager
  // computation happens here.
  if (Info.PImpl)
    getImpl(Info.PImpl, Info.AC, F.getParent()).clear();

  // Fully lazy.
  return false;
}
1539
/// LVI preserves everything; it only requires assumption-cache and TLI info.
void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}
1545
1546LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1547
1548LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1549
void LazyValueInfo::releaseMemory() {
  // If the cache was allocated, free it.
  if (PImpl) {
    // Passing a null Module is deliberate and safe: getImpl only dereferences
    // the Module when constructing a new impl, and PImpl is non-null here.
    delete &getImpl(PImpl, AC, nullptr);
    PImpl = nullptr;
  }
}
1557
1558bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1559 FunctionAnalysisManager::Invalidator &Inv) {
1560 // We need to invalidate if we have either failed to preserve this analyses
1561 // result directly or if any of its dependencies have been invalidated.
1562 auto PAC = PA.getChecker<LazyValueAnalysis>();
1563 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1564 return true;
1565
1566 return false;
1567}
1568
1569void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1570
1571LazyValueInfo LazyValueAnalysis::run(Function &F,
1572 FunctionAnalysisManager &FAM) {
1573 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1574 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1575
1576 return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI);
1577}
1578
1579/// Returns true if we can statically tell that this value will never be a
1580/// "useful" constant. In practice, this means we've got something like an
1581/// alloca or a malloc call for which a comparison against a constant can
1582/// only be guarding dead code. Note that we are potentially giving up some
1583/// precision in dead code (a constant result) in favour of avoiding a
1584/// expensive search for a easily answered common query.
1585static bool isKnownNonConstant(Value *V) {
1586 V = V->stripPointerCasts();
1587 // The return val of alloc cannot be a Constant.
1588 if (isa<AllocaInst>(V))
1589 return true;
1590 return false;
1591}
1592
1593Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1594 // Bail out early if V is known not to be a Constant.
1595 if (isKnownNonConstant(V))
1596 return nullptr;
1597
1598 BasicBlock *BB = CxtI->getParent();
1599 ValueLatticeElement Result =
1600 getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
1601
1602 if (Result.isConstant())
1603 return Result.getConstant();
1604 if (Result.isConstantRange()) {
1605 const ConstantRange &CR = Result.getConstantRange();
1606 if (const APInt *SingleVal = CR.getSingleElement())
1607 return ConstantInt::get(V->getContext(), *SingleVal);
1608 }
1609 return nullptr;
1610}
1611
/// Return the best-known constant range for integer value \p V at \p CxtI.
/// Unknown (unreachable/undef) maps to the empty range; no information maps
/// to the full range.
ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
                                              bool UndefAllowed) {
  assert(V->getType()->isIntegerTy());
  unsigned Width = V->getType()->getIntegerBitWidth();
  BasicBlock *BB = CxtI->getParent();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
  if (Result.isUnknown())
    return ConstantRange::getEmpty(Width);
  if (Result.isConstantRange(UndefAllowed))
    return Result.getConstantRange(UndefAllowed);
  // We represent ConstantInt constants as constant ranges but other kinds
  // of integer constants, i.e. ConstantExpr will be tagged as constants
  assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
         "ConstantInt value must be represented as constantrange");
  return ConstantRange::getFull(Width);
}
1629
1630/// Determine whether the specified value is known to be a
1631/// constant on the specified edge. Return null if not.
1632Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1633 BasicBlock *ToBB,
1634 Instruction *CxtI) {
1635 Module *M = FromBB->getModule();
1636 ValueLatticeElement Result =
1637 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1638
1639 if (Result.isConstant())
1640 return Result.getConstant();
1641 if (Result.isConstantRange()) {
1642 const ConstantRange &CR = Result.getConstantRange();
1643 if (const APInt *SingleVal = CR.getSingleElement())
1644 return ConstantInt::get(V->getContext(), *SingleVal);
1645 }
1646 return nullptr;
1647}
1648
/// Return the best-known constant range for integer value \p V on the edge
/// FromBB -> ToBB. Assumes V's type is an integer (getIntegerBitWidth would
/// fail otherwise) — TODO confirm callers guarantee this.
ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
                                                    BasicBlock *FromBB,
                                                    BasicBlock *ToBB,
                                                    Instruction *CxtI) {
  unsigned Width = V->getType()->getIntegerBitWidth();
  Module *M = FromBB->getModule();
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  // Unknown means "no feasible value": report the empty range.
  if (Result.isUnknown())
    return ConstantRange::getEmpty(Width);
  if (Result.isConstantRange())
    return Result.getConstantRange();
  // We represent ConstantInt constants as constant ranges but other kinds
  // of integer constants, i.e. ConstantExpr will be tagged as constants
  assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
         "ConstantInt value must be represented as constantrange");
  return ConstantRange::getFull(Width);
}
1668
1669static LazyValueInfo::Tristate
1670getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1671 const DataLayout &DL, TargetLibraryInfo *TLI) {
1672 // If we know the value is a constant, evaluate the conditional.
1673 Constant *Res = nullptr;
1674 if (Val.isConstant()) {
1675 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
1676 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1677 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1678 return LazyValueInfo::Unknown;
1679 }
1680
1681 if (Val.isConstantRange()) {
1682 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1683 if (!CI) return LazyValueInfo::Unknown;
1684
1685 const ConstantRange &CR = Val.getConstantRange();
1686 if (Pred == ICmpInst::ICMP_EQ) {
1687 if (!CR.contains(CI->getValue()))
1688 return LazyValueInfo::False;
1689
1690 if (CR.isSingleElement())
1691 return LazyValueInfo::True;
1692 } else if (Pred == ICmpInst::ICMP_NE) {
1693 if (!CR.contains(CI->getValue()))
1694 return LazyValueInfo::True;
1695
1696 if (CR.isSingleElement())
1697 return LazyValueInfo::False;
1698 } else {
1699 // Handle more complex predicates.
1700 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1701 (ICmpInst::Predicate)Pred, CI->getValue());
1702 if (TrueValues.contains(CR))
1703 return LazyValueInfo::True;
1704 if (TrueValues.inverse().contains(CR))
1705 return LazyValueInfo::False;
1706 }
1707 return LazyValueInfo::Unknown;
1708 }
1709
1710 if (Val.isNotConstant()) {
1711 // If this is an equality comparison, we can try to fold it knowing that
1712 // "V != C1".
1713 if (Pred == ICmpInst::ICMP_EQ) {
1714 // !C1 == C -> false iff C1 == C.
1715 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1716 Val.getNotConstant(), C, DL,
1717 TLI);
1718 if (Res->isNullValue())
1719 return LazyValueInfo::False;
1720 } else if (Pred == ICmpInst::ICMP_NE) {
1721 // !C1 != C -> true iff C1 == C.
1722 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1723 Val.getNotConstant(), C, DL,
1724 TLI);
1725 if (Res->isNullValue())
1726 return LazyValueInfo::True;
1727 }
1728 return LazyValueInfo::Unknown;
1729 }
1730
1731 return LazyValueInfo::Unknown;
1732}
1733
/// Determine whether the specified value comparison with a constant is known to
/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
                                  BasicBlock *FromBB, BasicBlock *ToBB,
                                  Instruction *CxtI) {
  Module *M = FromBB->getModule();
  // Compute the lattice value of V on the edge, then reduce it against C.
  ValueLatticeElement Result =
      getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);

  return getPredicateResult(Pred, C, Result, M->getDataLayout(), TLI);
}
1746
/// Determine whether "V Pred C" is known true/false at \p CxtI. Falls back to
/// per-predecessor edge queries when the merged lattice value is inconclusive.
LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
                              Instruction *CxtI, bool UseBlockValue) {
  // Is or is not NonNull are common predicates being queried. If
  // isKnownNonZero can tell us the result of the predicate, we can
  // return it quickly. But this is only a fastpath, and falling
  // through would still be correct.
  Module *M = CxtI->getModule();
  const DataLayout &DL = M->getDataLayout();
  if (V->getType()->isPointerTy() && C->isNullValue() &&
      isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
    if (Pred == ICmpInst::ICMP_EQ)
      return LazyValueInfo::False;
    else if (Pred == ICmpInst::ICMP_NE)
      return LazyValueInfo::True;
  }

  // Try to decide from the merged lattice value first.
  ValueLatticeElement Result = UseBlockValue
      ? getImpl(PImpl, AC, M).getValueInBlock(V, CxtI->getParent(), CxtI)
      : getImpl(PImpl, AC, M).getValueAt(V, CxtI);
  Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
  if (Ret != Unknown)
    return Ret;

  // Note: The following bit of code is somewhat distinct from the rest of LVI;
  // LVI as a whole tries to compute a lattice value which is conservatively
  // correct at a given location. In this case, we have a predicate which we
  // weren't able to prove about the merged result, and we're pushing that
  // predicate back along each incoming edge to see if we can prove it
  // separately for each input. As a motivating example, consider:
  //   bb1:
  //     %v1 = ... ; constantrange<1, 5>
  //     br label %merge
  //   bb2:
  //     %v2 = ... ; constantrange<10, 20>
  //     br label %merge
  //   merge:
  //     %phi = phi [%v1, %v2] ; constantrange<1,20>
  //     %pred = icmp eq i32 %phi, 8
  // We can't tell from the lattice value for '%phi' that '%pred' is false
  // along each path, but by checking the predicate over each input separately,
  // we can.
  // We limit the search to one step backwards from the current BB and value.
  // We could consider extending this to search further backwards through the
  // CFG and/or value graph, but there are non-obvious compile time vs quality
  // tradeoffs.
  BasicBlock *BB = CxtI->getParent();

  // Function entry or an unreachable block. Bail to avoid confusing
  // analysis below.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return Unknown;

  // If V is a PHI node in the same block as the context, we need to ask
  // questions about the predicate as applied to the incoming value along
  // each edge. This is useful for eliminating cases where the predicate is
  // known along all incoming edges.
  if (auto *PHI = dyn_cast<PHINode>(V))
    if (PHI->getParent() == BB) {
      Tristate Baseline = Unknown;
      for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
        Value *Incoming = PHI->getIncomingValue(i);
        BasicBlock *PredBB = PHI->getIncomingBlock(i);
        // Note that PredBB may be BB itself.
        Tristate Result =
            getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);

        // Keep going as long as we've seen a consistent known result for
        // all inputs.
        Baseline = (i == 0) ? Result /* First iteration */
                            : (Baseline == Result ? Baseline
                                                  : Unknown); /* All others */
        if (Baseline == Unknown)
          break;
      }
      if (Baseline != Unknown)
        return Baseline;
    }

  // For a comparison where the V is outside this block, it's possible
  // that we've branched on it before. Look to see if the value is known
  // on all incoming edges.
  if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
    // For predecessor edge, determine if the comparison is true or false
    // on that edge. If they're all true or all false, we can conclude
    // the value of the comparison in this block.
    Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
    if (Baseline != Unknown) {
      // Check that all remaining incoming values match the first one.
      while (++PI != PE) {
        Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
        if (Ret != Baseline)
          break;
      }
      // If we terminated early, then one of the values didn't match.
      if (PI == PE) {
        return Baseline;
      }
    }
  }

  return Unknown;
}
1851
1852LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
1853 Value *RHS,
1854 Instruction *CxtI,
1855 bool UseBlockValue) {
1856 CmpInst::Predicate Pred = (CmpInst::Predicate)P;
1857
1858 if (auto *C = dyn_cast<Constant>(RHS))
1859 return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
1860 if (auto *C = dyn_cast<Constant>(LHS))
1861 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1862 UseBlockValue);
1863
1864 // Got two non-Constant values. While we could handle them somewhat,
1865 // by getting their constant ranges, and applying ConstantRange::icmp(),
1866 // so far it did not appear to be profitable.
1867 return LazyValueInfo::Unknown;
1868}
1869
/// Forward an edge-threading notification to the impl, if one exists yet.
void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                               BasicBlock *NewSucc) {
  if (PImpl) {
    getImpl(PImpl, AC, PredBB->getModule())
        .threadEdge(PredBB, OldSucc, NewSucc);
  }
}
1877
/// Drop any cached information for a block that is being removed.
void LazyValueInfo::eraseBlock(BasicBlock *BB) {
  if (PImpl) {
    getImpl(PImpl, AC, BB->getModule()).eraseBlock(BB);
  }
}
1883
1884
/// Print cached/derived LVI facts for \p F; a no-op until the impl exists.
void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
  if (PImpl) {
    getImpl(PImpl, AC, F.getParent()).printLVI(F, DTree, OS);
  }
}
1890
1891// Print the LVI for the function arguments at the start of each basic block.
1892void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1893 const BasicBlock *BB, formatted_raw_ostream &OS) {
1894 // Find if there are latticevalues defined for arguments of the function.
1895 auto *F = BB->getParent();
1896 for (auto &Arg : F->args()) {
1897 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1898 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
1899 if (Result.isUnknown())
1900 continue;
1901 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
1902 }
1903}
1904
// This function prints the LVI analysis for the instruction I at the beginning
// of various basic blocks. It relies on calculated values that are stored in
// the LazyValueInfoCache, and in the absence of cached values, recalculate the
// LazyValueInfo for `I`, and print that info.
void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {

  auto *ParentBB = I->getParent();
  SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
  // We can generate (solve) LVI values only for blocks that are dominated by
  // the I's parent. However, to avoid generating LVI for all dominating blocks,
  // that contain redundant/uninteresting information, we print LVI for
  // blocks that may use this LVI information (such as immediate successor
  // blocks, and blocks that contain uses of `I`).
  auto printResult = [&](const BasicBlock *BB) {
    // The set makes each interesting block print at most once.
    if (!BlocksContainingLVI.insert(BB).second)
      return;
    ValueLatticeElement Result = LVIImpl->getValueInBlock(
        const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
    OS << "; LatticeVal for: '" << *I << "' in BB: '";
    BB->printAsOperand(OS, false);
    OS << "' is: " << Result << "\n";
  };

  printResult(ParentBB);
  // Print the LVI analysis results for the immediate successor blocks, that
  // are dominated by `ParentBB`.
  for (auto *BBSucc : successors(ParentBB))
    if (DT.dominates(ParentBB, BBSucc))
      printResult(BBSucc);

  // Print LVI in blocks where `I` is used.
  for (auto *U : I->users())
    if (auto *UseI = dyn_cast<Instruction>(U))
      if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
        printResult(UseI->getParent());

}
1943
1944namespace {
1945// Printer class for LazyValueInfo results.
1946class LazyValueInfoPrinter : public FunctionPass {
1947public:
1948 static char ID; // Pass identification, replacement for typeid
1949 LazyValueInfoPrinter() : FunctionPass(ID) {
1950 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
1951 }
1952
1953 void getAnalysisUsage(AnalysisUsage &AU) const override {
1954 AU.setPreservesAll();
1955 AU.addRequired<LazyValueInfoWrapperPass>();
1956 AU.addRequired<DominatorTreeWrapperPass>();
1957 }
1958
1959 // Get the mandatory dominator tree analysis and pass this in to the
1960 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
1961 bool runOnFunction(Function &F) override {
1962 dbgs() << "LVI for function '" << F.getName() << "':\n";
1963 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
1964 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1965 LVI.printLVI(F, DTree, dbgs());
1966 return false;
1967 }
1968};
1969}
1970
1971char LazyValueInfoPrinter::ID = 0;
1972INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",static void *initializeLazyValueInfoPrinterPassOnce(PassRegistry
&Registry) {
1973 "Lazy Value Info Printer Pass", false, false)static void *initializeLazyValueInfoPrinterPassOnce(PassRegistry
&Registry) {
1974INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)initializeLazyValueInfoWrapperPassPass(Registry);
1975INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",PassInfo *PI = new PassInfo( "Lazy Value Info Printer Pass", "print-lazy-value-info"
, &LazyValueInfoPrinter::ID, PassInfo::NormalCtor_t(callDefaultCtor
<LazyValueInfoPrinter>), false, false); Registry.registerPass
(*PI, true); return PI; } static llvm::once_flag InitializeLazyValueInfoPrinterPassFlag
; void llvm::initializeLazyValueInfoPrinterPass(PassRegistry &
Registry) { llvm::call_once(InitializeLazyValueInfoPrinterPassFlag
, initializeLazyValueInfoPrinterPassOnce, std::ref(Registry))
; }
1976 "Lazy Value Info Printer Pass", false, false)PassInfo *PI = new PassInfo( "Lazy Value Info Printer Pass", "print-lazy-value-info"
, &LazyValueInfoPrinter::ID, PassInfo::NormalCtor_t(callDefaultCtor
<LazyValueInfoPrinter>), false, false); Registry.registerPass
(*PI, true); return PI; } static llvm::once_flag InitializeLazyValueInfoPrinterPassFlag
; void llvm::initializeLazyValueInfoPrinterPass(PassRegistry &
Registry) { llvm::call_once(InitializeLazyValueInfoPrinterPassFlag
, initializeLazyValueInfoPrinterPassOnce, std::ref(Registry))
; }

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
61/// an instruction to allocate memory on the stack
62class AllocaInst : public UnaryInstruction {
63 Type *AllocatedType;
64
65 using AlignmentField = AlignmentBitfieldElementT<0>;
66 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
68 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69 SwiftErrorField>(),
70 "Bitfields must be contiguous");
71
72protected:
73 // Note: Instruction needs to be a friend here to call cloneImpl.
74 friend class Instruction;
75
76 AllocaInst *cloneImpl() const;
77
78public:
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 Instruction *InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace,
87 const Twine &Name, BasicBlock *InsertAtEnd);
88
89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90 const Twine &Name = "", Instruction *InsertBefore = nullptr);
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92 const Twine &Name, BasicBlock *InsertAtEnd);
93
94 /// Return true if there is an allocation size parameter to the allocation
95 /// instruction that is not 1.
96 bool isArrayAllocation() const;
97
98 /// Get the number of elements allocated. For a simple allocation of a single
99 /// element, this will return a constant 1 value.
100 const Value *getArraySize() const { return getOperand(0); }
101 Value *getArraySize() { return getOperand(0); }
102
103 /// Overload to return most specific pointer type.
104 PointerType *getType() const {
105 return cast<PointerType>(Instruction::getType());
106 }
107
108 /// Return the address space for the allocation.
109 unsigned getAddressSpace() const {
110 return getType()->getAddressSpace();
111 }
112
113 /// Get allocation size in bits. Returns None if size can't be determined,
114 /// e.g. in case of a VLA.
115 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116
117 /// Return the type that is being allocated by the instruction.
118 Type *getAllocatedType() const { return AllocatedType; }
119 /// for use only in special circumstances that need to generically
120 /// transform a whole instruction (eg: IR linking and vectorization).
121 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122
123 /// Return the alignment of the memory that is being allocated by the
124 /// instruction.
125 Align getAlign() const {
126 return Align(1ULL << getSubclassData<AlignmentField>());
127 }
128
129 void setAlignment(Align Align) {
130 setSubclassData<AlignmentField>(Log2(Align));
131 }
132
133 // FIXME: Remove this one transition to Align is over.
134 uint64_t getAlignment() const { return getAlign().value(); }
135
136 /// Return true if this alloca is in the entry block of the function and is a
137 /// constant size. If so, the code generator will fold it into the
138 /// prolog/epilog code, so it is basically free.
139 bool isStaticAlloca() const;
140
141 /// Return true if this alloca is used as an inalloca argument to a call. Such
142 /// allocas are never considered static even if they are in the entry block.
143 bool isUsedWithInAlloca() const {
144 return getSubclassData<UsedWithInAllocaField>();
145 }
146
147 /// Specify whether this alloca is used to represent the arguments to a call.
148 void setUsedWithInAlloca(bool V) {
149 setSubclassData<UsedWithInAllocaField>(V);
150 }
151
152 /// Return true if this alloca is used as a swifterror argument to a call.
153 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
154 /// Specify whether this alloca is used to represent a swifterror.
155 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
156
157 // Methods for support type inquiry through isa, cast, and dyn_cast:
158 static bool classof(const Instruction *I) {
159 return (I->getOpcode() == Instruction::Alloca);
160 }
161 static bool classof(const Value *V) {
162 return isa<Instruction>(V) && classof(cast<Instruction>(V));
163 }
164
165private:
166 // Shadow Instruction::setInstructionSubclassData with a private forwarding
167 // method so that subclasses cannot accidentally use it.
168 template <typename Bitfield>
169 void setSubclassData(typename Bitfield::Type Value) {
170 Instruction::setSubclassData<Bitfield>(Value);
171 }
172};
173
174//===----------------------------------------------------------------------===//
175// LoadInst Class
176//===----------------------------------------------------------------------===//
177
178/// An instruction for reading from memory. This uses the SubclassData field in
179/// Value to store whether or not the load is volatile.
180class LoadInst : public UnaryInstruction {
181 using VolatileField = BoolBitfieldElementT<0>;
182 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
183 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
184 static_assert(
185 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
186 "Bitfields must be contiguous");
187
188 void AssertOK();
189
190protected:
191 // Note: Instruction needs to be a friend here to call cloneImpl.
192 friend class Instruction;
193
194 LoadInst *cloneImpl() const;
195
196public:
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
198 Instruction *InsertBefore);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Instruction *InsertBefore);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 BasicBlock *InsertAtEnd);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205 Align Align, Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, BasicBlock *InsertAtEnd);
208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209 Align Align, AtomicOrdering Order,
210 SyncScope::ID SSID = SyncScope::System,
211 Instruction *InsertBefore = nullptr);
212 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
213 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
214 BasicBlock *InsertAtEnd);
215
216 /// Return true if this is a load from a volatile memory location.
217 bool isVolatile() const { return getSubclassData<VolatileField>(); }
218
219 /// Specify whether this is a volatile load or not.
220 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
221
222 /// Return the alignment of the access that is being performed.
223 /// FIXME: Remove this function once transition to Align is over.
224 /// Use getAlign() instead.
225 uint64_t getAlignment() const { return getAlign().value(); }
226
227 /// Return the alignment of the access that is being performed.
228 Align getAlign() const {
229 return Align(1ULL << (getSubclassData<AlignmentField>()));
230 }
231
232 void setAlignment(Align Align) {
233 setSubclassData<AlignmentField>(Log2(Align));
234 }
235
236 /// Returns the ordering constraint of this load instruction.
237 AtomicOrdering getOrdering() const {
238 return getSubclassData<OrderingField>();
239 }
240 /// Sets the ordering constraint of this load instruction. May not be Release
241 /// or AcquireRelease.
242 void setOrdering(AtomicOrdering Ordering) {
243 setSubclassData<OrderingField>(Ordering);
244 }
245
246 /// Returns the synchronization scope ID of this load instruction.
247 SyncScope::ID getSyncScopeID() const {
248 return SSID;
249 }
250
251 /// Sets the synchronization scope ID of this load instruction.
252 void setSyncScopeID(SyncScope::ID SSID) {
253 this->SSID = SSID;
254 }
255
256 /// Sets the ordering constraint and the synchronization scope ID of this load
257 /// instruction.
258 void setAtomic(AtomicOrdering Ordering,
259 SyncScope::ID SSID = SyncScope::System) {
260 setOrdering(Ordering);
261 setSyncScopeID(SSID);
262 }
263
264 bool isSimple() const { return !isAtomic() && !isVolatile(); }
265
266 bool isUnordered() const {
267 return (getOrdering() == AtomicOrdering::NotAtomic ||
268 getOrdering() == AtomicOrdering::Unordered) &&
269 !isVolatile();
270 }
271
272 Value *getPointerOperand() { return getOperand(0); }
273 const Value *getPointerOperand() const { return getOperand(0); }
274 static unsigned getPointerOperandIndex() { return 0U; }
275 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
276
277 /// Returns the address space of the pointer operand.
278 unsigned getPointerAddressSpace() const {
279 return getPointerOperandType()->getPointerAddressSpace();
280 }
281
282 // Methods for support type inquiry through isa, cast, and dyn_cast:
283 static bool classof(const Instruction *I) {
284 return I->getOpcode() == Instruction::Load;
285 }
286 static bool classof(const Value *V) {
287 return isa<Instruction>(V) && classof(cast<Instruction>(V));
288 }
289
290private:
291 // Shadow Instruction::setInstructionSubclassData with a private forwarding
292 // method so that subclasses cannot accidentally use it.
293 template <typename Bitfield>
294 void setSubclassData(typename Bitfield::Type Value) {
295 Instruction::setSubclassData<Bitfield>(Value);
296 }
297
298 /// The synchronization scope ID of this load instruction. Not quite enough
299 /// room in SubClassData for everything, so synchronization scope ID gets its
300 /// own field.
301 SyncScope::ID SSID;
302};
303
304//===----------------------------------------------------------------------===//
305// StoreInst Class
306//===----------------------------------------------------------------------===//
307
308/// An instruction for storing to memory.
309class StoreInst : public Instruction {
310 using VolatileField = BoolBitfieldElementT<0>;
311 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
312 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
313 static_assert(
314 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
315 "Bitfields must be contiguous");
316
317 void AssertOK();
318
319protected:
320 // Note: Instruction needs to be a friend here to call cloneImpl.
321 friend class Instruction;
322
323 StoreInst *cloneImpl() const;
324
325public:
326 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
327 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331 Instruction *InsertBefore = nullptr);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 BasicBlock *InsertAtEnd);
334 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
335 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
336 Instruction *InsertBefore = nullptr);
337 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
338 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
339
340 // allocate space for exactly two operands
341 void *operator new(size_t S) { return User::operator new(S, 2); }
342 void operator delete(void *Ptr) { User::operator delete(Ptr); }
343
344 /// Return true if this is a store to a volatile memory location.
345 bool isVolatile() const { return getSubclassData<VolatileField>(); }
346
347 /// Specify whether this is a volatile store or not.
348 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
349
350 /// Transparently provide more efficient getOperand methods.
351 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
352
353 /// Return the alignment of the access that is being performed
354 /// FIXME: Remove this function once transition to Align is over.
355 /// Use getAlign() instead.
356 uint64_t getAlignment() const { return getAlign().value(); }
357
358 Align getAlign() const {
359 return Align(1ULL << (getSubclassData<AlignmentField>()));
360 }
361
362 void setAlignment(Align Align) {
363 setSubclassData<AlignmentField>(Log2(Align));
364 }
365
366 /// Returns the ordering constraint of this store instruction.
367 AtomicOrdering getOrdering() const {
368 return getSubclassData<OrderingField>();
369 }
370
371 /// Sets the ordering constraint of this store instruction. May not be
372 /// Acquire or AcquireRelease.
373 void setOrdering(AtomicOrdering Ordering) {
374 setSubclassData<OrderingField>(Ordering);
375 }
376
377 /// Returns the synchronization scope ID of this store instruction.
378 SyncScope::ID getSyncScopeID() const {
379 return SSID;
380 }
381
382 /// Sets the synchronization scope ID of this store instruction.
383 void setSyncScopeID(SyncScope::ID SSID) {
384 this->SSID = SSID;
385 }
386
387 /// Sets the ordering constraint and the synchronization scope ID of this
388 /// store instruction.
389 void setAtomic(AtomicOrdering Ordering,
390 SyncScope::ID SSID = SyncScope::System) {
391 setOrdering(Ordering);
392 setSyncScopeID(SSID);
393 }
394
395 bool isSimple() const { return !isAtomic() && !isVolatile(); }
396
397 bool isUnordered() const {
398 return (getOrdering() == AtomicOrdering::NotAtomic ||
399 getOrdering() == AtomicOrdering::Unordered) &&
400 !isVolatile();
401 }
402
403 Value *getValueOperand() { return getOperand(0); }
404 const Value *getValueOperand() const { return getOperand(0); }
405
406 Value *getPointerOperand() { return getOperand(1); }
407 const Value *getPointerOperand() const { return getOperand(1); }
408 static unsigned getPointerOperandIndex() { return 1U; }
409 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
410
411 /// Returns the address space of the pointer operand.
412 unsigned getPointerAddressSpace() const {
413 return getPointerOperandType()->getPointerAddressSpace();
414 }
415
416 // Methods for support type inquiry through isa, cast, and dyn_cast:
417 static bool classof(const Instruction *I) {
418 return I->getOpcode() == Instruction::Store;
419 }
420 static bool classof(const Value *V) {
421 return isa<Instruction>(V) && classof(cast<Instruction>(V));
422 }
423
424private:
425 // Shadow Instruction::setInstructionSubclassData with a private forwarding
426 // method so that subclasses cannot accidentally use it.
427 template <typename Bitfield>
428 void setSubclassData(typename Bitfield::Type Value) {
429 Instruction::setSubclassData<Bitfield>(Value);
430 }
431
432 /// The synchronization scope ID of this store instruction. Not quite enough
433 /// room in SubClassData for everything, so synchronization scope ID gets its
434 /// own field.
435 SyncScope::ID SSID;
436};
437
438template <>
439struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
440};
441
442DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 442, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this))[i_nocapture
].get()); } void StoreInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<StoreInst>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 442, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<StoreInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned StoreInst::getNumOperands() const
{ return OperandTraits<StoreInst>::operands(this); } template
<int Idx_nocapture> Use &StoreInst::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &StoreInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
443
444//===----------------------------------------------------------------------===//
445// FenceInst Class
446//===----------------------------------------------------------------------===//
447
448/// An instruction for ordering other memory operations.
449class FenceInst : public Instruction {
450 using OrderingField = AtomicOrderingBitfieldElementT<0>;
451
452 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
453
454protected:
455 // Note: Instruction needs to be a friend here to call cloneImpl.
456 friend class Instruction;
457
458 FenceInst *cloneImpl() const;
459
460public:
461 // Ordering may only be Acquire, Release, AcquireRelease, or
462 // SequentiallyConsistent.
463 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
464 SyncScope::ID SSID = SyncScope::System,
465 Instruction *InsertBefore = nullptr);
466 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
467 BasicBlock *InsertAtEnd);
468
469 // allocate space for exactly zero operands
470 void *operator new(size_t S) { return User::operator new(S, 0); }
471 void operator delete(void *Ptr) { User::operator delete(Ptr); }
472
473 /// Returns the ordering constraint of this fence instruction.
474 AtomicOrdering getOrdering() const {
475 return getSubclassData<OrderingField>();
476 }
477
478 /// Sets the ordering constraint of this fence instruction. May only be
479 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
480 void setOrdering(AtomicOrdering Ordering) {
481 setSubclassData<OrderingField>(Ordering);
482 }
483
484 /// Returns the synchronization scope ID of this fence instruction.
485 SyncScope::ID getSyncScopeID() const {
486 return SSID;
487 }
488
489 /// Sets the synchronization scope ID of this fence instruction.
490 void setSyncScopeID(SyncScope::ID SSID) {
491 this->SSID = SSID;
492 }
493
494 // Methods for support type inquiry through isa, cast, and dyn_cast:
495 static bool classof(const Instruction *I) {
496 return I->getOpcode() == Instruction::Fence;
497 }
498 static bool classof(const Value *V) {
499 return isa<Instruction>(V) && classof(cast<Instruction>(V));
500 }
501
502private:
503 // Shadow Instruction::setInstructionSubclassData with a private forwarding
504 // method so that subclasses cannot accidentally use it.
505 template <typename Bitfield>
506 void setSubclassData(typename Bitfield::Type Value) {
507 Instruction::setSubclassData<Bitfield>(Value);
508 }
509
510 /// The synchronization scope ID of this fence instruction. Not quite enough
511 /// room in SubClassData for everything, so synchronization scope ID gets its
512 /// own field.
513 SyncScope::ID SSID;
514};
515
516//===----------------------------------------------------------------------===//
517// AtomicCmpXchgInst Class
518//===----------------------------------------------------------------------===//
519
520/// An instruction that atomically checks whether a
521/// specified value is in a memory location, and, if it is, stores a new value
522/// there. The value returned by this instruction is a pair containing the
523/// original value as first element, and an i1 indicating success (true) or
524/// failure (false) as second element.
525///
526class AtomicCmpXchgInst : public Instruction {
527 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
528 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID);
530
531 template <unsigned Offset>
532 using AtomicOrderingBitfieldElement =
533 typename Bitfield::Element<AtomicOrdering, Offset, 3,
534 AtomicOrdering::LAST>;
535
536protected:
537 // Note: Instruction needs to be a friend here to call cloneImpl.
538 friend class Instruction;
539
540 AtomicCmpXchgInst *cloneImpl() const;
541
542public:
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 Instruction *InsertBefore = nullptr);
547 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
548 AtomicOrdering SuccessOrdering,
549 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
550 BasicBlock *InsertAtEnd);
551
552 // allocate space for exactly three operands
553 void *operator new(size_t S) { return User::operator new(S, 3); }
554 void operator delete(void *Ptr) { User::operator delete(Ptr); }
555
556 using VolatileField = BoolBitfieldElementT<0>;
557 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
558 using SuccessOrderingField =
559 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
560 using FailureOrderingField =
561 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
562 using AlignmentField =
563 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
564 static_assert(
565 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
566 FailureOrderingField, AlignmentField>(),
567 "Bitfields must be contiguous");
568
569 /// Return the alignment of the memory that is being allocated by the
570 /// instruction.
571 Align getAlign() const {
572 return Align(1ULL << getSubclassData<AlignmentField>());
573 }
574
575 void setAlignment(Align Align) {
576 setSubclassData<AlignmentField>(Log2(Align));
577 }
578
579 /// Return true if this is a cmpxchg from a volatile memory
580 /// location.
581 ///
582 bool isVolatile() const { return getSubclassData<VolatileField>(); }
583
584 /// Specify whether this is a volatile cmpxchg.
585 ///
586 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
587
588 /// Return true if this cmpxchg may spuriously fail.
589 bool isWeak() const { return getSubclassData<WeakField>(); }
590
591 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
592
593 /// Transparently provide more efficient getOperand methods.
594 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
595
596 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered;
599 }
600
601 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
602 return Ordering != AtomicOrdering::NotAtomic &&
603 Ordering != AtomicOrdering::Unordered &&
604 Ordering != AtomicOrdering::AcquireRelease &&
605 Ordering != AtomicOrdering::Release;
606 }
607
608 /// Returns the success ordering constraint of this cmpxchg instruction.
609 AtomicOrdering getSuccessOrdering() const {
610 return getSubclassData<SuccessOrderingField>();
611 }
612
613 /// Sets the success ordering constraint of this cmpxchg instruction.
614 void setSuccessOrdering(AtomicOrdering Ordering) {
615 assert(isValidSuccessOrdering(Ordering) &&(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "llvm/include/llvm/IR/Instructions.h", 616, __extension__ __PRETTY_FUNCTION__
))
616 "invalid CmpXchg success ordering")(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "llvm/include/llvm/IR/Instructions.h", 616, __extension__ __PRETTY_FUNCTION__
))
;
617 setSubclassData<SuccessOrderingField>(Ordering);
618 }
619
620 /// Returns the failure ordering constraint of this cmpxchg instruction.
621 AtomicOrdering getFailureOrdering() const {
622 return getSubclassData<FailureOrderingField>();
623 }
624
625 /// Sets the failure ordering constraint of this cmpxchg instruction.
626 void setFailureOrdering(AtomicOrdering Ordering) {
627 assert(isValidFailureOrdering(Ordering) &&(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "llvm/include/llvm/IR/Instructions.h", 628, __extension__ __PRETTY_FUNCTION__
))
628 "invalid CmpXchg failure ordering")(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "llvm/include/llvm/IR/Instructions.h", 628, __extension__ __PRETTY_FUNCTION__
))
;
629 setSubclassData<FailureOrderingField>(Ordering);
630 }
631
632 /// Returns a single ordering which is at least as strong as both the
633 /// success and failure orderings for this cmpxchg.
634 AtomicOrdering getMergedOrdering() const {
635 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
636 return AtomicOrdering::SequentiallyConsistent;
637 if (getFailureOrdering() == AtomicOrdering::Acquire) {
638 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
639 return AtomicOrdering::Acquire;
640 if (getSuccessOrdering() == AtomicOrdering::Release)
641 return AtomicOrdering::AcquireRelease;
642 }
643 return getSuccessOrdering();
644 }
645
646 /// Returns the synchronization scope ID of this cmpxchg instruction.
647 SyncScope::ID getSyncScopeID() const {
648 return SSID;
649 }
650
651 /// Sets the synchronization scope ID of this cmpxchg instruction.
652 void setSyncScopeID(SyncScope::ID SSID) {
653 this->SSID = SSID;
654 }
655
656 Value *getPointerOperand() { return getOperand(0); }
657 const Value *getPointerOperand() const { return getOperand(0); }
658 static unsigned getPointerOperandIndex() { return 0U; }
659
660 Value *getCompareOperand() { return getOperand(1); }
661 const Value *getCompareOperand() const { return getOperand(1); }
662
663 Value *getNewValOperand() { return getOperand(2); }
664 const Value *getNewValOperand() const { return getOperand(2); }
665
666 /// Returns the address space of the pointer operand.
667 unsigned getPointerAddressSpace() const {
668 return getPointerOperand()->getType()->getPointerAddressSpace();
669 }
670
671 /// Returns the strongest permitted ordering on failure, given the
672 /// desired ordering on success.
673 ///
674 /// If the comparison in a cmpxchg operation fails, there is no atomic store
675 /// so release semantics cannot be provided. So this function drops explicit
676 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
677 /// operation would remain SequentiallyConsistent.
678 static AtomicOrdering
679 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
680 switch (SuccessOrdering) {
681 default:
682 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "llvm/include/llvm/IR/Instructions.h", 682)
;
683 case AtomicOrdering::Release:
684 case AtomicOrdering::Monotonic:
685 return AtomicOrdering::Monotonic;
686 case AtomicOrdering::AcquireRelease:
687 case AtomicOrdering::Acquire:
688 return AtomicOrdering::Acquire;
689 case AtomicOrdering::SequentiallyConsistent:
690 return AtomicOrdering::SequentiallyConsistent;
691 }
692 }
693
694 // Methods for support type inquiry through isa, cast, and dyn_cast:
695 static bool classof(const Instruction *I) {
696 return I->getOpcode() == Instruction::AtomicCmpXchg;
697 }
698 static bool classof(const Value *V) {
699 return isa<Instruction>(V) && classof(cast<Instruction>(V));
700 }
701
702private:
703 // Shadow Instruction::setInstructionSubclassData with a private forwarding
704 // method so that subclasses cannot accidentally use it.
705 template <typename Bitfield>
706 void setSubclassData(typename Bitfield::Type Value) {
707 Instruction::setSubclassData<Bitfield>(Value);
708 }
709
710 /// The synchronization scope ID of this cmpxchg instruction. Not quite
711 /// enough room in SubClassData for everything, so synchronization scope ID
712 /// gets its own field.
713 SyncScope::ID SSID;
714};
715
716template <>
717struct OperandTraits<AtomicCmpXchgInst> :
718 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
719};
720
721DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 721, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<AtomicCmpXchgInst
>::op_begin(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture
].get()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 721, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<AtomicCmpXchgInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned AtomicCmpXchgInst::getNumOperands
() const { return OperandTraits<AtomicCmpXchgInst>::operands
(this); } template <int Idx_nocapture> Use &AtomicCmpXchgInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &AtomicCmpXchgInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
722
723//===----------------------------------------------------------------------===//
724// AtomicRMWInst Class
725//===----------------------------------------------------------------------===//
726
727/// an instruction that atomically reads a memory location,
728/// combines it with another value, and then stores the result back. Returns
729/// the old value.
730///
731class AtomicRMWInst : public Instruction {
732protected:
733 // Note: Instruction needs to be a friend here to call cloneImpl.
734 friend class Instruction;
735
736 AtomicRMWInst *cloneImpl() const;
737
738public:
739 /// This enumeration lists the possible modifications atomicrmw can make. In
740 /// the descriptions, 'p' is the pointer to the instruction's memory location,
741 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
742 /// instruction. These instructions always return 'old'.
743 enum BinOp : unsigned {
744 /// *p = v
745 Xchg,
746 /// *p = old + v
747 Add,
748 /// *p = old - v
749 Sub,
750 /// *p = old & v
751 And,
752 /// *p = ~(old & v)
753 Nand,
754 /// *p = old | v
755 Or,
756 /// *p = old ^ v
757 Xor,
758 /// *p = old >signed v ? old : v
759 Max,
760 /// *p = old <signed v ? old : v
761 Min,
762 /// *p = old >unsigned v ? old : v
763 UMax,
764 /// *p = old <unsigned v ? old : v
765 UMin,
766
767 /// *p = old + v
768 FAdd,
769
770 /// *p = old - v
771 FSub,
772
773 FIRST_BINOP = Xchg,
774 LAST_BINOP = FSub,
775 BAD_BINOP
776 };
777
778private:
779 template <unsigned Offset>
780 using AtomicOrderingBitfieldElement =
781 typename Bitfield::Element<AtomicOrdering, Offset, 3,
782 AtomicOrdering::LAST>;
783
784 template <unsigned Offset>
785 using BinOpBitfieldElement =
786 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
787
788public:
789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790 AtomicOrdering Ordering, SyncScope::ID SSID,
791 Instruction *InsertBefore = nullptr);
792 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793 AtomicOrdering Ordering, SyncScope::ID SSID,
794 BasicBlock *InsertAtEnd);
795
796 // allocate space for exactly two operands
797 void *operator new(size_t S) { return User::operator new(S, 2); }
798 void operator delete(void *Ptr) { User::operator delete(Ptr); }
799
800 using VolatileField = BoolBitfieldElementT<0>;
801 using AtomicOrderingField =
802 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
803 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
804 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
805 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
806 OperationField, AlignmentField>(),
807 "Bitfields must be contiguous");
808
809 BinOp getOperation() const { return getSubclassData<OperationField>(); }
810
811 static StringRef getOperationName(BinOp Op);
812
813 static bool isFPOperation(BinOp Op) {
814 switch (Op) {
815 case AtomicRMWInst::FAdd:
816 case AtomicRMWInst::FSub:
817 return true;
818 default:
819 return false;
820 }
821 }
822
823 void setOperation(BinOp Operation) {
824 setSubclassData<OperationField>(Operation);
825 }
826
827 /// Return the alignment of the memory that is being allocated by the
828 /// instruction.
829 Align getAlign() const {
830 return Align(1ULL << getSubclassData<AlignmentField>());
831 }
832
833 void setAlignment(Align Align) {
834 setSubclassData<AlignmentField>(Log2(Align));
835 }
836
837 /// Return true if this is a RMW on a volatile memory location.
838 ///
839 bool isVolatile() const { return getSubclassData<VolatileField>(); }
840
841 /// Specify whether this is a volatile RMW or not.
842 ///
843 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
844
845 /// Transparently provide more efficient getOperand methods.
846 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
847
848 /// Returns the ordering constraint of this rmw instruction.
849 AtomicOrdering getOrdering() const {
850 return getSubclassData<AtomicOrderingField>();
851 }
852
853 /// Sets the ordering constraint of this rmw instruction.
854 void setOrdering(AtomicOrdering Ordering) {
855 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "llvm/include/llvm/IR/Instructions.h", 856, __extension__ __PRETTY_FUNCTION__
))
856 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "llvm/include/llvm/IR/Instructions.h", 856, __extension__ __PRETTY_FUNCTION__
))
;
857 setSubclassData<AtomicOrderingField>(Ordering);
858 }
859
860 /// Returns the synchronization scope ID of this rmw instruction.
861 SyncScope::ID getSyncScopeID() const {
862 return SSID;
863 }
864
865 /// Sets the synchronization scope ID of this rmw instruction.
866 void setSyncScopeID(SyncScope::ID SSID) {
867 this->SSID = SSID;
868 }
869
870 Value *getPointerOperand() { return getOperand(0); }
871 const Value *getPointerOperand() const { return getOperand(0); }
872 static unsigned getPointerOperandIndex() { return 0U; }
873
874 Value *getValOperand() { return getOperand(1); }
875 const Value *getValOperand() const { return getOperand(1); }
876
877 /// Returns the address space of the pointer operand.
878 unsigned getPointerAddressSpace() const {
879 return getPointerOperand()->getType()->getPointerAddressSpace();
880 }
881
882 bool isFloatingPointOperation() const {
883 return isFPOperation(getOperation());
884 }
885
886 // Methods for support type inquiry through isa, cast, and dyn_cast:
887 static bool classof(const Instruction *I) {
888 return I->getOpcode() == Instruction::AtomicRMW;
889 }
890 static bool classof(const Value *V) {
891 return isa<Instruction>(V) && classof(cast<Instruction>(V));
892 }
893
894private:
895 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896 AtomicOrdering Ordering, SyncScope::ID SSID);
897
898 // Shadow Instruction::setInstructionSubclassData with a private forwarding
899 // method so that subclasses cannot accidentally use it.
900 template <typename Bitfield>
901 void setSubclassData(typename Bitfield::Type Value) {
902 Instruction::setSubclassData<Bitfield>(Value);
903 }
904
905 /// The synchronization scope ID of this rmw instruction. Not quite enough
906 /// room in SubClassData for everything, so synchronization scope ID gets its
907 /// own field.
908 SyncScope::ID SSID;
909};
910
911template <>
912struct OperandTraits<AtomicRMWInst>
913 : public FixedNumOperandTraits<AtomicRMWInst,2> {
914};
915
916DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 916, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<AtomicRMWInst
>::op_begin(const_cast<AtomicRMWInst*>(this))[i_nocapture
].get()); } void AtomicRMWInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 916, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<AtomicRMWInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned AtomicRMWInst::getNumOperands()
const { return OperandTraits<AtomicRMWInst>::operands(
this); } template <int Idx_nocapture> Use &AtomicRMWInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &AtomicRMWInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
917
918//===----------------------------------------------------------------------===//
919// GetElementPtrInst Class
920//===----------------------------------------------------------------------===//
921
922// checkGEPType - Simple wrapper function to give a better assertion failure
923// message on bad indexes for a gep instruction.
924//
925inline Type *checkGEPType(Type *Ty) {
926 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "llvm/include/llvm/IR/Instructions.h", 926, __extension__ __PRETTY_FUNCTION__
))
;
927 return Ty;
928}
929
930/// an instruction for type-safe pointer arithmetic to
931/// access elements of arrays and structs
932///
933class GetElementPtrInst : public Instruction {
934 Type *SourceElementType;
935 Type *ResultElementType;
936
937 GetElementPtrInst(const GetElementPtrInst &GEPI);
938
939 /// Constructors - Create a getelementptr instruction with a base pointer an
940 /// list of indices. The first ctor can optionally insert before an existing
941 /// instruction, the second appends the new instruction to the specified
942 /// BasicBlock.
943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
944 ArrayRef<Value *> IdxList, unsigned Values,
945 const Twine &NameStr, Instruction *InsertBefore);
946 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
947 ArrayRef<Value *> IdxList, unsigned Values,
948 const Twine &NameStr, BasicBlock *InsertAtEnd);
949
950 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
951
952protected:
953 // Note: Instruction needs to be a friend here to call cloneImpl.
954 friend class Instruction;
955
956 GetElementPtrInst *cloneImpl() const;
957
958public:
959 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
960 ArrayRef<Value *> IdxList,
961 const Twine &NameStr = "",
962 Instruction *InsertBefore = nullptr) {
963 unsigned Values = 1 + unsigned(IdxList.size());
964 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "llvm/include/llvm/IR/Instructions.h", 964, __extension__ __PRETTY_FUNCTION__
))
;
965 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 966, __extension__ __PRETTY_FUNCTION__
))
966 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 966, __extension__ __PRETTY_FUNCTION__
))
;
967 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
968 NameStr, InsertBefore);
969 }
970
971 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
972 ArrayRef<Value *> IdxList,
973 const Twine &NameStr,
974 BasicBlock *InsertAtEnd) {
975 unsigned Values = 1 + unsigned(IdxList.size());
976 assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type"
) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\""
, "llvm/include/llvm/IR/Instructions.h", 976, __extension__ __PRETTY_FUNCTION__
))
;
977 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 978, __extension__ __PRETTY_FUNCTION__
))
978 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "llvm/include/llvm/IR/Instructions.h", 978, __extension__ __PRETTY_FUNCTION__
))
;
979 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
980 NameStr, InsertAtEnd);
981 }
982
983 /// Create an "inbounds" getelementptr. See the documentation for the
984 /// "inbounds" flag in LangRef.html for details.
985 static GetElementPtrInst *
986 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
987 const Twine &NameStr = "",
988 Instruction *InsertBefore = nullptr) {
989 GetElementPtrInst *GEP =
990 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
991 GEP->setIsInBounds(true);
992 return GEP;
993 }
994
995 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
996 ArrayRef<Value *> IdxList,
997 const Twine &NameStr,
998 BasicBlock *InsertAtEnd) {
999 GetElementPtrInst *GEP =
1000 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1001 GEP->setIsInBounds(true);
1002 return GEP;
1003 }
1004
1005 /// Transparently provide more efficient getOperand methods.
1006 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1007
1008 Type *getSourceElementType() const { return SourceElementType; }
1009
1010 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1011 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1012
1013 Type *getResultElementType() const {
1014 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1015, __extension__ __PRETTY_FUNCTION__
))
1015 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1015, __extension__ __PRETTY_FUNCTION__
))
;
1016 return ResultElementType;
1017 }
1018
1019 /// Returns the address space of this instruction's pointer type.
1020 unsigned getAddressSpace() const {
1021 // Note that this is always the same as the pointer operand's address space
1022 // and that is cheaper to compute, so cheat here.
1023 return getPointerAddressSpace();
1024 }
1025
1026 /// Returns the result type of a getelementptr with the given source
1027 /// element type and indexes.
1028 ///
1029 /// Null is returned if the indices are invalid for the specified
1030 /// source element type.
1031 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1032 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1033 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1034
1035 /// Return the type of the element at the given index of an indexable
1036 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1037 ///
1038 /// Returns null if the type can't be indexed, or the given index is not
1039 /// legal for the given type.
1040 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1041 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1042
1043 inline op_iterator idx_begin() { return op_begin()+1; }
1044 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1045 inline op_iterator idx_end() { return op_end(); }
1046 inline const_op_iterator idx_end() const { return op_end(); }
1047
1048 inline iterator_range<op_iterator> indices() {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 inline iterator_range<const_op_iterator> indices() const {
1053 return make_range(idx_begin(), idx_end());
1054 }
1055
1056 Value *getPointerOperand() {
1057 return getOperand(0);
1058 }
1059 const Value *getPointerOperand() const {
1060 return getOperand(0);
1061 }
1062 static unsigned getPointerOperandIndex() {
1063 return 0U; // get index for modifying correct operand.
1064 }
1065
1066 /// Method to return the pointer operand as a
1067 /// PointerType.
1068 Type *getPointerOperandType() const {
1069 return getPointerOperand()->getType();
1070 }
1071
1072 /// Returns the address space of the pointer operand.
1073 unsigned getPointerAddressSpace() const {
1074 return getPointerOperandType()->getPointerAddressSpace();
1075 }
1076
1077 /// Returns the pointer type returned by the GEP
1078 /// instruction, which may be a vector of pointers.
1079 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1080 ArrayRef<Value *> IdxList) {
1081 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1082 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1083 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1084 Type *PtrTy = OrigPtrTy->isOpaque()
1085 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1086 : PointerType::get(ResultElemTy, AddrSpace);
1087 // Vector GEP
1088 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1089 ElementCount EltCount = PtrVTy->getElementCount();
1090 return VectorType::get(PtrTy, EltCount);
1091 }
1092 for (Value *Index : IdxList)
1093 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1094 ElementCount EltCount = IndexVTy->getElementCount();
1095 return VectorType::get(PtrTy, EltCount);
1096 }
1097 // Scalar GEP
1098 return PtrTy;
1099 }
1100
1101 unsigned getNumIndices() const { // Note: always non-negative
1102 return getNumOperands() - 1;
1103 }
1104
1105 bool hasIndices() const {
1106 return getNumOperands() > 1;
1107 }
1108
1109 /// Return true if all of the indices of this GEP are
1110 /// zeros. If so, the result pointer and the first operand have the same
1111 /// value, just potentially different types.
1112 bool hasAllZeroIndices() const;
1113
1114 /// Return true if all of the indices of this GEP are
1115 /// constant integers. If so, the result pointer and the first operand have
1116 /// a constant offset between them.
1117 bool hasAllConstantIndices() const;
1118
1119 /// Set or clear the inbounds flag on this GEP instruction.
1120 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1121 void setIsInBounds(bool b = true);
1122
1123 /// Determine whether the GEP has the inbounds flag.
1124 bool isInBounds() const;
1125
1126 /// Accumulate the constant address offset of this GEP if possible.
1127 ///
1128 /// This routine accepts an APInt into which it will accumulate the constant
1129 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1130 /// all-constant, it returns false and the value of the offset APInt is
1131 /// undefined (it is *not* preserved!). The APInt passed into this routine
1132 /// must be at least as wide as the IntPtr type for the address space of
1133 /// the base GEP pointer.
1134 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1135 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1136 MapVector<Value *, APInt> &VariableOffsets,
1137 APInt &ConstantOffset) const;
1138 // Methods for support type inquiry through isa, cast, and dyn_cast:
1139 static bool classof(const Instruction *I) {
1140 return (I->getOpcode() == Instruction::GetElementPtr);
1141 }
1142 static bool classof(const Value *V) {
1143 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1144 }
1145};
1146
1147template <>
1148struct OperandTraits<GetElementPtrInst> :
1149 public VariadicOperandTraits<GetElementPtrInst, 1> {
1150};
1151
1152GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1153 ArrayRef<Value *> IdxList, unsigned Values,
1154 const Twine &NameStr,
1155 Instruction *InsertBefore)
1156 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1157 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1158 Values, InsertBefore),
1159 SourceElementType(PointeeType),
1160 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1161 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1162, __extension__ __PRETTY_FUNCTION__
))
1162 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1162, __extension__ __PRETTY_FUNCTION__
))
;
1163 init(Ptr, IdxList, NameStr);
1164}
1165
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 BasicBlock *InsertAtEnd)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertAtEnd),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1176, __extension__ __PRETTY_FUNCTION__
))
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "llvm/include/llvm/IR/Instructions.h", 1176, __extension__ __PRETTY_FUNCTION__
))
;
1177 init(Ptr, IdxList, NameStr);
1178}
1179
1180DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1180, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<GetElementPtrInst
>::op_begin(const_cast<GetElementPtrInst*>(this))[i_nocapture
].get()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1180, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<GetElementPtrInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned GetElementPtrInst::getNumOperands
() const { return OperandTraits<GetElementPtrInst>::operands
(this); } template <int Idx_nocapture> Use &GetElementPtrInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &GetElementPtrInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1181
1182//===----------------------------------------------------------------------===//
1183// ICmpInst Class
1184//===----------------------------------------------------------------------===//
1185
1186/// This instruction compares its operands according to the predicate given
1187/// to the constructor. It only operates on integers or pointers. The operands
1188/// must be identical types.
1189/// Represent an integer comparison operator.
1190class ICmpInst: public CmpInst {
1191 void AssertOK() {
1192 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "llvm/include/llvm/IR/Instructions.h", 1193, __extension__ __PRETTY_FUNCTION__
))
1193 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "llvm/include/llvm/IR/Instructions.h", 1193, __extension__ __PRETTY_FUNCTION__
))
;
1194 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1195, __extension__ __PRETTY_FUNCTION__
))
1195 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1195, __extension__ __PRETTY_FUNCTION__
))
;
1196 // Check that the operands are the right type
1197 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1199, __extension__ __PRETTY_FUNCTION__
))
1198 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1199, __extension__ __PRETTY_FUNCTION__
))
1199 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1199, __extension__ __PRETTY_FUNCTION__
))
;
1200 }
1201
1202protected:
1203 // Note: Instruction needs to be a friend here to call cloneImpl.
1204 friend class Instruction;
1205
1206 /// Clone an identical ICmpInst
1207 ICmpInst *cloneImpl() const;
1208
1209public:
1210 /// Constructor with insert-before-instruction semantics.
1211 ICmpInst(
1212 Instruction *InsertBefore, ///< Where to insert
1213 Predicate pred, ///< The predicate to use for the comparison
1214 Value *LHS, ///< The left-hand-side of the expression
1215 Value *RHS, ///< The right-hand-side of the expression
1216 const Twine &NameStr = "" ///< Name of the instruction
1217 ) : CmpInst(makeCmpResultType(LHS->getType()),
1218 Instruction::ICmp, pred, LHS, RHS, NameStr,
1219 InsertBefore) {
1220#ifndef NDEBUG
1221 AssertOK();
1222#endif
1223 }
1224
1225 /// Constructor with insert-at-end semantics.
1226 ICmpInst(
1227 BasicBlock &InsertAtEnd, ///< Block to insert into.
1228 Predicate pred, ///< The predicate to use for the comparison
1229 Value *LHS, ///< The left-hand-side of the expression
1230 Value *RHS, ///< The right-hand-side of the expression
1231 const Twine &NameStr = "" ///< Name of the instruction
1232 ) : CmpInst(makeCmpResultType(LHS->getType()),
1233 Instruction::ICmp, pred, LHS, RHS, NameStr,
1234 &InsertAtEnd) {
1235#ifndef NDEBUG
1236 AssertOK();
1237#endif
1238 }
1239
1240 /// Constructor with no-insertion semantics
1241 ICmpInst(
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1248#ifndef NDEBUG
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1254 /// @returns the predicate that would be the result if the operand were
1255 /// regarded as signed.
1256 /// Return the signed version of the predicate
1257 Predicate getSignedPredicate() const {
1258 return getSignedPredicate(getPredicate());
1259 }
1260
1261 /// This is a static version that you can use without an instruction.
1262 /// Return the signed version of the predicate.
1263 static Predicate getSignedPredicate(Predicate pred);
1264
1265 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1266 /// @returns the predicate that would be the result if the operand were
1267 /// regarded as unsigned.
1268 /// Return the unsigned version of the predicate
1269 Predicate getUnsignedPredicate() const {
1270 return getUnsignedPredicate(getPredicate());
1271 }
1272
1273 /// This is a static version that you can use without an instruction.
1274 /// Return the unsigned version of the predicate.
1275 static Predicate getUnsignedPredicate(Predicate pred);
1276
1277 /// Return true if this predicate is either EQ or NE. This also
1278 /// tests for commutativity.
1279 static bool isEquality(Predicate P) {
1280 return P == ICMP_EQ || P == ICMP_NE;
12
Assuming 'P' is not equal to ICMP_EQ
13
Assuming 'P' is equal to ICMP_NE
14
Returning the value 1, which participates in a condition later
1281 }
1282
1283 /// Return true if this predicate is either EQ or NE. This also
1284 /// tests for commutativity.
1285 bool isEquality() const {
1286 return isEquality(getPredicate());
11
Calling 'ICmpInst::isEquality'
15
Returning from 'ICmpInst::isEquality'
16
Returning the value 1, which participates in a condition later
1287 }
1288
1289 /// @returns true if the predicate of this ICmpInst is commutative
1290 /// Determine if this relation is commutative.
1291 bool isCommutative() const { return isEquality(); }
1292
1293 /// Return true if the predicate is relational (not EQ or NE).
1294 ///
1295 bool isRelational() const {
1296 return !isEquality();
1297 }
1298
1299 /// Return true if the predicate is relational (not EQ or NE).
1300 ///
1301 static bool isRelational(Predicate P) {
1302 return !isEquality(P);
1303 }
1304
1305 /// Return true if the predicate is SGT or UGT.
1306 ///
1307 static bool isGT(Predicate P) {
1308 return P == ICMP_SGT || P == ICMP_UGT;
1309 }
1310
1311 /// Return true if the predicate is SLT or ULT.
1312 ///
1313 static bool isLT(Predicate P) {
1314 return P == ICMP_SLT || P == ICMP_ULT;
1315 }
1316
1317 /// Return true if the predicate is SGE or UGE.
1318 ///
1319 static bool isGE(Predicate P) {
1320 return P == ICMP_SGE || P == ICMP_UGE;
1321 }
1322
1323 /// Return true if the predicate is SLE or ULE.
1324 ///
1325 static bool isLE(Predicate P) {
1326 return P == ICMP_SLE || P == ICMP_ULE;
1327 }
1328
1329 /// Returns the sequence of all ICmp predicates.
1330 ///
1331 static auto predicates() { return ICmpPredicates(); }
1332
1333 /// Exchange the two operands to this instruction in such a way that it does
1334 /// not modify the semantics of the instruction. The predicate value may be
1335 /// changed to retain the same result if the predicate is order dependent
1336 /// (e.g. ult).
1337 /// Swap operands and adjust predicate.
1338 void swapOperands() {
1339 setPredicate(getSwappedPredicate());
1340 Op<0>().swap(Op<1>());
1341 }
1342
1343 /// Return result of `LHS Pred RHS` comparison.
1344 static bool compare(const APInt &LHS, const APInt &RHS,
1345 ICmpInst::Predicate Pred);
1346
1347 // Methods for support type inquiry through isa, cast, and dyn_cast:
1348 static bool classof(const Instruction *I) {
1349 return I->getOpcode() == Instruction::ICmp;
1350 }
1351 static bool classof(const Value *V) {
1352 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1353 }
1354};
1355
1356//===----------------------------------------------------------------------===//
1357// FCmpInst Class
1358//===----------------------------------------------------------------------===//
1359
1360/// This instruction compares its operands according to the predicate given
1361/// to the constructor. It only operates on floating point values or packed
1362/// vectors of floating point values. The operands must be identical types.
1363/// Represents a floating point comparison operator.
1364class FCmpInst: public CmpInst {
1365 void AssertOK() {
1366 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "llvm/include/llvm/IR/Instructions.h", 1366, __extension__ __PRETTY_FUNCTION__
))
;
1367 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1368, __extension__ __PRETTY_FUNCTION__
))
1368 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "llvm/include/llvm/IR/Instructions.h", 1368, __extension__ __PRETTY_FUNCTION__
))
;
1369 // Check that the operands are the right type
1370 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1371, __extension__ __PRETTY_FUNCTION__
))
1371 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "llvm/include/llvm/IR/Instructions.h", 1371, __extension__ __PRETTY_FUNCTION__
))
;
1372 }
1373
1374protected:
1375 // Note: Instruction needs to be a friend here to call cloneImpl.
1376 friend class Instruction;
1377
1378 /// Clone an identical FCmpInst
1379 FCmpInst *cloneImpl() const;
1380
1381public:
1382 /// Constructor with insert-before-instruction semantics.
1383 FCmpInst(
1384 Instruction *InsertBefore, ///< Where to insert
1385 Predicate pred, ///< The predicate to use for the comparison
1386 Value *LHS, ///< The left-hand-side of the expression
1387 Value *RHS, ///< The right-hand-side of the expression
1388 const Twine &NameStr = "" ///< Name of the instruction
1389 ) : CmpInst(makeCmpResultType(LHS->getType()),
1390 Instruction::FCmp, pred, LHS, RHS, NameStr,
1391 InsertBefore) {
1392 AssertOK();
1393 }
1394
1395 /// Constructor with insert-at-end semantics.
1396 FCmpInst(
1397 BasicBlock &InsertAtEnd, ///< Block to insert into.
1398 Predicate pred, ///< The predicate to use for the comparison
1399 Value *LHS, ///< The left-hand-side of the expression
1400 Value *RHS, ///< The right-hand-side of the expression
1401 const Twine &NameStr = "" ///< Name of the instruction
1402 ) : CmpInst(makeCmpResultType(LHS->getType()),
1403 Instruction::FCmp, pred, LHS, RHS, NameStr,
1404 &InsertAtEnd) {
1405 AssertOK();
1406 }
1407
1408 /// Constructor with no-insertion semantics
1409 FCmpInst(
1410 Predicate Pred, ///< The predicate to use for the comparison
1411 Value *LHS, ///< The left-hand-side of the expression
1412 Value *RHS, ///< The right-hand-side of the expression
1413 const Twine &NameStr = "", ///< Name of the instruction
1414 Instruction *FlagsSource = nullptr
1415 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1416 RHS, NameStr, nullptr, FlagsSource) {
1417 AssertOK();
1418 }
1419
1420 /// @returns true if the predicate of this instruction is EQ or NE.
1421 /// Determine if this is an equality predicate.
1422 static bool isEquality(Predicate Pred) {
1423 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1424 Pred == FCMP_UNE;
1425 }
1426
1427 /// @returns true if the predicate of this instruction is EQ or NE.
1428 /// Determine if this is an equality predicate.
1429 bool isEquality() const { return isEquality(getPredicate()); }
1430
1431 /// @returns true if the predicate of this instruction is commutative.
1432 /// Determine if this is a commutative predicate.
1433 bool isCommutative() const {
1434 return isEquality() ||
1435 getPredicate() == FCMP_FALSE ||
1436 getPredicate() == FCMP_TRUE ||
1437 getPredicate() == FCMP_ORD ||
1438 getPredicate() == FCMP_UNO;
1439 }
1440
1441 /// @returns true if the predicate is relational (not EQ or NE).
1442 /// Determine if this a relational predicate.
1443 bool isRelational() const { return !isEquality(); }
1444
1445 /// Exchange the two operands to this instruction in such a way that it does
1446 /// not modify the semantics of the instruction. The predicate value may be
1447 /// changed to retain the same result if the predicate is order dependent
1448 /// (e.g. ult).
1449 /// Swap operands and adjust predicate.
1450 void swapOperands() {
1451 setPredicate(getSwappedPredicate());
1452 Op<0>().swap(Op<1>());
1453 }
1454
1455 /// Returns the sequence of all FCmp predicates.
1456 ///
1457 static auto predicates() { return FCmpPredicates(); }
1458
1459 /// Return result of `LHS Pred RHS` comparison.
1460 static bool compare(const APFloat &LHS, const APFloat &RHS,
1461 FCmpInst::Predicate Pred);
1462
1463 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1464 static bool classof(const Instruction *I) {
1465 return I->getOpcode() == Instruction::FCmp;
1466 }
1467 static bool classof(const Value *V) {
1468 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1469 }
1470};
1471
1472//===----------------------------------------------------------------------===//
1473/// This class represents a function call, abstracting a target
1474/// machine's calling convention. This class uses low bit of the SubClassData
1475/// field to indicate whether or not this is a tail call. The rest of the bits
1476/// hold the calling convention of the call.
1477///
1478class CallInst : public CallBase {
1479 CallInst(const CallInst &CI);
1480
1481 /// Construct a CallInst given a range of arguments.
1482 /// Construct a CallInst from a range of arguments
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1485 Instruction *InsertBefore);
1486
1487 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1488 const Twine &NameStr, Instruction *InsertBefore)
1489 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1490
1491 /// Construct a CallInst given a range of arguments.
1492 /// Construct a CallInst from a range of arguments
1493 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1494 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1495 BasicBlock *InsertAtEnd);
1496
1497 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1498 Instruction *InsertBefore);
1499
1500 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1501 BasicBlock *InsertAtEnd);
1502
1503 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1504 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1505 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1506
1507 /// Compute the number of operands to allocate.
1508 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1509 // We need one operand for the called function, plus the input operand
1510 // counts provided.
1511 return 1 + NumArgs + NumBundleInputs;
1512 }
1513
1514protected:
1515 // Note: Instruction needs to be a friend here to call cloneImpl.
1516 friend class Instruction;
1517
1518 CallInst *cloneImpl() const;
1519
1520public:
1521 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1522 Instruction *InsertBefore = nullptr) {
1523 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1524 }
1525
1526 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1527 const Twine &NameStr,
1528 Instruction *InsertBefore = nullptr) {
1529 return new (ComputeNumOperands(Args.size()))
1530 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1531 }
1532
1533 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1534 ArrayRef<OperandBundleDef> Bundles = None,
1535 const Twine &NameStr = "",
1536 Instruction *InsertBefore = nullptr) {
1537 const int NumOperands =
1538 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1539 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1540
1541 return new (NumOperands, DescriptorBytes)
1542 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1543 }
1544
1545 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1546 BasicBlock *InsertAtEnd) {
1547 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1548 }
1549
1550 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1551 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1552 return new (ComputeNumOperands(Args.size()))
1553 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1554 }
1555
1556 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1557 ArrayRef<OperandBundleDef> Bundles,
1558 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1559 const int NumOperands =
1560 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1561 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1562
1563 return new (NumOperands, DescriptorBytes)
1564 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1565 }
1566
1567 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1568 Instruction *InsertBefore = nullptr) {
1569 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1570 InsertBefore);
1571 }
1572
1573 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1574 ArrayRef<OperandBundleDef> Bundles = None,
1575 const Twine &NameStr = "",
1576 Instruction *InsertBefore = nullptr) {
1577 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1578 NameStr, InsertBefore);
1579 }
1580
1581 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1582 const Twine &NameStr,
1583 Instruction *InsertBefore = nullptr) {
1584 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1585 InsertBefore);
1586 }
1587
1588 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1589 BasicBlock *InsertAtEnd) {
1590 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1591 InsertAtEnd);
1592 }
1593
1594 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1595 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1596 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1597 InsertAtEnd);
1598 }
1599
1600 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1601 ArrayRef<OperandBundleDef> Bundles,
1602 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1603 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1604 NameStr, InsertAtEnd);
1605 }
1606
1607 /// Create a clone of \p CI with a different set of operand bundles and
1608 /// insert it before \p InsertPt.
1609 ///
1610 /// The returned call instruction is identical \p CI in every way except that
1611 /// the operand bundles for the new instruction are set to the operand bundles
1612 /// in \p Bundles.
1613 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1614 Instruction *InsertPt = nullptr);
1615
1616 /// Generate the IR for a call to malloc:
1617 /// 1. Compute the malloc call's argument as the specified type's size,
1618 /// possibly multiplied by the array size if the array size is not
1619 /// constant 1.
1620 /// 2. Call malloc with that argument.
1621 /// 3. Bitcast the result of the malloc call to the specified type.
1622 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1623 Type *AllocTy, Value *AllocSize,
1624 Value *ArraySize = nullptr,
1625 Function *MallocF = nullptr,
1626 const Twine &Name = "");
1627 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1628 Type *AllocTy, Value *AllocSize,
1629 Value *ArraySize = nullptr,
1630 Function *MallocF = nullptr,
1631 const Twine &Name = "");
1632 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1633 Type *AllocTy, Value *AllocSize,
1634 Value *ArraySize = nullptr,
1635 ArrayRef<OperandBundleDef> Bundles = None,
1636 Function *MallocF = nullptr,
1637 const Twine &Name = "");
1638 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1639 Type *AllocTy, Value *AllocSize,
1640 Value *ArraySize = nullptr,
1641 ArrayRef<OperandBundleDef> Bundles = None,
1642 Function *MallocF = nullptr,
1643 const Twine &Name = "");
1644 /// Generate the IR for a call to the builtin free function.
1645 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1647 static Instruction *CreateFree(Value *Source,
1648 ArrayRef<OperandBundleDef> Bundles,
1649 Instruction *InsertBefore);
1650 static Instruction *CreateFree(Value *Source,
1651 ArrayRef<OperandBundleDef> Bundles,
1652 BasicBlock *InsertAtEnd);
1653
1654 // Note that 'musttail' implies 'tail'.
1655 enum TailCallKind : unsigned {
1656 TCK_None = 0,
1657 TCK_Tail = 1,
1658 TCK_MustTail = 2,
1659 TCK_NoTail = 3,
1660 TCK_LAST = TCK_NoTail
1661 };
1662
1663 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1664 static_assert(
1665 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1666 "Bitfields must be contiguous");
1667
1668 TailCallKind getTailCallKind() const {
1669 return getSubclassData<TailCallKindField>();
1670 }
1671
1672 bool isTailCall() const {
1673 TailCallKind Kind = getTailCallKind();
1674 return Kind == TCK_Tail || Kind == TCK_MustTail;
1675 }
1676
1677 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1678
1679 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1680
1681 void setTailCallKind(TailCallKind TCK) {
1682 setSubclassData<TailCallKindField>(TCK);
1683 }
1684
1685 void setTailCall(bool IsTc = true) {
1686 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1687 }
1688
1689 /// Return true if the call can return twice
1690 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1691 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1692
1693 // Methods for support type inquiry through isa, cast, and dyn_cast:
1694 static bool classof(const Instruction *I) {
1695 return I->getOpcode() == Instruction::Call;
1696 }
1697 static bool classof(const Value *V) {
1698 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1699 }
1700
1701 /// Updates profile metadata by scaling it by \p S / \p T.
1702 void updateProfWeight(uint64_t S, uint64_t T);
1703
1704private:
1705 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1706 // method so that subclasses cannot accidentally use it.
1707 template <typename Bitfield>
1708 void setSubclassData(typename Bitfield::Type Value) {
1709 Instruction::setSubclassData<Bitfield>(Value);
1710 }
1711};
1712
1713CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1714 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1715 BasicBlock *InsertAtEnd)
1716 : CallBase(Ty->getReturnType(), Instruction::Call,
1717 OperandTraits<CallBase>::op_end(this) -
1718 (Args.size() + CountBundleInputs(Bundles) + 1),
1719 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1720 InsertAtEnd) {
1721 init(Ty, Func, Args, Bundles, NameStr);
1722}
1723
1724CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1725 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1726 Instruction *InsertBefore)
1727 : CallBase(Ty->getReturnType(), Instruction::Call,
1728 OperandTraits<CallBase>::op_end(this) -
1729 (Args.size() + CountBundleInputs(Bundles) + 1),
1730 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1731 InsertBefore) {
1732 init(Ty, Func, Args, Bundles, NameStr);
1733}
1734
1735//===----------------------------------------------------------------------===//
1736// SelectInst Class
1737//===----------------------------------------------------------------------===//
1738
1739/// This class represents the LLVM 'select' instruction.
1740///
1741class SelectInst : public Instruction {
1742 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1743 Instruction *InsertBefore)
1744 : Instruction(S1->getType(), Instruction::Select,
1745 &Op<0>(), 3, InsertBefore) {
1746 init(C, S1, S2);
1747 setName(NameStr);
1748 }
1749
1750 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1751 BasicBlock *InsertAtEnd)
1752 : Instruction(S1->getType(), Instruction::Select,
1753 &Op<0>(), 3, InsertAtEnd) {
1754 init(C, S1, S2);
1755 setName(NameStr);
1756 }
1757
1758 void init(Value *C, Value *S1, Value *S2) {
1759 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "llvm/include/llvm/IR/Instructions.h", 1759, __extension__ __PRETTY_FUNCTION__
))
;
1760 Op<0>() = C;
1761 Op<1>() = S1;
1762 Op<2>() = S2;
1763 }
1764
1765protected:
1766 // Note: Instruction needs to be a friend here to call cloneImpl.
1767 friend class Instruction;
1768
1769 SelectInst *cloneImpl() const;
1770
1771public:
1772 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1773 const Twine &NameStr = "",
1774 Instruction *InsertBefore = nullptr,
1775 Instruction *MDFrom = nullptr) {
1776 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1777 if (MDFrom)
1778 Sel->copyMetadata(*MDFrom);
1779 return Sel;
1780 }
1781
1782 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1783 const Twine &NameStr,
1784 BasicBlock *InsertAtEnd) {
1785 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1786 }
1787
1788 const Value *getCondition() const { return Op<0>(); }
1789 const Value *getTrueValue() const { return Op<1>(); }
1790 const Value *getFalseValue() const { return Op<2>(); }
1791 Value *getCondition() { return Op<0>(); }
1792 Value *getTrueValue() { return Op<1>(); }
1793 Value *getFalseValue() { return Op<2>(); }
1794
1795 void setCondition(Value *V) { Op<0>() = V; }
1796 void setTrueValue(Value *V) { Op<1>() = V; }
1797 void setFalseValue(Value *V) { Op<2>() = V; }
1798
1799 /// Swap the true and false values of the select instruction.
1800 /// This doesn't swap prof metadata.
1801 void swapValues() { Op<1>().swap(Op<2>()); }
1802
1803 /// Return a string if the specified operands are invalid
1804 /// for a select operation, otherwise return null.
1805 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1806
1807 /// Transparently provide more efficient getOperand methods.
1808 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1809
1810 OtherOps getOpcode() const {
1811 return static_cast<OtherOps>(Instruction::getOpcode());
1812 }
1813
1814 // Methods for support type inquiry through isa, cast, and dyn_cast:
1815 static bool classof(const Instruction *I) {
1816 return I->getOpcode() == Instruction::Select;
1817 }
1818 static bool classof(const Value *V) {
1819 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1820 }
1821};
1822
1823template <>
1824struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1825};
1826
1827DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1827, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this))[i_nocapture
].get()); } void SelectInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<SelectInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1827, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<SelectInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned SelectInst::getNumOperands() const
{ return OperandTraits<SelectInst>::operands(this); } template
<int Idx_nocapture> Use &SelectInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &SelectInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
1828
1829//===----------------------------------------------------------------------===//
1830// VAArgInst Class
1831//===----------------------------------------------------------------------===//
1832
1833/// This class represents the va_arg llvm instruction, which returns
1834/// an argument of the specified type given a va_list and increments that list
1835///
1836class VAArgInst : public UnaryInstruction {
1837protected:
1838 // Note: Instruction needs to be a friend here to call cloneImpl.
1839 friend class Instruction;
1840
1841 VAArgInst *cloneImpl() const;
1842
1843public:
// Both constructors forward the va_list pointer (List) as the single operand
// of the UnaryInstruction base; Ty is the type of the argument being read.
1844 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1845 Instruction *InsertBefore = nullptr)
1846 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1847 setName(NameStr);
1848 }
1849
1850 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1851 BasicBlock *InsertAtEnd)
1852 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1853 setName(NameStr);
1854 }
1855
// Operand 0 is the va_list pointer passed to the constructor.
1856 Value *getPointerOperand() { return getOperand(0); }
1857 const Value *getPointerOperand() const { return getOperand(0); }
1858 static unsigned getPointerOperandIndex() { return 0U; }
1859
1860 // Methods for support type inquiry through isa, cast, and dyn_cast:
1861 static bool classof(const Instruction *I) {
1862 return I->getOpcode() == VAArg;
1863 }
1864 static bool classof(const Value *V) {
1865 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1866 }
1867};
1868
1869//===----------------------------------------------------------------------===//
1870// ExtractElementInst Class
1871//===----------------------------------------------------------------------===//
1872
1873/// This instruction extracts a single (scalar)
1874/// element from a VectorType value
1875///
1876class ExtractElementInst : public Instruction {
// Constructors are private; use the static Create() factories below, which
// allocate the hung-off operand storage via placement new(2).
1877 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878 Instruction *InsertBefore = nullptr);
1879 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880 BasicBlock *InsertAtEnd);
1881
1882protected:
1883 // Note: Instruction needs to be a friend here to call cloneImpl.
1884 friend class Instruction;
1885
1886 ExtractElementInst *cloneImpl() const;
1887
1888public:
1889 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890 const Twine &NameStr = "",
1891 Instruction *InsertBefore = nullptr) {
1892 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893 }
1894
1895 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896 const Twine &NameStr,
1897 BasicBlock *InsertAtEnd) {
1898 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899 }
1900
1901 /// Return true if an extractelement instruction can be
1902 /// formed with the specified operands.
1903 static bool isValidOperands(const Value *Vec, const Value *Idx);
1904
// Operand <0> is the source vector, operand <1> is the scalar index.
1905 Value *getVectorOperand() { return Op<0>(); }
1906 Value *getIndexOperand() { return Op<1>(); }
1907 const Value *getVectorOperand() const { return Op<0>(); }
1908 const Value *getIndexOperand() const { return Op<1>(); }
1909
1910 VectorType *getVectorOperandType() const {
1911 return cast<VectorType>(getVectorOperand()->getType());
1912 }
1913
1914 /// Transparently provide more efficient getOperand methods.
1915 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1916
1917 // Methods for support type inquiry through isa, cast, and dyn_cast:
1918 static bool classof(const Instruction *I) {
1919 return I->getOpcode() == Instruction::ExtractElement;
1920 }
1921 static bool classof(const Value *V) {
1922 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923 }
1924};
1925
// ExtractElementInst has exactly two fixed operands (vector, index).
1926template <>
1927struct OperandTraits<ExtractElementInst> :
1928 public FixedNumOperandTraits<ExtractElementInst, 2> {
1929};
1930
// Preprocessor-expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS for
// ExtractElementInst: iterator accessors, range-asserted getOperand/setOperand,
// getNumOperands, and templated Op<Idx>() helpers.
1931DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1931, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this))[i_nocapture
].get()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1931, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ExtractElementInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ExtractElementInst::getNumOperands
() const { return OperandTraits<ExtractElementInst>::operands
(this); } template <int Idx_nocapture> Use &ExtractElementInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &ExtractElementInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1932
1933//===----------------------------------------------------------------------===//
1934// InsertElementInst Class
1935//===----------------------------------------------------------------------===//
1936
1937/// This instruction inserts a single (scalar)
1938/// element into a VectorType value
1939///
1940class InsertElementInst : public Instruction {
// Constructors are private; use the static Create() factories below, which
// allocate the three-operand storage via placement new(3).
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942 const Twine &NameStr = "",
1943 Instruction *InsertBefore = nullptr);
1944 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945 BasicBlock *InsertAtEnd);
1946
1947protected:
1948 // Note: Instruction needs to be a friend here to call cloneImpl.
1949 friend class Instruction;
1950
1951 InsertElementInst *cloneImpl() const;
1952
1953public:
// The three operands, in order: the vector, the new scalar element, and the
// insertion index (mirrors the constructor parameter order).
1954 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955 const Twine &NameStr = "",
1956 Instruction *InsertBefore = nullptr) {
1957 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958 }
1959
1960 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961 const Twine &NameStr,
1962 BasicBlock *InsertAtEnd) {
1963 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964 }
1965
1966 /// Return true if an insertelement instruction can be
1967 /// formed with the specified operands.
1968 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969 const Value *Idx);
1970
1971 /// Overload to return most specific vector type.
1972 ///
1973 VectorType *getType() const {
1974 return cast<VectorType>(Instruction::getType());
1975 }
1976
1977 /// Transparently provide more efficient getOperand methods.
1978 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1979
1980 // Methods for support type inquiry through isa, cast, and dyn_cast:
1981 static bool classof(const Instruction *I) {
1982 return I->getOpcode() == Instruction::InsertElement;
1983 }
1984 static bool classof(const Value *V) {
1985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986 }
1987};
1988
// InsertElementInst has exactly three fixed operands (vector, element, index).
1989template <>
1990struct OperandTraits<InsertElementInst> :
1991 public FixedNumOperandTraits<InsertElementInst, 3> {
1992};
1993
// Preprocessor-expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS for
// InsertElementInst: iterator accessors, range-asserted getOperand/setOperand,
// getNumOperands, and templated Op<Idx>() helpers.
1994DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1994, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<InsertElementInst
>::op_begin(const_cast<InsertElementInst*>(this))[i_nocapture
].get()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 1994, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<InsertElementInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned InsertElementInst::getNumOperands
() const { return OperandTraits<InsertElementInst>::operands
(this); } template <int Idx_nocapture> Use &InsertElementInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &InsertElementInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
// Sentinel shuffle-mask element: -1 marks a result lane whose value is
// undefined (see the ShuffleVectorInst class comment below).
2000constexpr int UndefMaskElem = -1;
2001
2002/// This instruction constructs a fixed permutation of two
2003/// input vectors.
2004///
2005/// For each element of the result vector, the shuffle mask selects an element
2006/// from one of the input vectors to copy to the result. Non-negative elements
2007/// in the mask represent an index into the concatenated pair of input vectors.
2008/// UndefMaskElem (-1) specifies that the result element is undefined.
2009///
2010/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011/// requirement may be relaxed in the future.
2012class ShuffleVectorInst : public Instruction {
// The mask is stored twice: as a plain integer vector (ShuffleMask) for fast
// queries, and as a Constant (ShuffleMaskForBitcode) kept only for the
// current bitcode encoding (see getShuffleMaskForBitcode below).
2013 SmallVector<int, 4> ShuffleMask;
2014 Constant *ShuffleMaskForBitcode;
2015
2016protected:
2017 // Note: Instruction needs to be a friend here to call cloneImpl.
2018 friend class Instruction;
2019
2020 ShuffleVectorInst *cloneImpl() const;
2021
2022public:
// Single-input constructors take V1 only; two-input constructors take V1 and
// V2. The mask may be given either as a Constant* or as ArrayRef<int>.
2023 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2028 Instruction *InsertBefore = nullptr);
2029 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2030 BasicBlock *InsertAtEnd);
2031 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2032 const Twine &NameStr = "",
2033 Instruction *InsertBefor = nullptr);
2034 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2035 const Twine &NameStr, BasicBlock *InsertAtEnd);
2036 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2037 const Twine &NameStr = "",
2038 Instruction *InsertBefor = nullptr);
2039 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2040 const Twine &NameStr, BasicBlock *InsertAtEnd);
2041
// Always allocate space for exactly two Use operands (the two input vectors);
// the mask is not an operand, it lives in the members above.
2042 void *operator new(size_t S) { return User::operator new(S, 2); }
2043 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2044
2045 /// Swap the operands and adjust the mask to preserve the semantics
2046 /// of the instruction.
2047 void commute();
2048
2049 /// Return true if a shufflevector instruction can be
2050 /// formed with the specified operands.
2051 static bool isValidOperands(const Value *V1, const Value *V2,
2052 const Value *Mask);
2053 static bool isValidOperands(const Value *V1, const Value *V2,
2054 ArrayRef<int> Mask);
2055
2056 /// Overload to return most specific vector type.
2057 ///
2058 VectorType *getType() const {
2059 return cast<VectorType>(Instruction::getType());
2060 }
2061
2062 /// Transparently provide more efficient getOperand methods.
2063 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2064
2065 /// Return the shuffle mask value of this instruction for the given element
2066 /// index. Return UndefMaskElem if the element is undef.
2067 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2068
2069 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2070 /// elements of the mask are returned as UndefMaskElem.
2071 static void getShuffleMask(const Constant *Mask,
2072 SmallVectorImpl<int> &Result);
2073
2074 /// Return the mask for this instruction as a vector of integers. Undefined
2075 /// elements of the mask are returned as UndefMaskElem.
2076 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2077 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2078 }
2079
2080 /// Return the mask for this instruction, for use in bitcode.
2081 ///
2082 /// TODO: This is temporary until we decide a new bitcode encoding for
2083 /// shufflevector.
2084 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2085
2086 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2087 Type *ResultTy);
2088
2089 void setShuffleMask(ArrayRef<int> Mask);
2090
2091 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2092
2093 /// Return true if this shuffle returns a vector with a different number of
2094 /// elements than its source vectors.
2095 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2096 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2097 bool changesLength() const {
// Uses the known-minimum element count so this also works for scalable
// vectors; operand <0> is the first input vector.
2098 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2099 ->getElementCount()
2100 .getKnownMinValue();
2101 unsigned NumMaskElts = ShuffleMask.size();
2102 return NumSourceElts != NumMaskElts;
2103 }
2104
2105 /// Return true if this shuffle returns a vector with a greater number of
2106 /// elements than its source vectors.
2107 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2108 bool increasesLength() const {
2109 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2110 ->getElementCount()
2111 .getKnownMinValue();
2112 unsigned NumMaskElts = ShuffleMask.size();
2113 return NumSourceElts < NumMaskElts;
2114 }
2115
2116 /// Return true if this shuffle mask chooses elements from exactly one source
2117 /// vector.
2118 /// Example: <7,5,undef,7>
2119 /// This assumes that vector operands are the same length as the mask.
2120 static bool isSingleSourceMask(ArrayRef<int> Mask);
2121 static bool isSingleSourceMask(const Constant *Mask) {
2122 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2122, __extension__ __PRETTY_FUNCTION__
))
;
2123 SmallVector<int, 16> MaskAsInts;
2124 getShuffleMask(Mask, MaskAsInts);
2125 return isSingleSourceMask(MaskAsInts);
2126 }
2127
2128 /// Return true if this shuffle chooses elements from exactly one source
2129 /// vector without changing the length of that vector.
2130 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2131 /// TODO: Optionally allow length-changing shuffles.
2132 bool isSingleSource() const {
2133 return !changesLength() && isSingleSourceMask(ShuffleMask);
2134 }
2135
2136 /// Return true if this shuffle mask chooses elements from exactly one source
2137 /// vector without lane crossings. A shuffle using this mask is not
2138 /// necessarily a no-op because it may change the number of elements from its
2139 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2140 /// Example: <undef,undef,2,3>
2141 static bool isIdentityMask(ArrayRef<int> Mask);
2142 static bool isIdentityMask(const Constant *Mask) {
2143 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2143, __extension__ __PRETTY_FUNCTION__
))
;
2144 SmallVector<int, 16> MaskAsInts;
2145 getShuffleMask(Mask, MaskAsInts);
2146 return isIdentityMask(MaskAsInts);
2147 }
2148
2149 /// Return true if this shuffle chooses elements from exactly one source
2150 /// vector without lane crossings and does not change the number of elements
2151 /// from its input vectors.
2152 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2153 bool isIdentity() const {
2154 return !changesLength() && isIdentityMask(ShuffleMask);
2155 }
2156
2157 /// Return true if this shuffle lengthens exactly one source vector with
2158 /// undefs in the high elements.
2159 bool isIdentityWithPadding() const;
2160
2161 /// Return true if this shuffle extracts the first N elements of exactly one
2162 /// source vector.
2163 bool isIdentityWithExtract() const;
2164
2165 /// Return true if this shuffle concatenates its 2 source vectors. This
2166 /// returns false if either input is undefined. In that case, the shuffle is
2167 /// better classified as an identity with padding operation.
2168 bool isConcat() const;
2169
2170 /// Return true if this shuffle mask chooses elements from its source vectors
2171 /// without lane crossings. A shuffle using this mask would be
2172 /// equivalent to a vector select with a constant condition operand.
2173 /// Example: <4,1,6,undef>
2174 /// This returns false if the mask does not choose from both input vectors.
2175 /// In that case, the shuffle is better classified as an identity shuffle.
2176 /// This assumes that vector operands are the same length as the mask
2177 /// (a length-changing shuffle can never be equivalent to a vector select).
2178 static bool isSelectMask(ArrayRef<int> Mask);
2179 static bool isSelectMask(const Constant *Mask) {
2180 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2180, __extension__ __PRETTY_FUNCTION__
))
;
2181 SmallVector<int, 16> MaskAsInts;
2182 getShuffleMask(Mask, MaskAsInts);
2183 return isSelectMask(MaskAsInts);
2184 }
2185
2186 /// Return true if this shuffle chooses elements from its source vectors
2187 /// without lane crossings and all operands have the same number of elements.
2188 /// In other words, this shuffle is equivalent to a vector select with a
2189 /// constant condition operand.
2190 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2191 /// This returns false if the mask does not choose from both input vectors.
2192 /// In that case, the shuffle is better classified as an identity shuffle.
2193 /// TODO: Optionally allow length-changing shuffles.
2194 bool isSelect() const {
2195 return !changesLength() && isSelectMask(ShuffleMask);
2196 }
2197
2198 /// Return true if this shuffle mask swaps the order of elements from exactly
2199 /// one source vector.
2200 /// Example: <7,6,undef,4>
2201 /// This assumes that vector operands are the same length as the mask.
2202 static bool isReverseMask(ArrayRef<int> Mask);
2203 static bool isReverseMask(const Constant *Mask) {
2204 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2204, __extension__ __PRETTY_FUNCTION__
))
;
2205 SmallVector<int, 16> MaskAsInts;
2206 getShuffleMask(Mask, MaskAsInts);
2207 return isReverseMask(MaskAsInts);
2208 }
2209
2210 /// Return true if this shuffle swaps the order of elements from exactly
2211 /// one source vector.
2212 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2213 /// TODO: Optionally allow length-changing shuffles.
2214 bool isReverse() const {
2215 return !changesLength() && isReverseMask(ShuffleMask);
2216 }
2217
2218 /// Return true if this shuffle mask chooses all elements with the same value
2219 /// as the first element of exactly one source vector.
2220 /// Example: <4,undef,undef,4>
2221 /// This assumes that vector operands are the same length as the mask.
2222 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2223 static bool isZeroEltSplatMask(const Constant *Mask) {
2224 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2224, __extension__ __PRETTY_FUNCTION__
))
;
2225 SmallVector<int, 16> MaskAsInts;
2226 getShuffleMask(Mask, MaskAsInts);
2227 return isZeroEltSplatMask(MaskAsInts);
2228 }
2229
2230 /// Return true if all elements of this shuffle are the same value as the
2231 /// first element of exactly one source vector without changing the length
2232 /// of that vector.
2233 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2234 /// TODO: Optionally allow length-changing shuffles.
2235 /// TODO: Optionally allow splats from other elements.
2236 bool isZeroEltSplat() const {
2237 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2238 }
2239
2240 /// Return true if this shuffle mask is a transpose mask.
2241 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2242 /// even- or odd-numbered vector elements from two n-dimensional source
2243 /// vectors and write each result into consecutive elements of an
2244 /// n-dimensional destination vector. Two shuffles are necessary to complete
2245 /// the transpose, one for the even elements and another for the odd elements.
2246 /// This description closely follows how the TRN1 and TRN2 AArch64
2247 /// instructions operate.
2248 ///
2249 /// For example, a simple 2x2 matrix can be transposed with:
2250 ///
2251 /// ; Original matrix
2252 /// m0 = < a, b >
2253 /// m1 = < c, d >
2254 ///
2255 /// ; Transposed matrix
2256 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2257 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2258 ///
2259 /// For matrices having greater than n columns, the resulting nx2 transposed
2260 /// matrix is stored in two result vectors such that one vector contains
2261 /// interleaved elements from all the even-numbered rows and the other vector
2262 /// contains interleaved elements from all the odd-numbered rows. For example,
2263 /// a 2x4 matrix can be transposed with:
2264 ///
2265 /// ; Original matrix
2266 /// m0 = < a, b, c, d >
2267 /// m1 = < e, f, g, h >
2268 ///
2269 /// ; Transposed matrix
2270 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2271 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2272 static bool isTransposeMask(ArrayRef<int> Mask);
2273 static bool isTransposeMask(const Constant *Mask) {
2274 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2274, __extension__ __PRETTY_FUNCTION__
))
;
2275 SmallVector<int, 16> MaskAsInts;
2276 getShuffleMask(Mask, MaskAsInts);
2277 return isTransposeMask(MaskAsInts);
2278 }
2279
2280 /// Return true if this shuffle transposes the elements of its inputs without
2281 /// changing the length of the vectors. This operation may also be known as a
2282 /// merge or interleave. See the description for isTransposeMask() for the
2283 /// exact specification.
2284 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2285 bool isTranspose() const {
2286 return !changesLength() && isTransposeMask(ShuffleMask);
2287 }
2288
2289 /// Return true if this shuffle mask is an extract subvector mask.
2290 /// A valid extract subvector mask returns a smaller vector from a single
2291 /// source operand. The base extraction index is returned as well.
2292 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2293 int &Index);
2294 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2295 int &Index) {
2296 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2296, __extension__ __PRETTY_FUNCTION__
))
;
2297 // Not possible to express a shuffle mask for a scalable vector for this
2298 // case.
2299 if (isa<ScalableVectorType>(Mask->getType()))
2300 return false;
2301 SmallVector<int, 16> MaskAsInts;
2302 getShuffleMask(Mask, MaskAsInts);
2303 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2304 }
2305
2306 /// Return true if this shuffle mask is an extract subvector mask.
2307 bool isExtractSubvectorMask(int &Index) const {
2308 // Not possible to express a shuffle mask for a scalable vector for this
2309 // case.
2310 if (isa<ScalableVectorType>(getType()))
2311 return false;
2312
2313 int NumSrcElts =
2314 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2315 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2316 }
2317
2318 /// Return true if this shuffle mask is an insert subvector mask.
2319 /// A valid insert subvector mask inserts the lowest elements of a second
2320 /// source operand into an in-place first source operand.
2321 /// Both the sub vector width and the insertion index is returned.
2322 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2323 int &NumSubElts, int &Index);
2324 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2325 int &NumSubElts, int &Index) {
2326 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2326, __extension__ __PRETTY_FUNCTION__
))
;
2327 // Not possible to express a shuffle mask for a scalable vector for this
2328 // case.
2329 if (isa<ScalableVectorType>(Mask->getType()))
2330 return false;
2331 SmallVector<int, 16> MaskAsInts;
2332 getShuffleMask(Mask, MaskAsInts);
2333 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2334 }
2335
2336 /// Return true if this shuffle mask is an insert subvector mask.
2337 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2338 // Not possible to express a shuffle mask for a scalable vector for this
2339 // case.
2340 if (isa<ScalableVectorType>(getType()))
2341 return false;
2342
2343 int NumSrcElts =
2344 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2345 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2346 }
2347
2348 /// Return true if this shuffle mask replicates each of the \p VF elements
2349 /// in a vector \p ReplicationFactor times.
2350 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2351 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2352 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2353 int &VF);
2354 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2355 int &VF) {
2356 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "llvm/include/llvm/IR/Instructions.h", 2356, __extension__ __PRETTY_FUNCTION__
))
;
2357 // Not possible to express a shuffle mask for a scalable vector for this
2358 // case.
2359 if (isa<ScalableVectorType>(Mask->getType()))
2360 return false;
2361 SmallVector<int, 16> MaskAsInts;
2362 getShuffleMask(Mask, MaskAsInts);
2363 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2364 }
2365
2366 /// Return true if this shuffle mask is a replication mask.
2367 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2368
2369 /// Change values in a shuffle permute mask assuming the two vector operands
2370 /// of length InVecNumElts have swapped position.
2371 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2372 unsigned InVecNumElts) {
2373 for (int &Idx : Mask) {
// -1 (UndefMaskElem) lanes are left untouched; all others are shifted
// across the operand boundary at InVecNumElts.
2374 if (Idx == -1)
2375 continue;
2376 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2377 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "llvm/include/llvm/IR/Instructions.h", 2378, __extension__ __PRETTY_FUNCTION__
))
2378 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "llvm/include/llvm/IR/Instructions.h", 2378, __extension__ __PRETTY_FUNCTION__
))
;
2379 }
2380 }
2381
2382 // Methods for support type inquiry through isa, cast, and dyn_cast:
2383 static bool classof(const Instruction *I) {
2384 return I->getOpcode() == Instruction::ShuffleVector;
2385 }
2386 static bool classof(const Value *V) {
2387 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2388 }
2389};
2390
// ShuffleVectorInst has exactly two fixed operands (the two input vectors);
// the mask is stored in the instruction itself, not as an operand.
2391template <>
2392struct OperandTraits<ShuffleVectorInst>
2393 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2394
// Preprocessor-expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS for
// ShuffleVectorInst: iterator accessors, range-asserted getOperand/setOperand,
// getNumOperands, and templated Op<Idx>() helpers.
2395DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2395, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ShuffleVectorInst
>::op_begin(const_cast<ShuffleVectorInst*>(this))[i_nocapture
].get()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2395, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ShuffleVectorInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ShuffleVectorInst::getNumOperands
() const { return OperandTraits<ShuffleVectorInst>::operands
(this); } template <int Idx_nocapture> Use &ShuffleVectorInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &ShuffleVectorInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2396
2397//===----------------------------------------------------------------------===//
2398// ExtractValueInst Class
2399//===----------------------------------------------------------------------===//
2400
2401/// This instruction extracts a struct member or array
2402/// element value from an aggregate value.
2403///
2404class ExtractValueInst : public UnaryInstruction {
  // Constant index path into the aggregate; stored inline (not as operands).
2405 SmallVector<unsigned, 4> Indices;
2406
2407 ExtractValueInst(const ExtractValueInst &EVI);
2408
2409 /// Constructors - Create an extractvalue instruction with a base aggregate
2410 /// value and a list of indices. The first ctor can optionally insert before
2411 /// an existing instruction, the second appends the new instruction to the
2412 /// specified BasicBlock.
2413 inline ExtractValueInst(Value *Agg,
2414 ArrayRef<unsigned> Idxs,
2415 const Twine &NameStr,
2416 Instruction *InsertBefore);
2417 inline ExtractValueInst(Value *Agg,
2418 ArrayRef<unsigned> Idxs,
2419 const Twine &NameStr, BasicBlock *InsertAtEnd);
2420
2421 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2422
2423protected:
2424 // Note: Instruction needs to be a friend here to call cloneImpl.
2425 friend class Instruction;
2426
2427 ExtractValueInst *cloneImpl() const;
2428
2429public:
  /// Create an extractvalue, optionally inserted before \p InsertBefore.
2430 static ExtractValueInst *Create(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr = "",
2433 Instruction *InsertBefore = nullptr) {
2434 return new
2435 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2436 }
2437
  /// Create an extractvalue appended to basic block \p InsertAtEnd.
2438 static ExtractValueInst *Create(Value *Agg,
2439 ArrayRef<unsigned> Idxs,
2440 const Twine &NameStr,
2441 BasicBlock *InsertAtEnd) {
2442 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2443 }
2444
2445 /// Returns the type of the element that would be extracted
2446 /// with an extractvalue instruction with the specified parameters.
2447 ///
2448 /// Null is returned if the indices are invalid for the specified type.
2449 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2450
2451 using idx_iterator = const unsigned*;
2452
  // Iterate the constant index list (read-only).
2453 inline idx_iterator idx_begin() const { return Indices.begin(); }
2454 inline idx_iterator idx_end() const { return Indices.end(); }
2455 inline iterator_range<idx_iterator> indices() const {
2456 return make_range(idx_begin(), idx_end());
2457 }
2458
  /// The aggregate being indexed is the sole operand (operand 0).
2459 Value *getAggregateOperand() {
2460 return getOperand(0);
2461 }
2462 const Value *getAggregateOperand() const {
2463 return getOperand(0);
2464 }
2465 static unsigned getAggregateOperandIndex() {
2466 return 0U; // get index for modifying correct operand
2467 }
2468
2469 ArrayRef<unsigned> getIndices() const {
2470 return Indices;
2471 }
2472
2473 unsigned getNumIndices() const {
2474 return (unsigned)Indices.size();
2475 }
2476
  // An extractvalue always has at least one index, so this is trivially true.
2477 bool hasIndices() const {
2478 return true;
2479 }
2480
2481 // Methods for support type inquiry through isa, cast, and dyn_cast:
2482 static bool classof(const Instruction *I) {
2483 return I->getOpcode() == Instruction::ExtractValue;
2484 }
2485 static bool classof(const Value *V) {
2486 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2487 }
2488};
2489
// Inline ctor: the result type is computed from the aggregate type plus the
// index path via getIndexedType(); checkGEPType() asserts it is non-null.
2490ExtractValueInst::ExtractValueInst(Value *Agg,
2491 ArrayRef<unsigned> Idxs,
2492 const Twine &NameStr,
2493 Instruction *InsertBefore)
2494 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2495 ExtractValue, Agg, InsertBefore) {
2496 init(Idxs, NameStr);
2497}
2498
// Same as above, but appends the new instruction to \p InsertAtEnd.
2499ExtractValueInst::ExtractValueInst(Value *Agg,
2500 ArrayRef<unsigned> Idxs,
2501 const Twine &NameStr,
2502 BasicBlock *InsertAtEnd)
2503 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2504 ExtractValue, Agg, InsertAtEnd) {
2505 init(Idxs, NameStr);
2506}
2507
2508//===----------------------------------------------------------------------===//
2509// InsertValueInst Class
2510//===----------------------------------------------------------------------===//
2511
2512/// This instruction inserts a struct field or array element
2513/// value into an aggregate value.
2514///
2515class InsertValueInst : public Instruction {
  // Constant index path into the aggregate; stored inline (not as operands).
2516 SmallVector<unsigned, 4> Indices;
2517
2518 InsertValueInst(const InsertValueInst &IVI);
2519
2520 /// Constructors - Create an insertvalue instruction with a base aggregate
2521 /// value, a value to insert, and a list of indices. The first ctor can
2522 /// optionally insert before an existing instruction, the second appends
2523 /// the new instruction to the specified BasicBlock.
2524 inline InsertValueInst(Value *Agg, Value *Val,
2525 ArrayRef<unsigned> Idxs,
2526 const Twine &NameStr,
2527 Instruction *InsertBefore);
2528 inline InsertValueInst(Value *Agg, Value *Val,
2529 ArrayRef<unsigned> Idxs,
2530 const Twine &NameStr, BasicBlock *InsertAtEnd);
2531
2532 /// Constructors - These two constructors are convenience methods because one
2533 /// and two index insertvalue instructions are so common.
2534 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2535 const Twine &NameStr = "",
2536 Instruction *InsertBefore = nullptr);
2537 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2538 BasicBlock *InsertAtEnd);
2539
2540 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2541 const Twine &NameStr);
2542
2543protected:
2544 // Note: Instruction needs to be a friend here to call cloneImpl.
2545 friend class Instruction;
2546
2547 InsertValueInst *cloneImpl() const;
2548
2549public:
2550 // allocate space for exactly two operands
2551 void *operator new(size_t S) { return User::operator new(S, 2); }
2552 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2553
  /// Create an insertvalue, optionally inserted before \p InsertBefore.
2554 static InsertValueInst *Create(Value *Agg, Value *Val,
2555 ArrayRef<unsigned> Idxs,
2556 const Twine &NameStr = "",
2557 Instruction *InsertBefore = nullptr) {
2558 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2559 }
2560
  /// Create an insertvalue appended to basic block \p InsertAtEnd.
2561 static InsertValueInst *Create(Value *Agg, Value *Val,
2562 ArrayRef<unsigned> Idxs,
2563 const Twine &NameStr,
2564 BasicBlock *InsertAtEnd) {
2565 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2566 }
2567
2568 /// Transparently provide more efficient getOperand methods.
2569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2570
2571 using idx_iterator = const unsigned*;
2572
  // Iterate the constant index list (read-only).
2573 inline idx_iterator idx_begin() const { return Indices.begin(); }
2574 inline idx_iterator idx_end() const { return Indices.end(); }
2575 inline iterator_range<idx_iterator> indices() const {
2576 return make_range(idx_begin(), idx_end());
2577 }
2578
  /// Operand 0 is the aggregate being written into.
2579 Value *getAggregateOperand() {
2580 return getOperand(0);
2581 }
2582 const Value *getAggregateOperand() const {
2583 return getOperand(0);
2584 }
2585 static unsigned getAggregateOperandIndex() {
2586 return 0U; // get index for modifying correct operand
2587 }
2588
  /// Operand 1 is the value inserted at the index path.
2589 Value *getInsertedValueOperand() {
2590 return getOperand(1);
2591 }
2592 const Value *getInsertedValueOperand() const {
2593 return getOperand(1);
2594 }
2595 static unsigned getInsertedValueOperandIndex() {
2596 return 1U; // get index for modifying correct operand
2597 }
2598
2599 ArrayRef<unsigned> getIndices() const {
2600 return Indices;
2601 }
2602
2603 unsigned getNumIndices() const {
2604 return (unsigned)Indices.size();
2605 }
2606
  // An insertvalue always has at least one index, so this is trivially true.
2607 bool hasIndices() const {
2608 return true;
2609 }
2610
2611 // Methods for support type inquiry through isa, cast, and dyn_cast:
2612 static bool classof(const Instruction *I) {
2613 return I->getOpcode() == Instruction::InsertValue;
2614 }
2615 static bool classof(const Value *V) {
2616 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2617 }
2618};
2619
// OperandTraits specialization: InsertValueInst has exactly two Use slots
// (aggregate, inserted value), matching its `operator new(S, 2)` above.
2620template <>
2621struct OperandTraits<InsertValueInst> :
2622 public FixedNumOperandTraits<InsertValueInst, 2> {
2623};
2624
// Inline ctor: result type equals the aggregate's own type (insertvalue
// yields the whole aggregate); the two operands are set up by init().
2625InsertValueInst::InsertValueInst(Value *Agg,
2626 Value *Val,
2627 ArrayRef<unsigned> Idxs,
2628 const Twine &NameStr,
2629 Instruction *InsertBefore)
2630 : Instruction(Agg->getType(), InsertValue,
2631 OperandTraits<InsertValueInst>::op_begin(this),
2632 2, InsertBefore) {
2633 init(Agg, Val, Idxs, NameStr);
2634}
2635
// Same as above, but appends the new instruction to \p InsertAtEnd.
2636InsertValueInst::InsertValueInst(Value *Agg,
2637 Value *Val,
2638 ArrayRef<unsigned> Idxs,
2639 const Twine &NameStr,
2640 BasicBlock *InsertAtEnd)
2641 : Instruction(Agg->getType(), InsertValue,
2642 OperandTraits<InsertValueInst>::op_begin(this),
2643 2, InsertAtEnd) {
2644 init(Agg, Val, Idxs, NameStr);
2645}
2646
// Out-of-line operand accessors for InsertValueInst, generated by
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS (macro-expanded form; getOperand/
// setOperand assert the index is within OperandTraits::operands()).
2647DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2647, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<InsertValueInst
>::op_begin(const_cast<InsertValueInst*>(this))[i_nocapture
].get()); } void InsertValueInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2647, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<InsertValueInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned InsertValueInst::getNumOperands
() const { return OperandTraits<InsertValueInst>::operands
(this); } template <int Idx_nocapture> Use &InsertValueInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &InsertValueInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2648
2649//===----------------------------------------------------------------------===//
2650// PHINode Class
2651//===----------------------------------------------------------------------===//
2652
2653// PHINode - The PHINode class is used to represent the magical mystical PHI
2654// node, that can not exist in nature, but can be synthesized in a computer
2655// scientist's overactive imagination.
2656//
2657class PHINode : public Instruction {
2658 /// The number of operands actually allocated. NumOperands is
2659 /// the number actually in use.
2660 unsigned ReservedSpace;
2661
2662 PHINode(const PHINode &PN);
2663
  // Private ctor: operands are hung off (allocated separately), so the base
  // Instruction is built with a null operand list and 0 operands.
2664 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2665 const Twine &NameStr = "",
2666 Instruction *InsertBefore = nullptr)
2667 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2668 ReservedSpace(NumReservedValues) {
2669 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "llvm/include/llvm/IR/Instructions.h", 2669, __extension__ __PRETTY_FUNCTION__
))
;
2670 setName(NameStr);
2671 allocHungoffUses(ReservedSpace);
2672 }
2673
  // Same as above, but appends the PHI to \p InsertAtEnd.
2674 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2675 BasicBlock *InsertAtEnd)
2676 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2677 ReservedSpace(NumReservedValues) {
2678 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "llvm/include/llvm/IR/Instructions.h", 2678, __extension__ __PRETTY_FUNCTION__
))
;
2679 setName(NameStr);
2680 allocHungoffUses(ReservedSpace);
2681 }
2682
2683protected:
2684 // Note: Instruction needs to be a friend here to call cloneImpl.
2685 friend class Instruction;
2686
2687 PHINode *cloneImpl() const;
2688
2689 // allocHungoffUses - this is more complicated than the generic
2690 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2691 // values and pointers to the incoming blocks, all in one allocation.
2692 void allocHungoffUses(unsigned N) {
2693 User::allocHungoffUses(N, /* IsPhi */ true);
2694 }
2695
2696public:
2697 /// Constructors - NumReservedValues is a hint for the number of incoming
2698 /// edges that this phi node will have (use 0 if you really have no idea).
2699 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2700 const Twine &NameStr = "",
2701 Instruction *InsertBefore = nullptr) {
2702 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2703 }
2704
2705 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2706 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2707 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2708 }
2709
2710 /// Provide fast operand accessors
2711 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2712
2713 // Block iterator interface. This provides access to the list of incoming
2714 // basic blocks, which parallels the list of incoming values.
2715
2716 using block_iterator = BasicBlock **;
2717 using const_block_iterator = BasicBlock * const *;
2718
  // The incoming-block array is laid out immediately after the ReservedSpace
  // Use slots in the single hung-off allocation (see block_begin() below).
2719 block_iterator block_begin() {
2720 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2721 }
2722
2723 const_block_iterator block_begin() const {
2724 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2725 }
2726
2727 block_iterator block_end() {
2728 return block_begin() + getNumOperands();
2729 }
2730
2731 const_block_iterator block_end() const {
2732 return block_begin() + getNumOperands();
2733 }
2734
2735 iterator_range<block_iterator> blocks() {
2736 return make_range(block_begin(), block_end());
2737 }
2738
2739 iterator_range<const_block_iterator> blocks() const {
2740 return make_range(block_begin(), block_end());
2741 }
2742
  // Every operand of a PHI is an incoming value, so these are just operands().
2743 op_range incoming_values() { return operands(); }
2744
2745 const_op_range incoming_values() const { return operands(); }
2746
2747 /// Return the number of incoming edges
2748 ///
2749 unsigned getNumIncomingValues() const { return getNumOperands(); }
2750
2751 /// Return incoming value number x
2752 ///
2753 Value *getIncomingValue(unsigned i) const {
2754 return getOperand(i);
2755 }
2756 void setIncomingValue(unsigned i, Value *V) {
2757 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "llvm/include/llvm/IR/Instructions.h", 2757, __extension__ __PRETTY_FUNCTION__
))
;
2758 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "llvm/include/llvm/IR/Instructions.h", 2759, __extension__ __PRETTY_FUNCTION__
))
2759 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "llvm/include/llvm/IR/Instructions.h", 2759, __extension__ __PRETTY_FUNCTION__
))
;
2760 setOperand(i, V);
2761 }
2762
  // Incoming-value index and operand index are identical for PHIs.
2763 static unsigned getOperandNumForIncomingValue(unsigned i) {
2764 return i;
2765 }
2766
2767 static unsigned getIncomingValueNumForOperand(unsigned i) {
2768 return i;
2769 }
2770
2771 /// Return incoming basic block number @p i.
2772 ///
2773 BasicBlock *getIncomingBlock(unsigned i) const {
2774 return block_begin()[i];
2775 }
2776
2777 /// Return incoming basic block corresponding
2778 /// to an operand of the PHI.
2779 ///
2780 BasicBlock *getIncomingBlock(const Use &U) const {
2781 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "llvm/include/llvm/IR/Instructions.h", 2781, __extension__ __PRETTY_FUNCTION__
))
;
2782 return getIncomingBlock(unsigned(&U - op_begin()));
2783 }
2784
2785 /// Return incoming basic block corresponding
2786 /// to value use iterator.
2787 ///
2788 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2789 return getIncomingBlock(I.getUse());
2790 }
2791
2792 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2793 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2793, __extension__ __PRETTY_FUNCTION__
))
;
2794 block_begin()[i] = BB;
2795 }
2796
2797 /// Replace every incoming basic block \p Old to basic block \p New.
2798 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2799 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2799, __extension__ __PRETTY_FUNCTION__
))
;
2800 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2801 if (getIncomingBlock(Op) == Old)
2802 setIncomingBlock(Op, New);
2803 }
2804
2805 /// Add an incoming value to the end of the PHI list
2806 ///
2807 void addIncoming(Value *V, BasicBlock *BB) {
2808 if (getNumOperands() == ReservedSpace)
2809 growOperands(); // Get more space!
2810 // Initialize some new operands.
2811 setNumHungOffUseOperands(getNumOperands() + 1);
2812 setIncomingValue(getNumOperands() - 1, V);
2813 setIncomingBlock(getNumOperands() - 1, BB);
2814 }
2815
2816 /// Remove an incoming value. This is useful if a
2817 /// predecessor basic block is deleted. The value removed is returned.
2818 ///
2819 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2820 /// is true), the PHI node is destroyed and any uses of it are replaced with
2821 /// dummy values. The only time there should be zero incoming values to a PHI
2822 /// node is when the block is dead, so this strategy is sound.
2823 ///
2824 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2825
2826 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2827 int Idx = getBasicBlockIndex(BB);
2828 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "llvm/include/llvm/IR/Instructions.h", 2828, __extension__ __PRETTY_FUNCTION__
))
;
2829 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2830 }
2831
2832 /// Return the first index of the specified basic
2833 /// block in the value list for this PHI. Returns -1 if no instance.
2834 ///
2835 int getBasicBlockIndex(const BasicBlock *BB) const {
2836 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2837 if (block_begin()[i] == BB)
2838 return i;
2839 return -1;
2840 }
2841
  // NOTE: asserts (rather than returns null) if \p BB is not a predecessor.
2842 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2843 int Idx = getBasicBlockIndex(BB);
2844 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "llvm/include/llvm/IR/Instructions.h", 2844, __extension__ __PRETTY_FUNCTION__
))
;
2845 return getIncomingValue(Idx);
2846 }
2847
2848 /// Set every incoming value(s) for block \p BB to \p V.
2849 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2850 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "llvm/include/llvm/IR/Instructions.h", 2850, __extension__ __PRETTY_FUNCTION__
))
;
2851 bool Found = false;
2852 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2853 if (getIncomingBlock(Op) == BB) {
2854 Found = true;
2855 setIncomingValue(Op, V);
2856 }
2857 (void)Found;
2858 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "llvm/include/llvm/IR/Instructions.h", 2858, __extension__ __PRETTY_FUNCTION__
))
;
2859 }
2860
2861 /// If the specified PHI node always merges together the
2862 /// same value, return the value, otherwise return null.
2863 Value *hasConstantValue() const;
2864
2865 /// Whether the specified PHI node always merges
2866 /// together the same value, assuming undefs are equal to a unique
2867 /// non-undef value.
2868 bool hasConstantOrUndefValue() const;
2869
2870 /// If the PHI node is complete which means all of its parent's predecessors
2871 /// have incoming value in this PHI, return true, otherwise return false.
2872 bool isComplete() const {
2873 return llvm::all_of(predecessors(getParent()),
2874 [this](const BasicBlock *Pred) {
2875 return getBasicBlockIndex(Pred) >= 0;
2876 });
2877 }
2878
2879 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2880 static bool classof(const Instruction *I) {
2881 return I->getOpcode() == Instruction::PHI;
2882 }
2883 static bool classof(const Value *V) {
2884 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2885 }
2886
2887private:
2888 void growOperands();
2889};
2890
// OperandTraits specialization: PHINode uses hung-off uses (operand list
// allocated separately and resizable), with a minimum reservation of 2.
2891template <>
2892struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2893};
2894
// Out-of-line operand accessors for PHINode, generated by
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS (macro-expanded form; getOperand/
// setOperand assert the index is within OperandTraits::operands()).
2895DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2895, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<PHINode
>::op_begin(const_cast<PHINode*>(this))[i_nocapture]
.get()); } void PHINode::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<PHINode>::operands(this) && "setOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 2895, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<PHINode>::op_begin(this)[i_nocapture]
= Val_nocapture; } unsigned PHINode::getNumOperands() const {
return OperandTraits<PHINode>::operands(this); } template
<int Idx_nocapture> Use &PHINode::Op() { return this
->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture
> const Use &PHINode::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
2896
2897//===----------------------------------------------------------------------===//
2898// LandingPadInst Class
2899//===----------------------------------------------------------------------===//
2900
2901//===---------------------------------------------------------------------------
2902/// The landingpad instruction holds all of the information
2903/// necessary to generate correct exception handling. The landingpad instruction
2904/// cannot be moved from the top of a landing pad block, which itself is
2905/// accessible only from the 'unwind' edge of an invoke. This uses the
2906/// SubclassData field in Value to store whether or not the landingpad is a
2907/// cleanup.
2908///
2909class LandingPadInst : public Instruction {
  // The "is cleanup" flag lives in bit 0 of the Instruction subclass data.
2910 using CleanupField = BoolBitfieldElementT<0>;
2911
2912 /// The number of operands actually allocated. NumOperands is
2913 /// the number actually in use.
2914 unsigned ReservedSpace;
2915
2916 LandingPadInst(const LandingPadInst &LP);
2917
2918public:
2919 enum ClauseType { Catch, Filter };
2920
2921private:
2922 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2923 const Twine &NameStr, Instruction *InsertBefore);
2924 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2925 const Twine &NameStr, BasicBlock *InsertAtEnd);
2926
2927 // Allocate space for exactly zero operands.
2928 void *operator new(size_t S) { return User::operator new(S); }
2929
2930 void growOperands(unsigned Size);
2931 void init(unsigned NumReservedValues, const Twine &NameStr);
2932
2933protected:
2934 // Note: Instruction needs to be a friend here to call cloneImpl.
2935 friend class Instruction;
2936
2937 LandingPadInst *cloneImpl() const;
2938
2939public:
2940 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2941
2942 /// Constructors - NumReservedClauses is a hint for the number of incoming
2943 /// clauses that this landingpad will have (use 0 if you really have no idea).
2944 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2945 const Twine &NameStr = "",
2946 Instruction *InsertBefore = nullptr);
2947 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2948 const Twine &NameStr, BasicBlock *InsertAtEnd);
2949
2950 /// Provide fast operand accessors
2951 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2952
2953 /// Return 'true' if this landingpad instruction is a
2954 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2955 /// doesn't catch the exception.
2956 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2957
2958 /// Indicate that this landingpad instruction is a cleanup.
2959 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2960
2961 /// Add a catch or filter clause to the landing pad.
2962 void addClause(Constant *ClauseVal);
2963
2964 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2965 /// determine what type of clause this is.
2966 Constant *getClause(unsigned Idx) const {
2967 return cast<Constant>(getOperandList()[Idx]);
2968 }
2969
  // Clause kind is encoded in the clause's type: filter clauses have array
  // type, catch clauses do not — see the two predicates below.
2970 /// Return 'true' if the clause and index Idx is a catch clause.
2971 bool isCatch(unsigned Idx) const {
2972 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2973 }
2974
2975 /// Return 'true' if the clause and index Idx is a filter clause.
2976 bool isFilter(unsigned Idx) const {
2977 return isa<ArrayType>(getOperandList()[Idx]->getType());
2978 }
2979
2980 /// Get the number of clauses for this landing pad.
2981 unsigned getNumClauses() const { return getNumOperands(); }
2982
2983 /// Grow the size of the operand list to accommodate the new
2984 /// number of clauses.
2985 void reserveClauses(unsigned Size) { growOperands(Size); }
2986
2987 // Methods for support type inquiry through isa, cast, and dyn_cast:
2988 static bool classof(const Instruction *I) {
2989 return I->getOpcode() == Instruction::LandingPad;
2990 }
2991 static bool classof(const Value *V) {
2992 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2993 }
2994};
2995
// OperandTraits specialization: LandingPadInst uses hung-off uses so the
// clause list can grow after construction (minimum reservation of 1).
2996template <>
2997struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2998};
2999
// Out-of-line operand accessors for LandingPadInst, generated by
// DEFINE_TRANSPARENT_OPERAND_ACCESSORS (macro-expanded form; getOperand/
// setOperand assert the index is within OperandTraits::operands()).
3000DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3000, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<LandingPadInst
>::op_begin(const_cast<LandingPadInst*>(this))[i_nocapture
].get()); } void LandingPadInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3000, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<LandingPadInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned LandingPadInst::getNumOperands(
) const { return OperandTraits<LandingPadInst>::operands
(this); } template <int Idx_nocapture> Use &LandingPadInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &LandingPadInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
3001
3002//===----------------------------------------------------------------------===//
3003// ReturnInst Class
3004//===----------------------------------------------------------------------===//
3005
3006//===---------------------------------------------------------------------------
3007/// Return a value (possibly void), from a function. Execution
3008/// does not continue in this function any longer.
3009///
// A 'ret' terminator. Holds at most one operand (the returned value);
// 'ret void' has zero operands, so getNumOperands() distinguishes the two.
3010class ReturnInst : public Instruction {
 3011 ReturnInst(const ReturnInst &RI);
 3012
 3013private:
 3014 // ReturnInst constructors:
 3015 // ReturnInst() - 'ret void' instruction
 3016 // ReturnInst( null) - 'ret void' instruction
 3017 // ReturnInst(Value* X) - 'ret X' instruction
 3018 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
 3019 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
 3020 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
 3021 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
 3022 //
 3023 // NOTE: If the Value* passed is of type void then the constructor behaves as
 3024 // if it was passed NULL.
 3025 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
 3026 Instruction *InsertBefore = nullptr);
 3027 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
 3028 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
 3029
 3030protected:
 3031 // Note: Instruction needs to be a friend here to call cloneImpl.
 3032 friend class Instruction;
 3033
 3034 ReturnInst *cloneImpl() const;
 3035
 3036public:
 // The placement-new argument is the operand count: !!retVal yields 1 when a
 // value is returned and 0 for 'ret void', so no Use slot is allocated for a
 // void return.
 3037 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
 3038 Instruction *InsertBefore = nullptr) {
 3039 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
 3040 }
 3041
 3042 static ReturnInst* Create(LLVMContext &C, Value *retVal,
 3043 BasicBlock *InsertAtEnd) {
 3044 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
 3045 }
 3046
 // 'ret void' form: zero operand slots.
 3047 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
 3048 return new(0) ReturnInst(C, InsertAtEnd);
 3049 }
 3050
 3051 /// Provide fast operand accessors
 3052 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 3053
 3054 /// Convenience accessor. Returns null if there is no return value.
 3055 Value *getReturnValue() const {
 3056 return getNumOperands() != 0 ? getOperand(0) : nullptr;
 3057 }
 3058
 3059 unsigned getNumSuccessors() const { return 0; }
 3060
 3061 // Methods for support type inquiry through isa, cast, and dyn_cast:
 3062 static bool classof(const Instruction *I) {
 3063 return (I->getOpcode() == Instruction::Ret);
 3064 }
 3065 static bool classof(const Value *V) {
 3066 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 3067 }
 3068
 // A 'ret' has no successors; these private stubs exist only to satisfy the
 // generic terminator interface and must never be called.
 3069private:
 3070 BasicBlock *getSuccessor(unsigned idx) const {
 3071 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 3071)
;
 3072 }
 3073
 3074 void setSuccessor(unsigned idx, BasicBlock *B) {
 3075 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 3075)
;
 3076 }
 3077};
3078
// OperandTraits specialization: ReturnInst has a variable operand count
// (0 for 'ret void', 1 otherwise), fixed at allocation time by the
// placement-new argument in ReturnInst::Create.
 3079template <>
 3080struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
 3081};
3082
// Preprocessor-expanded body of DEFINE_TRANSPARENT_OPERAND_ACCESSORS:
// out-of-line definitions of op_begin/op_end (mutable and const),
// getOperand/setOperand (both assert the index is within
// OperandTraits<ReturnInst>::operands(this)), getNumOperands, and the
// Op<Idx>() helpers, all forwarding to OperandTraits<ReturnInst>.
 3083DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3083, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this))[i_nocapture
].get()); } void ReturnInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ReturnInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3083, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ReturnInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ReturnInst::getNumOperands() const
{ return OperandTraits<ReturnInst>::operands(this); } template
<int Idx_nocapture> Use &ReturnInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ReturnInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3084
3085//===----------------------------------------------------------------------===//
3086// BranchInst Class
3087//===----------------------------------------------------------------------===//
3088
3089//===---------------------------------------------------------------------------
3090/// Conditional or Unconditional Branch instruction.
3091///
// A 'br' terminator, either unconditional (one operand: TrueDest) or
// conditional (three operands: [Cond, FalseDest, TrueDest]). Accessors index
// relative to op_end() via negative Op<> indices so they work for both forms.
3092class BranchInst : public Instruction {
 3093 /// Ops list - Branches are strange. The operands are ordered:
 3094 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
 3095 /// they don't have to check for cond/uncond branchness. These are mostly
 3096 /// accessed relative from op_end().
 3097 BranchInst(const BranchInst &BI);
 3098 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
 3099 // BranchInst(BB *B) - 'br B'
 3100 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
 3101 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
 3102 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
 3103 // BranchInst(BB* B, BB *I) - 'br B' insert at end
 3104 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
 3105 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
 3106 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
 3107 Instruction *InsertBefore = nullptr);
 3108 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
 3109 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
 3110 BasicBlock *InsertAtEnd);
 3111
 3112 void AssertOK();
 3113
 3114protected:
 3115 // Note: Instruction needs to be a friend here to call cloneImpl.
 3116 friend class Instruction;
 3117
 3118 BranchInst *cloneImpl() const;
 3119
 3120public:
 3121 /// Iterator type that casts an operand to a basic block.
 3122 ///
 3123 /// This only makes sense because the successors are stored as adjacent
 3124 /// operands for branch instructions.
 3125 struct succ_op_iterator
 3126 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
 3127 std::random_access_iterator_tag, BasicBlock *,
 3128 ptrdiff_t, BasicBlock *, BasicBlock *> {
 3129 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
 3130
 3131 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
 3132 BasicBlock *operator->() const { return operator*(); }
 3133 };
 3134
 3135 /// The const version of `succ_op_iterator`.
 3136 struct const_succ_op_iterator
 3137 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
 3138 std::random_access_iterator_tag,
 3139 const BasicBlock *, ptrdiff_t, const BasicBlock *,
 3140 const BasicBlock *> {
 3141 explicit const_succ_op_iterator(const_value_op_iterator I)
 3142 : iterator_adaptor_base(I) {}
 3143
 3144 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
 3145 const BasicBlock *operator->() const { return operator*(); }
 3146 };
 3147
 // The placement-new argument is the operand count: 1 for an unconditional
 // branch, 3 for a conditional one.
 3148 static BranchInst *Create(BasicBlock *IfTrue,
 3149 Instruction *InsertBefore = nullptr) {
 3150 return new(1) BranchInst(IfTrue, InsertBefore);
 3151 }
 3152
 3153 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
 3154 Value *Cond, Instruction *InsertBefore = nullptr) {
 3155 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
 3156 }
 3157
 3158 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
 3159 return new(1) BranchInst(IfTrue, InsertAtEnd);
 3160 }
 3161
 3162 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
 3163 Value *Cond, BasicBlock *InsertAtEnd) {
 3164 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
 3165 }
 3166
 3167 /// Transparently provide more efficient getOperand methods.
 3168 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 3169
 // The operand count encodes the branch kind; there is no separate flag.
 3170 bool isUnconditional() const { return getNumOperands() == 1; }
 3171 bool isConditional() const { return getNumOperands() == 3; }
 3172
 // Op<-3>() is the Cond operand, counting back from op_end(); it only exists
 // on a conditional branch, hence the assertions below.
 3173 Value *getCondition() const {
 3174 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3174, __extension__ __PRETTY_FUNCTION__
))
;
 3175 return Op<-3>();
 3176 }
 3177
 3178 void setCondition(Value *V) {
 3179 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3179, __extension__ __PRETTY_FUNCTION__
))
;
 3180 Op<-3>() = V;
 3181 }
 3182
 3183 unsigned getNumSuccessors() const { return 1+isConditional(); }
 3184
 // Successors sit at the end of the operand list with TrueDest last, so
 // successor i lives at Op<-1>() - i (successor 0 == TrueDest).
 3185 BasicBlock *getSuccessor(unsigned i) const {
 3186 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3186, __extension__ __PRETTY_FUNCTION__
))
;
 3187 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
 3188 }
 3189
 3190 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
 3191 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "llvm/include/llvm/IR/Instructions.h", 3191, __extension__ __PRETTY_FUNCTION__
))
;
 3192 *(&Op<-1>() - idx) = NewSucc;
 3193 }
 3194
 3195 /// Swap the successors of this branch instruction.
 3196 ///
 3197 /// Swaps the successors of the branch instruction. This also swaps any
 3198 /// branch weight metadata associated with the instruction so that it
 3199 /// continues to map correctly to each operand.
 3200 void swapSuccessors();
 3201
 // Skip the leading Cond operand of a conditional branch so the range covers
 // only basic-block operands.
 3202 iterator_range<succ_op_iterator> successors() {
 3203 return make_range(
 3204 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
 3205 succ_op_iterator(value_op_end()));
 3206 }
 3207
 3208 iterator_range<const_succ_op_iterator> successors() const {
 3209 return make_range(const_succ_op_iterator(
 3210 std::next(value_op_begin(), isConditional() ? 1 : 0)),
 3211 const_succ_op_iterator(value_op_end()));
 3212 }
 3213
 3214 // Methods for support type inquiry through isa, cast, and dyn_cast:
 3215 static bool classof(const Instruction *I) {
 3216 return (I->getOpcode() == Instruction::Br);
 3217 }
 3218 static bool classof(const Value *V) {
 3219 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 3220 }
 3221};
3222
// OperandTraits specialization: BranchInst has a variable operand count with
// a minimum of 1 (unconditional: 1 operand; conditional: 3), fixed at
// allocation time by the placement-new argument in BranchInst::Create.
 3223template <>
 3224struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
 3225};
3226
// Preprocessor-expanded body of DEFINE_TRANSPARENT_OPERAND_ACCESSORS:
// out-of-line definitions of op_begin/op_end (mutable and const),
// getOperand/setOperand (both assert the index is within
// OperandTraits<BranchInst>::operands(this)), getNumOperands, and the
// Op<Idx>() helpers, all forwarding to OperandTraits<BranchInst>.
 3227DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3227, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this))[i_nocapture
].get()); } void BranchInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<BranchInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3227, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<BranchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned BranchInst::getNumOperands() const
{ return OperandTraits<BranchInst>::operands(this); } template
<int Idx_nocapture> Use &BranchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &BranchInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3228
3229//===----------------------------------------------------------------------===//
3230// SwitchInst Class
3231//===----------------------------------------------------------------------===//
3232
3233//===---------------------------------------------------------------------------
3234/// Multiway switch
3235///
3236class SwitchInst : public Instruction {
3237 unsigned ReservedSpace;
3238
3239 // Operand[0] = Value to switch on
3240 // Operand[1] = Default basic block destination
3241 // Operand[2n ] = Value to match
3242 // Operand[2n+1] = BasicBlock to go to on match
3243 SwitchInst(const SwitchInst &SI);
3244
3245 /// Create a new switch instruction, specifying a value to switch on and a
3246 /// default destination. The number of additional cases can be specified here
3247 /// to make memory allocation more efficient. This constructor can also
3248 /// auto-insert before another instruction.
3249 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3250 Instruction *InsertBefore);
3251
3252 /// Create a new switch instruction, specifying a value to switch on and a
3253 /// default destination. The number of additional cases can be specified here
3254 /// to make memory allocation more efficient. This constructor also
3255 /// auto-inserts at the end of the specified BasicBlock.
3256 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3257 BasicBlock *InsertAtEnd);
3258
3259 // allocate space for exactly zero operands
3260 void *operator new(size_t S) { return User::operator new(S); }
3261
3262 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3263 void growOperands();
3264
3265protected:
3266 // Note: Instruction needs to be a friend here to call cloneImpl.
3267 friend class Instruction;
3268
3269 SwitchInst *cloneImpl() const;
3270
3271public:
3272 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3273
3274 // -2
3275 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3276
3277 template <typename CaseHandleT> class CaseIteratorImpl;
3278
3279 /// A handle to a particular switch case. It exposes a convenient interface
3280 /// to both the case value and the successor block.
3281 ///
3282 /// We define this as a template and instantiate it to form both a const and
3283 /// non-const handle.
3284 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3285 class CaseHandleImpl {
3286 // Directly befriend both const and non-const iterators.
3287 friend class SwitchInst::CaseIteratorImpl<
3288 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3289
3290 protected:
3291 // Expose the switch type we're parameterized with to the iterator.
3292 using SwitchInstType = SwitchInstT;
3293
3294 SwitchInstT *SI;
3295 ptrdiff_t Index;
3296
3297 CaseHandleImpl() = default;
3298 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3299
3300 public:
3301 /// Resolves case value for current case.
3302 ConstantIntT *getCaseValue() const {
3303 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3304, __extension__ __PRETTY_FUNCTION__
))
3304 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3304, __extension__ __PRETTY_FUNCTION__
))
;
3305 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3306 }
3307
3308 /// Resolves successor for current case.
3309 BasicBlockT *getCaseSuccessor() const {
3310 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3312, __extension__ __PRETTY_FUNCTION__
))
3311 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3312, __extension__ __PRETTY_FUNCTION__
))
3312 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3312, __extension__ __PRETTY_FUNCTION__
))
;
3313 return SI->getSuccessor(getSuccessorIndex());
3314 }
3315
3316 /// Returns number of current case.
3317 unsigned getCaseIndex() const { return Index; }
3318
3319 /// Returns successor index for current case successor.
3320 unsigned getSuccessorIndex() const {
3321 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3323, __extension__ __PRETTY_FUNCTION__
))
3322 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3323, __extension__ __PRETTY_FUNCTION__
))
3323 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3323, __extension__ __PRETTY_FUNCTION__
))
;
3324 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3325 }
3326
3327 bool operator==(const CaseHandleImpl &RHS) const {
3328 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "llvm/include/llvm/IR/Instructions.h", 3328, __extension__ __PRETTY_FUNCTION__
))
;
3329 return Index == RHS.Index;
3330 }
3331 };
3332
3333 using ConstCaseHandle =
3334 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3335
3336 class CaseHandle
3337 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3338 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3339
3340 public:
3341 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3342
3343 /// Sets the new value for current case.
3344 void setValue(ConstantInt *V) const {
3345 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3346, __extension__ __PRETTY_FUNCTION__
))
3346 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3346, __extension__ __PRETTY_FUNCTION__
))
;
3347 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3348 }
3349
3350 /// Sets the new successor for current case.
3351 void setSuccessor(BasicBlock *S) const {
3352 SI->setSuccessor(getSuccessorIndex(), S);
3353 }
3354 };
3355
3356 template <typename CaseHandleT>
3357 class CaseIteratorImpl
3358 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3359 std::random_access_iterator_tag,
3360 const CaseHandleT> {
3361 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3362
3363 CaseHandleT Case;
3364
3365 public:
3366 /// Default constructed iterator is in an invalid state until assigned to
3367 /// a case for a particular switch.
3368 CaseIteratorImpl() = default;
3369
3370 /// Initializes case iterator for given SwitchInst and for given
3371 /// case number.
3372 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3373
3374 /// Initializes case iterator for given SwitchInst and for given
3375 /// successor index.
3376 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3377 unsigned SuccessorIndex) {
3378 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3379, __extension__ __PRETTY_FUNCTION__
))
3379 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3379, __extension__ __PRETTY_FUNCTION__
))
;
3380 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3381 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3382 }
3383
3384 /// Support converting to the const variant. This will be a no-op for const
3385 /// variant.
3386 operator CaseIteratorImpl<ConstCaseHandle>() const {
3387 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3388 }
3389
3390 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3391 // Check index correctness after addition.
3392 // Note: Index == getNumCases() means end().
3393 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3395, __extension__ __PRETTY_FUNCTION__
))
3394 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3395, __extension__ __PRETTY_FUNCTION__
))
3395 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3395, __extension__ __PRETTY_FUNCTION__
))
;
3396 Case.Index += N;
3397 return *this;
3398 }
3399 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3400 // Check index correctness after subtraction.
3401 // Note: Case.Index == getNumCases() means end().
3402 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3404, __extension__ __PRETTY_FUNCTION__
))
3403 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3404, __extension__ __PRETTY_FUNCTION__
))
3404 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "llvm/include/llvm/IR/Instructions.h", 3404, __extension__ __PRETTY_FUNCTION__
))
;
3405 Case.Index -= N;
3406 return *this;
3407 }
3408 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3409 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "llvm/include/llvm/IR/Instructions.h", 3409, __extension__ __PRETTY_FUNCTION__
))
;
3410 return Case.Index - RHS.Case.Index;
3411 }
3412 bool operator==(const CaseIteratorImpl &RHS) const {
3413 return Case == RHS.Case;
3414 }
3415 bool operator<(const CaseIteratorImpl &RHS) const {
3416 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "llvm/include/llvm/IR/Instructions.h", 3416, __extension__ __PRETTY_FUNCTION__
))
;
3417 return Case.Index < RHS.Case.Index;
3418 }
3419 const CaseHandleT &operator*() const { return Case; }
3420 };
3421
3422 using CaseIt = CaseIteratorImpl<CaseHandle>;
3423 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3424
3425 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3426 unsigned NumCases,
3427 Instruction *InsertBefore = nullptr) {
3428 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3429 }
3430
3431 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3432 unsigned NumCases, BasicBlock *InsertAtEnd) {
3433 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3434 }
3435
3436 /// Provide fast operand accessors
3437 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3438
3439 // Accessor Methods for Switch stmt
3440 Value *getCondition() const { return getOperand(0); }
3441 void setCondition(Value *V) { setOperand(0, V); }
3442
3443 BasicBlock *getDefaultDest() const {
3444 return cast<BasicBlock>(getOperand(1));
3445 }
3446
3447 void setDefaultDest(BasicBlock *DefaultCase) {
3448 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3449 }
3450
3451 /// Return the number of 'cases' in this switch instruction, excluding the
3452 /// default case.
3453 unsigned getNumCases() const {
3454 return getNumOperands()/2 - 1;
3455 }
3456
3457 /// Returns a read/write iterator that points to the first case in the
3458 /// SwitchInst.
3459 CaseIt case_begin() {
3460 return CaseIt(this, 0);
3461 }
3462
3463 /// Returns a read-only iterator that points to the first case in the
3464 /// SwitchInst.
3465 ConstCaseIt case_begin() const {
3466 return ConstCaseIt(this, 0);
3467 }
3468
3469 /// Returns a read/write iterator that points one past the last in the
3470 /// SwitchInst.
3471 CaseIt case_end() {
3472 return CaseIt(this, getNumCases());
3473 }
3474
3475 /// Returns a read-only iterator that points one past the last in the
3476 /// SwitchInst.
3477 ConstCaseIt case_end() const {
3478 return ConstCaseIt(this, getNumCases());
3479 }
3480
3481 /// Iteration adapter for range-for loops.
3482 iterator_range<CaseIt> cases() {
3483 return make_range(case_begin(), case_end());
3484 }
3485
3486 /// Constant iteration adapter for range-for loops.
3487 iterator_range<ConstCaseIt> cases() const {
3488 return make_range(case_begin(), case_end());
3489 }
3490
3491 /// Returns an iterator that points to the default case.
3492 /// Note: this iterator allows to resolve successor only. Attempt
3493 /// to resolve case value causes an assertion.
3494 /// Also note, that increment and decrement also causes an assertion and
3495 /// makes iterator invalid.
3496 CaseIt case_default() {
3497 return CaseIt(this, DefaultPseudoIndex);
3498 }
3499 ConstCaseIt case_default() const {
3500 return ConstCaseIt(this, DefaultPseudoIndex);
3501 }
3502
3503 /// Search all of the case values for the specified constant. If it is
3504 /// explicitly handled, return the case iterator of it, otherwise return
3505 /// default case iterator to indicate that it is handled by the default
3506 /// handler.
 // Delegates to the const overload below, then rebuilds a mutable iterator
 // from the located case index.
3507 CaseIt findCaseValue(const ConstantInt *C) {
3508 return CaseIt(
3509 this,
3510 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3511 }
 /// Const overload: linear scan over all cases comparing ConstantInt
 /// pointers (ConstantInts are uniqued, so pointer equality suffices);
 /// returns case_default() when no explicit case matches C.
3512 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3513 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3514 return Case.getCaseValue() == C;
3515 });
3516 if (I != case_end())
3517 return I;
3518
3519 return case_default();
3520 }
3521
3522 /// Finds the unique case value for a given successor. Returns null if the
3523 /// successor is not found, not unique, or is the default case.
 // Single pass over the cases; CI doubles as the "seen once" flag so a
 // second hit on BB returns nullptr immediately.
3524 ConstantInt *findCaseDest(BasicBlock *BB) {
3525 if (BB == getDefaultDest())
3526 return nullptr;
3527
3528 ConstantInt *CI = nullptr;
3529 for (auto Case : cases()) {
3530 if (Case.getCaseSuccessor() != BB)
3531 continue;
3532
3533 if (CI)
3534 return nullptr; // Multiple cases lead to BB.
3535
3536 CI = Case.getCaseValue();
3537 }
3538
3539 return CI;
3540 }
3541
3542 /// Add an entry to the switch instruction.
3543 /// Note:
3544 /// This action invalidates case_end(). Old case_end() iterator will
3545 /// point to the added case.
 // NOTE(review): defined out of line — presumably in Instructions.cpp.
3546 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3547
3548 /// This method removes the specified case and its successor from the switch
3549 /// instruction. Note that this operation may reorder the remaining cases at
3550 /// index idx and above.
3551 /// Note:
3552 /// This action invalidates iterators for all cases following the one removed,
3553 /// including the case_end() iterator. It returns an iterator for the next
3554 /// case.
 // NOTE(review): also defined out of line — confirm in Instructions.cpp.
3555 CaseIt removeCase(CaseIt I);
3556
 /// Successor count: one per operand pair (the default destination plus one
 /// per case — operands alternate value/successor).
3557 unsigned getNumSuccessors() const { return getNumOperands()/2; }
 /// Return successor idx; successors live at odd operand slots (idx*2+1).
 /// (Assert below is shown macro-expanded by the analyzer.)
3558 BasicBlock *getSuccessor(unsigned idx) const {
3559 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "llvm/include/llvm/IR/Instructions.h", 3559, __extension__ __PRETTY_FUNCTION__
))
;
3560 return cast<BasicBlock>(getOperand(idx*2+1));
3561 }
 /// Overwrite successor idx; mirrors getSuccessor's idx*2+1 slot layout.
 /// (Assert below is shown macro-expanded by the analyzer.)
3562 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3563 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "llvm/include/llvm/IR/Instructions.h", 3563, __extension__ __PRETTY_FUNCTION__
))
;
3564 setOperand(idx * 2 + 1, NewSucc);
3565 }
3566
3567 // Methods for support type inquiry through isa, cast, and dyn_cast:
 // A SwitchInst is identified purely by its opcode.
3568 static bool classof(const Instruction *I) {
3569 return I->getOpcode() == Instruction::Switch;
3570 }
 // Value-level overload: first narrow to Instruction, then re-check opcode.
3571 static bool classof(const Value *V) {
3572 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3573 }
3574};
3575
3576/// A wrapper class to simplify modification of SwitchInst cases along with
3577/// their prof branch_weights metadata.
 // Usage pattern visible here: wrap a SwitchInst, mutate cases through the
 // delegating members, and the destructor writes the rebuilt !prof metadata
 // back — but only if something actually changed.
3578class SwitchInstProfUpdateWrapper {
3579 SwitchInst &SI;
 // Cached branch weights, lazily populated by init(); None when the switch
 // carries no prof metadata (NOTE(review): presumed — init() body not in view).
3580 Optional<SmallVector<uint32_t, 8> > Weights = None;
 // Set when a mutation requires re-emitting the metadata in the destructor.
3581 bool Changed = false;
3582
3583protected:
3584 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3585
3586 MDNode *buildProfBranchWeightsMD();
3587
3588 void init();
3589
3590public:
3591 using CaseWeightOpt = Optional<uint32_t>;
 // Smart-pointer-style access so the wrapper can stand in for the SwitchInst.
3592 SwitchInst *operator->() { return &SI; }
3593 SwitchInst &operator*() { return SI; }
3594 operator SwitchInst *() { return &SI; }
3595
3596 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3597
 // Flush accumulated weight changes back onto the instruction's !prof node.
3598 ~SwitchInstProfUpdateWrapper() {
3599 if (Changed)
3600 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3601 }
3602
3603 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3604 /// correspondent branch weight.
3605 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3606
3607 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3608 /// specified branch weight for the added case.
3609 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3610
3611 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3612 /// this object to not touch the underlying SwitchInst in destructor.
3613 SymbolTableList<Instruction>::iterator eraseFromParent();
3614
3615 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3616 CaseWeightOpt getSuccessorWeight(unsigned idx);
3617
3618 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3619};
3620
 // SwitchInst stores its operands "hung off" the instruction so the case
 // list can grow. NOTE(review): <2> is presumably the minimum operand count
 // (condition + default) — confirm against HungoffOperandTraits.
3621template <>
3622struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3623};
3624
// Macro invocation shown with its expansion by the analyzer: defines the
// out-of-line op_begin/op_end/getOperand/setOperand/getNumOperands/Op<>
// accessors for SwitchInst in terms of OperandTraits<SwitchInst>.
3625DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3625, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this))[i_nocapture
].get()); } void SwitchInst::setOperand(unsigned i_nocapture,
 Value *Val_nocapture) { (static_cast <bool> (i_nocapture
 < OperandTraits<SwitchInst>::operands(this) &&
 "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3625, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<SwitchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned SwitchInst::getNumOperands() const
{ return OperandTraits<SwitchInst>::operands(this); } template
<int Idx_nocapture> Use &SwitchInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &SwitchInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
3626
3627//===----------------------------------------------------------------------===//
3628// IndirectBrInst Class
3629//===----------------------------------------------------------------------===//
3630
3631//===---------------------------------------------------------------------------
3632/// Indirect Branch Instruction.
3633///
 // indirectbr terminator: operand 0 is the computed jump address, operands
 // 1..N are the possible destination blocks (see the operand layout comment
 // below and getSuccessor's i+1 indexing).
3634class IndirectBrInst : public Instruction {
 // Hung-off operand capacity reserved ahead of growth (see growOperands()).
3635 unsigned ReservedSpace;
3636
3637 // Operand[0] = Address to jump to
3638 // Operand[n+1] = n-th destination
3639 IndirectBrInst(const IndirectBrInst &IBI);
3640
3641 /// Create a new indirectbr instruction, specifying an
3642 /// Address to jump to. The number of expected destinations can be specified
3643 /// here to make memory allocation more efficient. This constructor can also
3644 /// autoinsert before another instruction.
3645 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3646
3647 /// Create a new indirectbr instruction, specifying an
3648 /// Address to jump to. The number of expected destinations can be specified
3649 /// here to make memory allocation more efficient. This constructor also
3650 /// autoinserts at the end of the specified BasicBlock.
3651 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3652
3653 // allocate space for exactly zero operands
3654 void *operator new(size_t S) { return User::operator new(S); }
3655
3656 void init(Value *Address, unsigned NumDests);
3657 void growOperands();
3658
3659protected:
3660 // Note: Instruction needs to be a friend here to call cloneImpl.
3661 friend class Instruction;
3662
3663 IndirectBrInst *cloneImpl() const;
3664
3665public:
3666 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3667
3668 /// Iterator type that casts an operand to a basic block.
3669 ///
3670 /// This only makes sense because the successors are stored as adjacent
3671 /// operands for indirectbr instructions.
3672 struct succ_op_iterator
3673 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3674 std::random_access_iterator_tag, BasicBlock *,
3675 ptrdiff_t, BasicBlock *, BasicBlock *> {
3676 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3677
3678 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3679 BasicBlock *operator->() const { return operator*(); }
3680 };
3681
3682 /// The const version of `succ_op_iterator`.
3683 struct const_succ_op_iterator
3684 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3685 std::random_access_iterator_tag,
3686 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3687 const BasicBlock *> {
3688 explicit const_succ_op_iterator(const_value_op_iterator I)
3689 : iterator_adaptor_base(I) {}
3690
3691 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3692 const BasicBlock *operator->() const { return operator*(); }
3693 };
3694
3695 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3696 Instruction *InsertBefore = nullptr) {
3697 return new IndirectBrInst(Address, NumDests, InsertBefore);
3698 }
3699
3700 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3701 BasicBlock *InsertAtEnd) {
3702 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3703 }
3704
3705 /// Provide fast operand accessors.
3706 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3707
3708 // Accessor Methods for IndirectBrInst instruction.
3709 Value *getAddress() { return getOperand(0); }
3710 const Value *getAddress() const { return getOperand(0); }
3711 void setAddress(Value *V) { setOperand(0, V); }
3712
3713 /// return the number of possible destinations in this
3714 /// indirectbr instruction.
3715 unsigned getNumDestinations() const { return getNumOperands()-1; }
3716
3717 /// Return the specified destination.
3718 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3719 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3720
3721 /// Add a destination.
3722 ///
3723 void addDestination(BasicBlock *Dest);
3724
3725 /// This method removes the specified successor from the
3726 /// indirectbr instruction.
3727 void removeDestination(unsigned i);
3728
 // Destinations start at operand 1; operand 0 is the address.
3729 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3730 BasicBlock *getSuccessor(unsigned i) const {
3731 return cast<BasicBlock>(getOperand(i+1));
3732 }
3733 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3734 setOperand(i + 1, NewSucc);
3735 }
3736
 // std::next skips operand 0 (the address) so only destinations are yielded.
3737 iterator_range<succ_op_iterator> successors() {
3738 return make_range(succ_op_iterator(std::next(value_op_begin())),
3739 succ_op_iterator(value_op_end()));
3740 }
3741
3742 iterator_range<const_succ_op_iterator> successors() const {
3743 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3744 const_succ_op_iterator(value_op_end()));
3745 }
3746
3747 // Methods for support type inquiry through isa, cast, and dyn_cast:
3748 static bool classof(const Instruction *I) {
3749 return I->getOpcode() == Instruction::IndirectBr;
3750 }
3751 static bool classof(const Value *V) {
3752 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3753 }
3754};
3755
 // IndirectBrInst also uses hung-off operands so destinations can be added
 // and removed. NOTE(review): <1> presumably the minimum operand count (the
 // address) — confirm against HungoffOperandTraits.
3756template <>
3757struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3758};
3759
// Macro invocation shown with its expansion by the analyzer: defines the
// transparent operand accessors for IndirectBrInst, mirroring the SwitchInst
// expansion above but parameterized on OperandTraits<IndirectBrInst>.
3760DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return
OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst
::const_op_iterator IndirectBrInst::op_begin() const { return
OperandTraits<IndirectBrInst>::op_begin(const_cast<
IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst
::op_end() { return OperandTraits<IndirectBrInst>::op_end
(this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end
() const { return OperandTraits<IndirectBrInst>::op_end
(const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<IndirectBrInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3760, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<IndirectBrInst
>::op_begin(const_cast<IndirectBrInst*>(this))[i_nocapture
].get()); } void IndirectBrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<IndirectBrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 3760, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<IndirectBrInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned IndirectBrInst::getNumOperands(
) const { return OperandTraits<IndirectBrInst>::operands
(this); } template <int Idx_nocapture> Use &IndirectBrInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &IndirectBrInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
3761
3762//===----------------------------------------------------------------------===//
3763// InvokeInst Class
3764//===----------------------------------------------------------------------===//
3765
3766/// Invoke instruction. The SubclassData field is used to hold the
3767/// calling convention of the call.
3768///
 // invoke terminator: a call with two successors — a normal destination and
 // an unwind (exception) destination — addressed from the END of the operand
 // array via the negative indices below.
3769class InvokeInst : public CallBase {
3770 /// The number of operands for this call beyond the called function,
3771 /// arguments, and operand bundles.
3772 static constexpr int NumExtraOperands = 2;
3773
3774 /// The index from the end of the operand array to the normal destination.
3775 static constexpr int NormalDestOpEndIdx = -3;
3776
3777 /// The index from the end of the operand array to the unwind destination.
3778 static constexpr int UnwindDestOpEndIdx = -2;
3779
3780 InvokeInst(const InvokeInst &BI);
3781
3782 /// Construct an InvokeInst given a range of arguments.
3783 ///
3784 /// Construct an InvokeInst from a range of arguments
3785 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3786 BasicBlock *IfException, ArrayRef<Value *> Args,
3787 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3788 const Twine &NameStr, Instruction *InsertBefore);
3789
3790 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3791 BasicBlock *IfException, ArrayRef<Value *> Args,
3792 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3793 const Twine &NameStr, BasicBlock *InsertAtEnd);
3794
3795 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3796 BasicBlock *IfException, ArrayRef<Value *> Args,
3797 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3798
3799 /// Compute the number of operands to allocate.
3800 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3801 // We need one operand for the called function, plus our extra operands and
3802 // the input operand counts provided.
3803 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3804 }
3805
3806protected:
3807 // Note: Instruction needs to be a friend here to call cloneImpl.
3808 friend class Instruction;
3809
3810 InvokeInst *cloneImpl() const;
3811
3812public:
 // Create() overloads: {explicit FunctionType | FunctionCallee} x
 // {with | without bundles} x {InsertBefore | InsertAtEnd}. All funnel into
 // the private constructors via placement new sized by ComputeNumOperands.
3813 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3814 BasicBlock *IfException, ArrayRef<Value *> Args,
3815 const Twine &NameStr,
3816 Instruction *InsertBefore = nullptr) {
3817 int NumOperands = ComputeNumOperands(Args.size());
3818 return new (NumOperands)
3819 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3820 NameStr, InsertBefore);
3821 }
3822
3823 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3824 BasicBlock *IfException, ArrayRef<Value *> Args,
3825 ArrayRef<OperandBundleDef> Bundles = None,
3826 const Twine &NameStr = "",
3827 Instruction *InsertBefore = nullptr) {
3828 int NumOperands =
3829 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3830 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3831
3832 return new (NumOperands, DescriptorBytes)
3833 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3834 NameStr, InsertBefore);
3835 }
3836
3837 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3838 BasicBlock *IfException, ArrayRef<Value *> Args,
3839 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3840 int NumOperands = ComputeNumOperands(Args.size());
3841 return new (NumOperands)
3842 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3843 NameStr, InsertAtEnd);
3844 }
3845
3846 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3847 BasicBlock *IfException, ArrayRef<Value *> Args,
3848 ArrayRef<OperandBundleDef> Bundles,
3849 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3850 int NumOperands =
3851 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3852 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3853
3854 return new (NumOperands, DescriptorBytes)
3855 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3856 NameStr, InsertAtEnd);
3857 }
3858
3859 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3860 BasicBlock *IfException, ArrayRef<Value *> Args,
3861 const Twine &NameStr,
3862 Instruction *InsertBefore = nullptr) {
3863 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3864 IfException, Args, None, NameStr, InsertBefore);
3865 }
3866
3867 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3868 BasicBlock *IfException, ArrayRef<Value *> Args,
3869 ArrayRef<OperandBundleDef> Bundles = None,
3870 const Twine &NameStr = "",
3871 Instruction *InsertBefore = nullptr) {
3872 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3873 IfException, Args, Bundles, NameStr, InsertBefore);
3874 }
3875
3876 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3877 BasicBlock *IfException, ArrayRef<Value *> Args,
3878 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3879 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3880 IfException, Args, NameStr, InsertAtEnd);
3881 }
3882
3883 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3884 BasicBlock *IfException, ArrayRef<Value *> Args,
3885 ArrayRef<OperandBundleDef> Bundles,
3886 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3887 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3888 IfException, Args, Bundles, NameStr, InsertAtEnd);
3889 }
3890
3891 /// Create a clone of \p II with a different set of operand bundles and
3892 /// insert it before \p InsertPt.
3893 ///
3894 /// The returned invoke instruction is identical to \p II in every way except
3895 /// that the operand bundles for the new instruction are set to the operand
3896 /// bundles in \p Bundles.
3897 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3898 Instruction *InsertPt = nullptr);
3899
3900 // get*Dest - Return the destination basic blocks...
3901 BasicBlock *getNormalDest() const {
3902 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3903 }
3904 BasicBlock *getUnwindDest() const {
3905 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3906 }
3907 void setNormalDest(BasicBlock *B) {
3908 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3909 }
3910 void setUnwindDest(BasicBlock *B) {
3911 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3912 }
3913
3914 /// Get the landingpad instruction from the landing pad
3915 /// block (the unwind destination).
3916 LandingPadInst *getLandingPadInst() const;
3917
 // Successor 0 = normal destination, successor 1 = unwind destination.
 // (Asserts below are shown macro-expanded by the analyzer.)
3918 BasicBlock *getSuccessor(unsigned i) const {
3919 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "llvm/include/llvm/IR/Instructions.h", 3919, __extension__ __PRETTY_FUNCTION__
))
;
3920 return i == 0 ? getNormalDest() : getUnwindDest();
3921 }
3922
3923 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3924 assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!"
) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\""
, "llvm/include/llvm/IR/Instructions.h", 3924, __extension__ __PRETTY_FUNCTION__
))
;
3925 if (i == 0)
3926 setNormalDest(NewSucc);
3927 else
3928 setUnwindDest(NewSucc);
3929 }
3930
3931 unsigned getNumSuccessors() const { return 2; }
3932
3933 // Methods for support type inquiry through isa, cast, and dyn_cast:
3934 static bool classof(const Instruction *I) {
3935 return (I->getOpcode() == Instruction::Invoke);
3936 }
3937 static bool classof(const Value *V) {
3938 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3939 }
3940
3941private:
3942 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3943 // method so that subclasses cannot accidentally use it.
3944 template <typename Bitfield>
3945 void setSubclassData(typename Bitfield::Type Value) {
3946 Instruction::setSubclassData<Bitfield>(Value);
3947 }
3948};
3949
// Inline constructor (insert-before form): places the operand array at the
// END of the CallBase allocation (op_end - NumOperands), then defers all
// operand wiring to init().
3950InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3951 BasicBlock *IfException, ArrayRef<Value *> Args,
3952 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3953 const Twine &NameStr, Instruction *InsertBefore)
3954 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3955 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3956 InsertBefore) {
3957 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3958}
3959
// Inline constructor (insert-at-block-end form): identical to the
// insert-before overload except for the insertion point.
3960InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3961 BasicBlock *IfException, ArrayRef<Value *> Args,
3962 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3963 const Twine &NameStr, BasicBlock *InsertAtEnd)
3964 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3965 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3966 InsertAtEnd) {
3967 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3968}
3969
3970//===----------------------------------------------------------------------===//
3971// CallBrInst Class
3972//===----------------------------------------------------------------------===//
3973
3974/// CallBr instruction, tracking function calls that may not return control but
3975/// instead transfer it to a third location. The SubclassData field is used to
3976/// hold the calling convention of the call.
3977///
3978class CallBrInst : public CallBase {
3979
3980 unsigned NumIndirectDests;
3981
3982 CallBrInst(const CallBrInst &BI);
3983
3984 /// Construct a CallBrInst given a range of arguments.
3985 ///
3986 /// Construct a CallBrInst from a range of arguments
3987 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3988 ArrayRef<BasicBlock *> IndirectDests,
3989 ArrayRef<Value *> Args,
3990 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3991 const Twine &NameStr, Instruction *InsertBefore);
3992
3993 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3994 ArrayRef<BasicBlock *> IndirectDests,
3995 ArrayRef<Value *> Args,
3996 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3997 const Twine &NameStr, BasicBlock *InsertAtEnd);
3998
3999 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4000 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4001 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4002
4003 /// Should the Indirect Destinations change, scan + update the Arg list.
4004 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
4005
4006 /// Compute the number of operands to allocate.
4007 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4008 int NumBundleInputs = 0) {
4009 // We need one operand for the called function, plus our extra operands and
4010 // the input operand counts provided.
4011 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4012 }
4013
4014protected:
4015 // Note: Instruction needs to be a friend here to call cloneImpl.
4016 friend class Instruction;
4017
4018 CallBrInst *cloneImpl() const;
4019
4020public:
4021 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4022 BasicBlock *DefaultDest,
4023 ArrayRef<BasicBlock *> IndirectDests,
4024 ArrayRef<Value *> Args, const Twine &NameStr,
4025 Instruction *InsertBefore = nullptr) {
4026 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4027 return new (NumOperands)
4028 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4029 NumOperands, NameStr, InsertBefore);
4030 }
4031
4032 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4033 BasicBlock *DefaultDest,
4034 ArrayRef<BasicBlock *> IndirectDests,
4035 ArrayRef<Value *> Args,
4036 ArrayRef<OperandBundleDef> Bundles = None,
4037 const Twine &NameStr = "",
4038 Instruction *InsertBefore = nullptr) {
4039 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4040 CountBundleInputs(Bundles));
4041 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4042
4043 return new (NumOperands, DescriptorBytes)
4044 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4045 NumOperands, NameStr, InsertBefore);
4046 }
4047
4048 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4049 BasicBlock *DefaultDest,
4050 ArrayRef<BasicBlock *> IndirectDests,
4051 ArrayRef<Value *> Args, const Twine &NameStr,
4052 BasicBlock *InsertAtEnd) {
4053 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4054 return new (NumOperands)
4055 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4056 NumOperands, NameStr, InsertAtEnd);
4057 }
4058
4059 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4060 BasicBlock *DefaultDest,
4061 ArrayRef<BasicBlock *> IndirectDests,
4062 ArrayRef<Value *> Args,
4063 ArrayRef<OperandBundleDef> Bundles,
4064 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4065 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4066 CountBundleInputs(Bundles));
4067 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4068
4069 return new (NumOperands, DescriptorBytes)
4070 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4071 NumOperands, NameStr, InsertAtEnd);
4072 }
4073
4074 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4075 ArrayRef<BasicBlock *> IndirectDests,
4076 ArrayRef<Value *> Args, const Twine &NameStr,
4077 Instruction *InsertBefore = nullptr) {
4078 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4079 IndirectDests, Args, NameStr, InsertBefore);
4080 }
4081
4082 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4083 ArrayRef<BasicBlock *> IndirectDests,
4084 ArrayRef<Value *> Args,
4085 ArrayRef<OperandBundleDef> Bundles = None,
4086 const Twine &NameStr = "",
4087 Instruction *InsertBefore = nullptr) {
4088 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4089 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4090 }
4091
4092 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4093 ArrayRef<BasicBlock *> IndirectDests,
4094 ArrayRef<Value *> Args, const Twine &NameStr,
4095 BasicBlock *InsertAtEnd) {
4096 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4097 IndirectDests, Args, NameStr, InsertAtEnd);
4098 }
4099
4100 static CallBrInst *Create(FunctionCallee Func,
4101 BasicBlock *DefaultDest,
4102 ArrayRef<BasicBlock *> IndirectDests,
4103 ArrayRef<Value *> Args,
4104 ArrayRef<OperandBundleDef> Bundles,
4105 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4106 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4107 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4108 }
4109
4110 /// Create a clone of \p CBI with a different set of operand bundles and
4111 /// insert it before \p InsertPt.
4112 ///
4113 /// The returned callbr instruction is identical to \p CBI in every way
4114 /// except that the operand bundles for the new instruction are set to the
4115 /// operand bundles in \p Bundles.
4116 static CallBrInst *Create(CallBrInst *CBI,
4117 ArrayRef<OperandBundleDef> Bundles,
4118 Instruction *InsertPt = nullptr);
4119
4120 /// Return the number of callbr indirect dest labels.
4121 ///
4122 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4123
4124 /// getIndirectDestLabel - Return the i-th indirect dest label.
4125 ///
4126 Value *getIndirectDestLabel(unsigned i) const {
4127 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "llvm/include/llvm/IR/Instructions.h", 4127, __extension__ __PRETTY_FUNCTION__
))
;
4128 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4129 }
4130
4131 Value *getIndirectDestLabelUse(unsigned i) const {
4132 assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() &&
"Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\""
, "llvm/include/llvm/IR/Instructions.h", 4132, __extension__ __PRETTY_FUNCTION__
))
;
4133 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4134 }
4135
4136 // Return the destination basic blocks...
4137 BasicBlock *getDefaultDest() const {
4138 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4139 }
4140 BasicBlock *getIndirectDest(unsigned i) const {
4141 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4142 }
4143 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4144 SmallVector<BasicBlock *, 16> IndirectDests;
4145 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4146 IndirectDests.push_back(getIndirectDest(i));
4147 return IndirectDests;
4148 }
4149 void setDefaultDest(BasicBlock *B) {
4150 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4151 }
4152 void setIndirectDest(unsigned i, BasicBlock *B) {
4153 updateArgBlockAddresses(i, B);
4154 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4155 }
4156
// Successor 0 is the default destination; successors 1..N map to indirect
// destinations 0..N-1.
// NOTE(review): the assert bound `i < getNumSuccessors() + 1` is one looser
// than the valid range [0, getNumSuccessors()): it admits
// i == getNumIndirectDests() + 1, which would index one past the last
// indirect dest. setSuccessor below uses the tighter (correct) bound
// `i < getNumIndirectDests() + 1`; this one likely should match it.
4157 BasicBlock *getSuccessor(unsigned i) const {
4158 assert(i < getNumSuccessors() + 1 &&(static_cast <bool> (i < getNumSuccessors() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4159, __extension__ __PRETTY_FUNCTION__
))
4159 "Successor # out of range for callbr!")(static_cast <bool> (i < getNumSuccessors() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4159, __extension__ __PRETTY_FUNCTION__
))
;
4160 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4161 }
4162
// Mirror of getSuccessor: index 0 sets the default destination, index i>0
// sets indirect destination i-1. The assert bound here
// (i < getNumIndirectDests() + 1, i.e. i < getNumSuccessors()) is the
// exact valid range.
4163 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4164 assert(i < getNumIndirectDests() + 1 &&(static_cast <bool> (i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4165, __extension__ __PRETTY_FUNCTION__
))
4165 "Successor # out of range for callbr!")(static_cast <bool> (i < getNumIndirectDests() + 1 &&
"Successor # out of range for callbr!") ? void (0) : __assert_fail
("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\""
, "llvm/include/llvm/IR/Instructions.h", 4165, __extension__ __PRETTY_FUNCTION__
))
;
4166 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4167 }
4168
// Total successors = all indirect destinations plus the one default dest.
4169 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4170
4171 // Methods for support type inquiry through isa, cast, and dyn_cast:
// An Instruction is a CallBrInst iff its opcode is CallBr.
4172 static bool classof(const Instruction *I) {
4173 return (I->getOpcode() == Instruction::CallBr);
4174 }
// Value-level classof: must be an Instruction first, then defers to the
// Instruction overload above.
4175 static bool classof(const Value *V) {
4176 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4177 }
4178
4179private:
4180 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4181 // method so that subclasses cannot accidentally use it.
// Private forwarder: same signature as the base template, but restricted
// to this class's own implementation code.
4182 template <typename Bitfield>
4183 void setSubclassData(typename Bitfield::Type Value) {
4184 Instruction::setSubclassData<Bitfield>(Value);
4185 }
4186};
4187
// Out-of-line inline ctor: places the NumOperands operands at the tail of
// the allocation (hung-off-uses style via op_end() - NumOperands), then
// delegates all operand/bundle wiring to init(). Inserts before the given
// instruction.
4188CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4189 ArrayRef<BasicBlock *> IndirectDests,
4190 ArrayRef<Value *> Args,
4191 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4192 const Twine &NameStr, Instruction *InsertBefore)
4193 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4194 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4195 InsertBefore) {
4196 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4197}
4198
// Identical to the ctor above except it appends to the end of a BasicBlock
// instead of inserting before an instruction.
4199CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4200 ArrayRef<BasicBlock *> IndirectDests,
4201 ArrayRef<Value *> Args,
4202 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4203 const Twine &NameStr, BasicBlock *InsertAtEnd)
4204 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4205 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4206 InsertAtEnd) {
4207 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4208}
4209
4210//===----------------------------------------------------------------------===//
4211// ResumeInst Class
4212//===----------------------------------------------------------------------===//
4213
4214//===---------------------------------------------------------------------------
4215/// Resume the propagation of an exception.
4216///
// Terminator with exactly one operand (the exception value, see
// OperandTraits<ResumeInst> below: FixedNumOperandTraits<.., 1>) and zero
// successors.
4217class ResumeInst : public Instruction {
4218 ResumeInst(const ResumeInst &RI);
4219
4220 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4221 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4222
4223protected:
4224 // Note: Instruction needs to be a friend here to call cloneImpl.
4225 friend class Instruction;
4226
4227 ResumeInst *cloneImpl() const;
4228
4229public:
// Factory: new(1) allocates room for the single Exn operand.
4230 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4231 return new(1) ResumeInst(Exn, InsertBefore);
4232 }
4233
4234 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4235 return new(1) ResumeInst(Exn, InsertAtEnd);
4236 }
4237
4238 /// Provide fast operand accessors
4239 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4240
4241 /// Convenience accessor.
// Returns the exception value being resumed (operand 0).
4242 Value *getValue() const { return Op<0>(); }
4243
4244 unsigned getNumSuccessors() const { return 0; }
4245
4246 // Methods for support type inquiry through isa, cast, and dyn_cast:
4247 static bool classof(const Instruction *I) {
4248 return I->getOpcode() == Instruction::Resume;
4249 }
4250 static bool classof(const Value *V) {
4251 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4252 }
4253
4254private:
// Private terminator-interface stubs: ResumeInst has no successors, so
// indexed successor access is a hard error (unreachable).
4255 BasicBlock *getSuccessor(unsigned idx) const {
4256 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 4256)
;
4257 }
4258
4259 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4260 llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 4260)
;
4261 }
4262};
4263
// ResumeInst has exactly one operand (the exception value).
4264template <>
4265struct OperandTraits<ResumeInst> :
4266 public FixedNumOperandTraits<ResumeInst, 1> {
4267};
4268
// Macro-expanded out-of-line definitions of the accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside ResumeInst: op_begin/op_end
// (const and non-const), range-checked getOperand/setOperand,
// getNumOperands, and the Op<Idx>() use accessors.
4269DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits
<ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator
ResumeInst::op_begin() const { return OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst
::op_iterator ResumeInst::op_end() { return OperandTraits<
ResumeInst>::op_end(this); } ResumeInst::const_op_iterator
ResumeInst::op_end() const { return OperandTraits<ResumeInst
>::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ResumeInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4269, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<ResumeInst
>::op_begin(const_cast<ResumeInst*>(this))[i_nocapture
].get()); } void ResumeInst::setOperand(unsigned i_nocapture,
Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ResumeInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4269, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<ResumeInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned ResumeInst::getNumOperands() const
{ return OperandTraits<ResumeInst>::operands(this); } template
<int Idx_nocapture> Use &ResumeInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &ResumeInst::Op() const { return
this->OpFrom<Idx_nocapture>(this); }
4270
4271//===----------------------------------------------------------------------===//
4272// CatchSwitchInst Class
4273//===----------------------------------------------------------------------===//
// Terminator with hung-off operands (see OperandTraits specialization and
// growOperands below): operand 0 is the parent pad, operand 1 is the unwind
// destination when hasUnwindDest() (tracked in subclass-data bit 0), and
// the remaining operands are the handler blocks.
4274class CatchSwitchInst : public Instruction {
4275 using UnwindDestField = BoolBitfieldElementT<0>;
4276
4277 /// The number of operands actually allocated. NumOperands is
4278 /// the number actually in use.
4279 unsigned ReservedSpace;
4280
4281 // Operand[0] = Outer scope
4282 // Operand[1] = Unwind block destination
4283 // Operand[n] = BasicBlock to go to on match
4284 CatchSwitchInst(const CatchSwitchInst &CSI);
4285
4286 /// Create a new switch instruction, specifying a
4287 /// default destination. The number of additional handlers can be specified
4288 /// here to make memory allocation more efficient.
4289 /// This constructor can also autoinsert before another instruction.
4290 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4291 unsigned NumHandlers, const Twine &NameStr,
4292 Instruction *InsertBefore);
4293
4294 /// Create a new switch instruction, specifying a
4295 /// default destination. The number of additional handlers can be specified
4296 /// here to make memory allocation more efficient.
4297 /// This constructor also autoinserts at the end of the specified BasicBlock.
4298 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4299 unsigned NumHandlers, const Twine &NameStr,
4300 BasicBlock *InsertAtEnd);
4301
4302 // allocate space for exactly zero operands
4303 void *operator new(size_t S) { return User::operator new(S); }
4304
4305 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4306 void growOperands(unsigned Size);
4307
4308protected:
4309 // Note: Instruction needs to be a friend here to call cloneImpl.
4310 friend class Instruction;
4311
4312 CatchSwitchInst *cloneImpl() const;
4313
4314public:
4315 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4316
4317 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4318 unsigned NumHandlers,
4319 const Twine &NameStr = "",
4320 Instruction *InsertBefore = nullptr) {
4321 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4322 InsertBefore);
4323 }
4324
4325 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4326 unsigned NumHandlers, const Twine &NameStr,
4327 BasicBlock *InsertAtEnd) {
4328 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4329 InsertAtEnd);
4330 }
4331
4332 /// Provide fast operand accessors
4333 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4334
4335 // Accessor Methods for CatchSwitch stmt
4336 Value *getParentPad() const { return getOperand(0); }
4337 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4338
4339 // Accessor Methods for CatchSwitch stmt
4340 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4341 bool unwindsToCaller() const { return !hasUnwindDest(); }
// Returns nullptr when there is no unwind dest — in that case operand 1 is
// actually the first handler, so callers must check hasUnwindDest() before
// interpreting operand 1.
4342 BasicBlock *getUnwindDest() const {
4343 if (hasUnwindDest())
4344 return cast<BasicBlock>(getOperand(1));
4345 return nullptr;
4346 }
// Only valid when an unwind dest already exists; cannot be used to ADD one
// (the hasUnwindDest() assert below enforces this).
4347 void setUnwindDest(BasicBlock *UnwindDest) {
4348 assert(UnwindDest)(static_cast <bool> (UnwindDest) ? void (0) : __assert_fail
("UnwindDest", "llvm/include/llvm/IR/Instructions.h", 4348, __extension__
__PRETTY_FUNCTION__))
;
4349 assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail
("hasUnwindDest()", "llvm/include/llvm/IR/Instructions.h", 4349
, __extension__ __PRETTY_FUNCTION__))
;
4350 setOperand(1, UnwindDest);
4351 }
4352
4353 /// return the number of 'handlers' in this catchswitch
4354 /// instruction, except the default handler
// Handlers = all operands minus the parent pad, minus the unwind dest if
// one is present.
4355 unsigned getNumHandlers() const {
4356 if (hasUnwindDest())
4357 return getNumOperands() - 2;
4358 return getNumOperands() - 1;
4359 }
4360
4361private:
// Deref helpers for the mapped handler iterators below: every handler
// operand is known to be a BasicBlock.
4362 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4363 static const BasicBlock *handler_helper(const Value *V) {
4364 return cast<BasicBlock>(V);
4365 }
4366
4367public:
4368 using DerefFnTy = BasicBlock *(*)(Value *);
4369 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4370 using handler_range = iterator_range<handler_iterator>;
4371 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4372 using const_handler_iterator =
4373 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4374 using const_handler_range = iterator_range<const_handler_iterator>;
4375
4376 /// Returns an iterator that points to the first handler in CatchSwitchInst.
// Skips operand 0 (parent pad) and, when present, operand 1 (unwind dest).
4377 handler_iterator handler_begin() {
4378 op_iterator It = op_begin() + 1;
4379 if (hasUnwindDest())
4380 ++It;
4381 return handler_iterator(It, DerefFnTy(handler_helper));
4382 }
4383
4384 /// Returns an iterator that points to the first handler in the
4385 /// CatchSwitchInst.
4386 const_handler_iterator handler_begin() const {
4387 const_op_iterator It = op_begin() + 1;
4388 if (hasUnwindDest())
4389 ++It;
4390 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4391 }
4392
4393 /// Returns a read-only iterator that points one past the last
4394 /// handler in the CatchSwitchInst.
4395 handler_iterator handler_end() {
4396 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4397 }
4398
4399 /// Returns an iterator that points one past the last handler in the
4400 /// CatchSwitchInst.
4401 const_handler_iterator handler_end() const {
4402 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4403 }
4404
4405 /// iteration adapter for range-for loops.
4406 handler_range handlers() {
4407 return make_range(handler_begin(), handler_end());
4408 }
4409
4410 /// iteration adapter for range-for loops.
4411 const_handler_range handlers() const {
4412 return make_range(handler_begin(), handler_end());
4413 }
4414
4415 /// Add an entry to the switch instruction...
4416 /// Note:
4417 /// This action invalidates handler_end(). Old handler_end() iterator will
4418 /// point to the added handler.
4419 void addHandler(BasicBlock *Dest);
4420
4421 void removeHandler(handler_iterator HI);
4422
// Successors are every operand except the parent pad: the unwind dest (if
// present) is successor 0, followed by the handlers.
4423 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4424 BasicBlock *getSuccessor(unsigned Idx) const {
4425 assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "llvm/include/llvm/IR/Instructions.h", 4426, __extension__ __PRETTY_FUNCTION__
))
4426 "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "llvm/include/llvm/IR/Instructions.h", 4426, __extension__ __PRETTY_FUNCTION__
))
;
4427 return cast<BasicBlock>(getOperand(Idx + 1));
4428 }
4429 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4430 assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "llvm/include/llvm/IR/Instructions.h", 4431, __extension__ __PRETTY_FUNCTION__
))
4431 "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchswitch!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\""
, "llvm/include/llvm/IR/Instructions.h", 4431, __extension__ __PRETTY_FUNCTION__
))
;
4432 setOperand(Idx + 1, NewSucc);
4433 }
4434
4435 // Methods for support type inquiry through isa, cast, and dyn_cast:
4436 static bool classof(const Instruction *I) {
4437 return I->getOpcode() == Instruction::CatchSwitch;
4438 }
4439 static bool classof(const Value *V) {
4440 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4441 }
4442};
4443
// Hung-off (separately allocated, growable) operand storage; 2 matches the
// minimum layout of parent pad + unwind dest.
4444template <>
4445struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4446
// Macro-expanded operand accessor definitions for CatchSwitchInst
// (op_begin/op_end, checked getOperand/setOperand, getNumOperands, Op<Idx>).
4447DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return
OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst
::const_op_iterator CatchSwitchInst::op_begin() const { return
OperandTraits<CatchSwitchInst>::op_begin(const_cast<
CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst
::op_end() { return OperandTraits<CatchSwitchInst>::op_end
(this); } CatchSwitchInst::const_op_iterator CatchSwitchInst::
op_end() const { return OperandTraits<CatchSwitchInst>::
op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<CatchSwitchInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4447, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<CatchSwitchInst
>::op_begin(const_cast<CatchSwitchInst*>(this))[i_nocapture
].get()); } void CatchSwitchInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<CatchSwitchInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4447, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<CatchSwitchInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned CatchSwitchInst::getNumOperands
() const { return OperandTraits<CatchSwitchInst>::operands
(this); } template <int Idx_nocapture> Use &CatchSwitchInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &CatchSwitchInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
4448
4449//===----------------------------------------------------------------------===//
4450// CleanupPadInst Class
4451//===----------------------------------------------------------------------===//
// Thin wrapper over FuncletPadInst for the cleanuppad opcode; all behavior
// lives in the base class. Values = 1 + Args.size(): one slot for the
// parent-pad operand plus one per argument.
4452class CleanupPadInst : public FuncletPadInst {
4453private:
4454 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4455 unsigned Values, const Twine &NameStr,
4456 Instruction *InsertBefore)
4457 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4458 NameStr, InsertBefore) {}
4459 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4460 unsigned Values, const Twine &NameStr,
4461 BasicBlock *InsertAtEnd)
4462 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4463 NameStr, InsertAtEnd) {}
4464
4465public:
4466 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4467 const Twine &NameStr = "",
4468 Instruction *InsertBefore = nullptr) {
4469 unsigned Values = 1 + Args.size();
4470 return new (Values)
4471 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4472 }
4473
4474 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4475 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4476 unsigned Values = 1 + Args.size();
4477 return new (Values)
4478 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4479 }
4480
4481 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4482 static bool classof(const Instruction *I) {
4483 return I->getOpcode() == Instruction::CleanupPad;
4484 }
4485 static bool classof(const Value *V) {
4486 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4487 }
4488};
4489
4490//===----------------------------------------------------------------------===//
4491// CatchPadInst Class
4492//===----------------------------------------------------------------------===//
// FuncletPadInst wrapper for the catchpad opcode. The "parent pad" slot
// (Op<-1>, the last operand) holds the owning CatchSwitchInst rather than
// a funclet pad; Values = 1 + Args.size() accounts for it.
4493class CatchPadInst : public FuncletPadInst {
4494private:
4495 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4496 unsigned Values, const Twine &NameStr,
4497 Instruction *InsertBefore)
4498 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4499 NameStr, InsertBefore) {}
4500 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4501 unsigned Values, const Twine &NameStr,
4502 BasicBlock *InsertAtEnd)
4503 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4504 NameStr, InsertAtEnd) {}
4505
4506public:
4507 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4508 const Twine &NameStr = "",
4509 Instruction *InsertBefore = nullptr) {
4510 unsigned Values = 1 + Args.size();
4511 return new (Values)
4512 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4513 }
4514
4515 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4516 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4517 unsigned Values = 1 + Args.size();
4518 return new (Values)
4519 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4520 }
4521
4522 /// Convenience accessors
// The owning catchswitch, stored as the last operand (Op<-1>).
4523 CatchSwitchInst *getCatchSwitch() const {
4524 return cast<CatchSwitchInst>(Op<-1>());
4525 }
4526 void setCatchSwitch(Value *CatchSwitch) {
4527 assert(CatchSwitch)(static_cast <bool> (CatchSwitch) ? void (0) : __assert_fail
("CatchSwitch", "llvm/include/llvm/IR/Instructions.h", 4527,
__extension__ __PRETTY_FUNCTION__))
;
4528 Op<-1>() = CatchSwitch;
4529 }
4530
4531 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4532 static bool classof(const Instruction *I) {
4533 return I->getOpcode() == Instruction::CatchPad;
4534 }
4535 static bool classof(const Value *V) {
4536 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4537 }
4538};
4539
4540//===----------------------------------------------------------------------===//
4541// CatchReturnInst Class
4542//===----------------------------------------------------------------------===//
4543
// Terminator with exactly two operands (see FixedNumOperandTraits<.., 2>
// below): Op<0> = the catchpad being returned from, Op<1> = the single
// successor block.
4544class CatchReturnInst : public Instruction {
4545 CatchReturnInst(const CatchReturnInst &RI);
4546 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4547 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4548
4549 void init(Value *CatchPad, BasicBlock *BB);
4550
4551protected:
4552 // Note: Instruction needs to be a friend here to call cloneImpl.
4553 friend class Instruction;
4554
4555 CatchReturnInst *cloneImpl() const;
4556
4557public:
// Factories: both operands are required non-null; new (2) allocates the
// two operand slots.
4558 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4559 Instruction *InsertBefore = nullptr) {
4560 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4560, __extension__
__PRETTY_FUNCTION__))
;
4561 assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB"
, "llvm/include/llvm/IR/Instructions.h", 4561, __extension__ __PRETTY_FUNCTION__
))
;
4562 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4563 }
4564
4565 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4566 BasicBlock *InsertAtEnd) {
4567 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4567, __extension__
__PRETTY_FUNCTION__))
;
4568 assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB"
, "llvm/include/llvm/IR/Instructions.h", 4568, __extension__ __PRETTY_FUNCTION__
))
;
4569 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4570 }
4571
4572 /// Provide fast operand accessors
4573 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4574
4575 /// Convenience accessors.
4576 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4577 void setCatchPad(CatchPadInst *CatchPad) {
4578 assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail
("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4578, __extension__
__PRETTY_FUNCTION__))
;
4579 Op<0>() = CatchPad;
4580 }
4581
// The one and only successor lives in Op<1>.
4582 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4583 void setSuccessor(BasicBlock *NewSucc) {
4584 assert(NewSucc)(static_cast <bool> (NewSucc) ? void (0) : __assert_fail
("NewSucc", "llvm/include/llvm/IR/Instructions.h", 4584, __extension__
__PRETTY_FUNCTION__))
;
4585 Op<1>() = NewSucc;
4586 }
4587 unsigned getNumSuccessors() const { return 1; }
4588
4589 /// Get the parentPad of this catchret's catchpad's catchswitch.
4590 /// The successor block is implicitly a member of this funclet.
4591 Value *getCatchSwitchParentPad() const {
4592 return getCatchPad()->getCatchSwitch()->getParentPad();
4593 }
4594
4595 // Methods for support type inquiry through isa, cast, and dyn_cast:
4596 static bool classof(const Instruction *I) {
4597 return (I->getOpcode() == Instruction::CatchRet);
4598 }
4599 static bool classof(const Value *V) {
4600 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4601 }
4602
4603private:
// Private indexed terminator interface: with exactly one successor, any
// valid Idx (asserted < 1) maps to the public accessors above.
4604 BasicBlock *getSuccessor(unsigned Idx) const {
4605 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchret!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "llvm/include/llvm/IR/Instructions.h", 4605, __extension__ __PRETTY_FUNCTION__
))
;
4606 return getSuccessor();
4607 }
4608
4609 void setSuccessor(unsigned Idx, BasicBlock *B) {
4610 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() &&
"Successor # out of range for catchret!") ? void (0) : __assert_fail
("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\""
, "llvm/include/llvm/IR/Instructions.h", 4610, __extension__ __PRETTY_FUNCTION__
))
;
4611 setSuccessor(B);
4612 }
4614
// Exactly two operands: the catchpad and the successor block.
4615template <>
4616struct OperandTraits<CatchReturnInst>
4617 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4618
// Macro-expanded operand accessor definitions for CatchReturnInst
// (op_begin/op_end, checked getOperand/setOperand, getNumOperands, Op<Idx>).
4619DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return
OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst
::const_op_iterator CatchReturnInst::op_begin() const { return
OperandTraits<CatchReturnInst>::op_begin(const_cast<
CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst
::op_end() { return OperandTraits<CatchReturnInst>::op_end
(this); } CatchReturnInst::const_op_iterator CatchReturnInst::
op_end() const { return OperandTraits<CatchReturnInst>::
op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<CatchReturnInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4619, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<CatchReturnInst
>::op_begin(const_cast<CatchReturnInst*>(this))[i_nocapture
].get()); } void CatchReturnInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<CatchReturnInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4619, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<CatchReturnInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned CatchReturnInst::getNumOperands
() const { return OperandTraits<CatchReturnInst>::operands
(this); } template <int Idx_nocapture> Use &CatchReturnInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &CatchReturnInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
4620
4621//===----------------------------------------------------------------------===//
4622// CleanupReturnInst Class
4623//===----------------------------------------------------------------------===//
4624
// Terminator with a variable operand count (see VariadicOperandTraits with
// MINARITY=1 below): Op<0> = the cleanuppad, and an optional Op<1> = the
// unwind destination. Presence of the unwind dest is tracked in
// subclass-data bit 0 (UnwindDestField), not by operand count alone.
4625class CleanupReturnInst : public Instruction {
4626 using UnwindDestField = BoolBitfieldElementT<0>;
4627
4628private:
4629 CleanupReturnInst(const CleanupReturnInst &RI);
4630 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4631 Instruction *InsertBefore = nullptr);
4632 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4633 BasicBlock *InsertAtEnd);
4634
4635 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4636
4637protected:
4638 // Note: Instruction needs to be a friend here to call cloneImpl.
4639 friend class Instruction;
4640
4641 CleanupReturnInst *cloneImpl() const;
4642
4643public:
// Factories: allocate 1 operand slot for the cleanuppad, plus one more
// only when an unwind destination is supplied.
4644 static CleanupReturnInst *Create(Value *CleanupPad,
4645 BasicBlock *UnwindBB = nullptr,
4646 Instruction *InsertBefore = nullptr) {
4647 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4647, __extension__
__PRETTY_FUNCTION__))
;
4648 unsigned Values = 1;
4649 if (UnwindBB)
4650 ++Values;
4651 return new (Values)
4652 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4653 }
4654
4655 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4656 BasicBlock *InsertAtEnd) {
4657 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4657, __extension__
__PRETTY_FUNCTION__))
;
4658 unsigned Values = 1;
4659 if (UnwindBB)
4660 ++Values;
4661 return new (Values)
4662 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4663 }
4664
4665 /// Provide fast operand accessors
4666 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
4667
4668 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4669 bool unwindsToCaller() const { return !hasUnwindDest(); }
4670
4671 /// Convenience accessor.
4672 CleanupPadInst *getCleanupPad() const {
4673 return cast<CleanupPadInst>(Op<0>());
4674 }
4675 void setCleanupPad(CleanupPadInst *CleanupPad) {
4676 assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail
("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4676, __extension__
__PRETTY_FUNCTION__))
;
4677 Op<0>() = CleanupPad;
4678 }
4679
// One successor (the unwind dest) when present, otherwise zero — callers
// of the indexed successor API must handle the zero-successor case.
4680 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4681
// Returns nullptr when unwinding to the caller (no Op<1> exists then).
4682 BasicBlock *getUnwindDest() const {
4683 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4684 }
// Only valid when an unwind dest already exists; cannot ADD one (the
// operand slot would not have been allocated — see the factories above).
4685 void setUnwindDest(BasicBlock *NewDest) {
4686 assert(NewDest)(static_cast <bool> (NewDest) ? void (0) : __assert_fail
("NewDest", "llvm/include/llvm/IR/Instructions.h", 4686, __extension__
__PRETTY_FUNCTION__))
;
4687 assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail
("hasUnwindDest()", "llvm/include/llvm/IR/Instructions.h", 4687
, __extension__ __PRETTY_FUNCTION__))
;
4688 Op<1>() = NewDest;
4689 }
4690
4691 // Methods for support type inquiry through isa, cast, and dyn_cast:
4692 static bool classof(const Instruction *I) {
4693 return (I->getOpcode() == Instruction::CleanupRet);
4694 }
4695 static bool classof(const Value *V) {
4696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4697 }
4698
4699private:
// Private indexed terminator interface; Idx must be 0 (asserted). Note
// getSuccessor(0) can still return nullptr via getUnwindDest() when there
// is no unwind destination.
4700 BasicBlock *getSuccessor(unsigned Idx) const {
4701 assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail
("Idx == 0", "llvm/include/llvm/IR/Instructions.h", 4701, __extension__
__PRETTY_FUNCTION__))
;
4702 return getUnwindDest();
4703 }
4704
4705 void setSuccessor(unsigned Idx, BasicBlock *B) {
4706 assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail
("Idx == 0", "llvm/include/llvm/IR/Instructions.h", 4706, __extension__
__PRETTY_FUNCTION__))
;
4707 setUnwindDest(B);
4708 }
4709
4710 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4711 // method so that subclasses cannot accidentally use it.
4712 template <typename Bitfield>
4713 void setSubclassData(typename Bitfield::Type Value) {
4714 Instruction::setSubclassData<Bitfield>(Value);
4715 }
4716};
4717
// Variadic with minimum arity 1: the cleanuppad operand is mandatory, the
// unwind-dest operand is optional.
4718template <>
4719struct OperandTraits<CleanupReturnInst>
4720 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4721
// Macro-expanded operand accessor definitions for CleanupReturnInst
// (op_begin/op_end, checked getOperand/setOperand, getNumOperands, Op<Idx>).
4722DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() {
return OperandTraits<CleanupReturnInst>::op_begin(this
); } CleanupReturnInst::const_op_iterator CleanupReturnInst::
op_begin() const { return OperandTraits<CleanupReturnInst>
::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst
::op_iterator CleanupReturnInst::op_end() { return OperandTraits
<CleanupReturnInst>::op_end(this); } CleanupReturnInst::
const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits
<CleanupReturnInst>::op_end(const_cast<CleanupReturnInst
*>(this)); } Value *CleanupReturnInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<CleanupReturnInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4722, __extension__ __PRETTY_FUNCTION__
)); return cast_or_null<Value>( OperandTraits<CleanupReturnInst
>::op_begin(const_cast<CleanupReturnInst*>(this))[i_nocapture
].get()); } void CleanupReturnInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<CleanupReturnInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\""
, "llvm/include/llvm/IR/Instructions.h", 4722, __extension__ __PRETTY_FUNCTION__
)); OperandTraits<CleanupReturnInst>::op_begin(this)[i_nocapture
] = Val_nocapture; } unsigned CleanupReturnInst::getNumOperands
() const { return OperandTraits<CleanupReturnInst>::operands
(this); } template <int Idx_nocapture> Use &CleanupReturnInst
::Op() { return this->OpFrom<Idx_nocapture>(this); }
template <int Idx_nocapture> const Use &CleanupReturnInst
::Op() const { return this->OpFrom<Idx_nocapture>(this
); }
4723
4724//===----------------------------------------------------------------------===//
4725// UnreachableInst Class
4726//===----------------------------------------------------------------------===//
4727
4728//===---------------------------------------------------------------------------
4729/// This function has undefined behavior. In particular, the
4730/// presence of this instruction indicates some higher level knowledge that the
4731/// end of the block cannot be reached.
4732///
4733class UnreachableInst : public Instruction {
4734protected:
4735 // Note: Instruction needs to be a friend here to call cloneImpl.
4736 friend class Instruction;
4737
4738 UnreachableInst *cloneImpl() const;
4739
4740public:
4741 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4742 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4743
4744 // allocate space for exactly zero operands
4745 void *operator new(size_t S) { return User::operator new(S, 0); }
4746 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4747
4748 unsigned getNumSuccessors() const { return 0; }
4749
4750 // Methods for support type inquiry through isa, cast, and dyn_cast:
4751 static bool classof(const Instruction *I) {
4752 return I->getOpcode() == Instruction::Unreachable;
4753 }
4754 static bool classof(const Value *V) {
4755 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4756 }
4757
4758private:
4759 BasicBlock *getSuccessor(unsigned idx) const {
4760 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 4760)
;
4761 }
4762
4763 void setSuccessor(unsigned idx, BasicBlock *B) {
4764 llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!"
, "llvm/include/llvm/IR/Instructions.h", 4764)
;
4765 }
4766};
4767
4768//===----------------------------------------------------------------------===//
4769// TruncInst Class
4770//===----------------------------------------------------------------------===//
4771
4772/// This class represents a truncation of integer types.
4773class TruncInst : public CastInst {
4774protected:
4775 // Note: Instruction needs to be a friend here to call cloneImpl.
4776 friend class Instruction;
4777
4778 /// Clone an identical TruncInst
4779 TruncInst *cloneImpl() const;
4780
4781public:
4782 /// Constructor with insert-before-instruction semantics
4783 TruncInst(
4784 Value *S, ///< The value to be truncated
4785 Type *Ty, ///< The (smaller) type to truncate to
4786 const Twine &NameStr = "", ///< A name for the new instruction
4787 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4788 );
4789
4790 /// Constructor with insert-at-end-of-block semantics
4791 TruncInst(
4792 Value *S, ///< The value to be truncated
4793 Type *Ty, ///< The (smaller) type to truncate to
4794 const Twine &NameStr, ///< A name for the new instruction
4795 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4796 );
4797
4798 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4799 static bool classof(const Instruction *I) {
4800 return I->getOpcode() == Trunc;
4801 }
4802 static bool classof(const Value *V) {
4803 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4804 }
4805};
4806
4807//===----------------------------------------------------------------------===//
4808// ZExtInst Class
4809//===----------------------------------------------------------------------===//
4810
4811/// This class represents zero extension of integer types.
4812class ZExtInst : public CastInst {
4813protected:
4814 // Note: Instruction needs to be a friend here to call cloneImpl.
4815 friend class Instruction;
4816
4817 /// Clone an identical ZExtInst
4818 ZExtInst *cloneImpl() const;
4819
4820public:
4821 /// Constructor with insert-before-instruction semantics
4822 ZExtInst(
4823 Value *S, ///< The value to be zero extended
4824 Type *Ty, ///< The type to zero extend to
4825 const Twine &NameStr = "", ///< A name for the new instruction
4826 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4827 );
4828
4829 /// Constructor with insert-at-end semantics.
4830 ZExtInst(
4831 Value *S, ///< The value to be zero extended
4832 Type *Ty, ///< The type to zero extend to
4833 const Twine &NameStr, ///< A name for the new instruction
4834 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4835 );
4836
4837 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4838 static bool classof(const Instruction *I) {
4839 return I->getOpcode() == ZExt;
4840 }
4841 static bool classof(const Value *V) {
4842 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4843 }
4844};
4845
4846//===----------------------------------------------------------------------===//
4847// SExtInst Class
4848//===----------------------------------------------------------------------===//
4849
4850/// This class represents a sign extension of integer types.
4851class SExtInst : public CastInst {
4852protected:
4853 // Note: Instruction needs to be a friend here to call cloneImpl.
4854 friend class Instruction;
4855
4856 /// Clone an identical SExtInst
4857 SExtInst *cloneImpl() const;
4858
4859public:
4860 /// Constructor with insert-before-instruction semantics
4861 SExtInst(
4862 Value *S, ///< The value to be sign extended
4863 Type *Ty, ///< The type to sign extend to
4864 const Twine &NameStr = "", ///< A name for the new instruction
4865 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4866 );
4867
4868 /// Constructor with insert-at-end-of-block semantics
4869 SExtInst(
4870 Value *S, ///< The value to be sign extended
4871 Type *Ty, ///< The type to sign extend to
4872 const Twine &NameStr, ///< A name for the new instruction
4873 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4874 );
4875
4876 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4877 static bool classof(const Instruction *I) {
4878 return I->getOpcode() == SExt;
4879 }
4880 static bool classof(const Value *V) {
4881 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4882 }
4883};
4884
4885//===----------------------------------------------------------------------===//
4886// FPTruncInst Class
4887//===----------------------------------------------------------------------===//
4888
4889/// This class represents a truncation of floating point types.
4890class FPTruncInst : public CastInst {
4891protected:
4892 // Note: Instruction needs to be a friend here to call cloneImpl.
4893 friend class Instruction;
4894
4895 /// Clone an identical FPTruncInst
4896 FPTruncInst *cloneImpl() const;
4897
4898public:
4899 /// Constructor with insert-before-instruction semantics
4900 FPTruncInst(
4901 Value *S, ///< The value to be truncated
4902 Type *Ty, ///< The type to truncate to
4903 const Twine &NameStr = "", ///< A name for the new instruction
4904 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4905 );
4906
4907 /// Constructor with insert-before-instruction semantics
4908 FPTruncInst(
4909 Value *S, ///< The value to be truncated
4910 Type *Ty, ///< The type to truncate to
4911 const Twine &NameStr, ///< A name for the new instruction
4912 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4913 );
4914
4915 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4916 static bool classof(const Instruction *I) {
4917 return I->getOpcode() == FPTrunc;
4918 }
4919 static bool classof(const Value *V) {
4920 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4921 }
4922};
4923
4924//===----------------------------------------------------------------------===//
4925// FPExtInst Class
4926//===----------------------------------------------------------------------===//
4927
4928/// This class represents an extension of floating point types.
4929class FPExtInst : public CastInst {
4930protected:
4931 // Note: Instruction needs to be a friend here to call cloneImpl.
4932 friend class Instruction;
4933
4934 /// Clone an identical FPExtInst
4935 FPExtInst *cloneImpl() const;
4936
4937public:
4938 /// Constructor with insert-before-instruction semantics
4939 FPExtInst(
4940 Value *S, ///< The value to be extended
4941 Type *Ty, ///< The type to extend to
4942 const Twine &NameStr = "", ///< A name for the new instruction
4943 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4944 );
4945
4946 /// Constructor with insert-at-end-of-block semantics
4947 FPExtInst(
4948 Value *S, ///< The value to be extended
4949 Type *Ty, ///< The type to extend to
4950 const Twine &NameStr, ///< A name for the new instruction
4951 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4952 );
4953
4954 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4955 static bool classof(const Instruction *I) {
4956 return I->getOpcode() == FPExt;
4957 }
4958 static bool classof(const Value *V) {
4959 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4960 }
4961};
4962
4963//===----------------------------------------------------------------------===//
4964// UIToFPInst Class
4965//===----------------------------------------------------------------------===//
4966
4967/// This class represents a cast unsigned integer to floating point.
4968class UIToFPInst : public CastInst {
4969protected:
4970 // Note: Instruction needs to be a friend here to call cloneImpl.
4971 friend class Instruction;
4972
4973 /// Clone an identical UIToFPInst
4974 UIToFPInst *cloneImpl() const;
4975
4976public:
4977 /// Constructor with insert-before-instruction semantics
4978 UIToFPInst(
4979 Value *S, ///< The value to be converted
4980 Type *Ty, ///< The type to convert to
4981 const Twine &NameStr = "", ///< A name for the new instruction
4982 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4983 );
4984
4985 /// Constructor with insert-at-end-of-block semantics
4986 UIToFPInst(
4987 Value *S, ///< The value to be converted
4988 Type *Ty, ///< The type to convert to
4989 const Twine &NameStr, ///< A name for the new instruction
4990 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4991 );
4992
4993 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4994 static bool classof(const Instruction *I) {
4995 return I->getOpcode() == UIToFP;
4996 }
4997 static bool classof(const Value *V) {
4998 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4999 }
5000};
5001
5002//===----------------------------------------------------------------------===//
5003// SIToFPInst Class
5004//===----------------------------------------------------------------------===//
5005
5006/// This class represents a cast from signed integer to floating point.
5007class SIToFPInst : public CastInst {
5008protected:
5009 // Note: Instruction needs to be a friend here to call cloneImpl.
5010 friend class Instruction;
5011
5012 /// Clone an identical SIToFPInst
5013 SIToFPInst *cloneImpl() const;
5014
5015public:
5016 /// Constructor with insert-before-instruction semantics
5017 SIToFPInst(
5018 Value *S, ///< The value to be converted
5019 Type *Ty, ///< The type to convert to
5020 const Twine &NameStr = "", ///< A name for the new instruction
5021 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5022 );
5023
5024 /// Constructor with insert-at-end-of-block semantics
5025 SIToFPInst(
5026 Value *S, ///< The value to be converted
5027 Type *Ty, ///< The type to convert to
5028 const Twine &NameStr, ///< A name for the new instruction
5029 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5030 );
5031
5032 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5033 static bool classof(const Instruction *I) {
5034 return I->getOpcode() == SIToFP;
5035 }
5036 static bool classof(const Value *V) {
5037 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5038 }
5039};
5040
5041//===----------------------------------------------------------------------===//
5042// FPToUIInst Class
5043//===----------------------------------------------------------------------===//
5044
5045/// This class represents a cast from floating point to unsigned integer
5046class FPToUIInst : public CastInst {
5047protected:
5048 // Note: Instruction needs to be a friend here to call cloneImpl.
5049 friend class Instruction;
5050
5051 /// Clone an identical FPToUIInst
5052 FPToUIInst *cloneImpl() const;
5053
5054public:
5055 /// Constructor with insert-before-instruction semantics
5056 FPToUIInst(
5057 Value *S, ///< The value to be converted
5058 Type *Ty, ///< The type to convert to
5059 const Twine &NameStr = "", ///< A name for the new instruction
5060 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5061 );
5062
5063 /// Constructor with insert-at-end-of-block semantics
5064 FPToUIInst(
5065 Value *S, ///< The value to be converted
5066 Type *Ty, ///< The type to convert to
5067 const Twine &NameStr, ///< A name for the new instruction
5068 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5069 );
5070
5071 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5072 static bool classof(const Instruction *I) {
5073 return I->getOpcode() == FPToUI;
5074 }
5075 static bool classof(const Value *V) {
5076 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5077 }
5078};
5079
5080//===----------------------------------------------------------------------===//
5081// FPToSIInst Class
5082//===----------------------------------------------------------------------===//
5083
5084/// This class represents a cast from floating point to signed integer.
5085class FPToSIInst : public CastInst {
5086protected:
5087 // Note: Instruction needs to be a friend here to call cloneImpl.
5088 friend class Instruction;
5089
5090 /// Clone an identical FPToSIInst
5091 FPToSIInst *cloneImpl() const;
5092
5093public:
5094 /// Constructor with insert-before-instruction semantics
5095 FPToSIInst(
5096 Value *S, ///< The value to be converted
5097 Type *Ty, ///< The type to convert to
5098 const Twine &NameStr = "", ///< A name for the new instruction
5099 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5100 );
5101
5102 /// Constructor with insert-at-end-of-block semantics
5103 FPToSIInst(
5104 Value *S, ///< The value to be converted
5105 Type *Ty, ///< The type to convert to
5106 const Twine &NameStr, ///< A name for the new instruction
5107 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5108 );
5109
5110 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5111 static bool classof(const Instruction *I) {
5112 return I->getOpcode() == FPToSI;
5113 }
5114 static bool classof(const Value *V) {
5115 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5116 }
5117};
5118
5119//===----------------------------------------------------------------------===//
5120// IntToPtrInst Class
5121//===----------------------------------------------------------------------===//
5122
5123/// This class represents a cast from an integer to a pointer.
5124class IntToPtrInst : public CastInst {
5125public:
5126 // Note: Instruction needs to be a friend here to call cloneImpl.
5127 friend class Instruction;
5128
5129 /// Constructor with insert-before-instruction semantics
5130 IntToPtrInst(
5131 Value *S, ///< The value to be converted
5132 Type *Ty, ///< The type to convert to
5133 const Twine &NameStr = "", ///< A name for the new instruction
5134 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5135 );
5136
5137 /// Constructor with insert-at-end-of-block semantics
5138 IntToPtrInst(
5139 Value *S, ///< The value to be converted
5140 Type *Ty, ///< The type to convert to
5141 const Twine &NameStr, ///< A name for the new instruction
5142 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5143 );
5144
5145 /// Clone an identical IntToPtrInst.
5146 IntToPtrInst *cloneImpl() const;
5147
5148 /// Returns the address space of this instruction's pointer type.
5149 unsigned getAddressSpace() const {
5150 return getType()->getPointerAddressSpace();
5151 }
5152
5153 // Methods for support type inquiry through isa, cast, and dyn_cast:
5154 static bool classof(const Instruction *I) {
5155 return I->getOpcode() == IntToPtr;
5156 }
5157 static bool classof(const Value *V) {
5158 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5159 }
5160};
5161
5162//===----------------------------------------------------------------------===//
5163// PtrToIntInst Class
5164//===----------------------------------------------------------------------===//
5165
5166/// This class represents a cast from a pointer to an integer.
5167class PtrToIntInst : public CastInst {
5168protected:
5169 // Note: Instruction needs to be a friend here to call cloneImpl.
5170 friend class Instruction;
5171
5172 /// Clone an identical PtrToIntInst.
5173 PtrToIntInst *cloneImpl() const;
5174
5175public:
5176 /// Constructor with insert-before-instruction semantics
5177 PtrToIntInst(
5178 Value *S, ///< The value to be converted
5179 Type *Ty, ///< The type to convert to
5180 const Twine &NameStr = "", ///< A name for the new instruction
5181 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5182 );
5183
5184 /// Constructor with insert-at-end-of-block semantics
5185 PtrToIntInst(
5186 Value *S, ///< The value to be converted
5187 Type *Ty, ///< The type to convert to
5188 const Twine &NameStr, ///< A name for the new instruction
5189 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5190 );
5191
5192 /// Gets the pointer operand.
5193 Value *getPointerOperand() { return getOperand(0); }
5194 /// Gets the pointer operand.
5195 const Value *getPointerOperand() const { return getOperand(0); }
5196 /// Gets the operand index of the pointer operand.
5197 static unsigned getPointerOperandIndex() { return 0U; }
5198
5199 /// Returns the address space of the pointer operand.
5200 unsigned getPointerAddressSpace() const {
5201 return getPointerOperand()->getType()->getPointerAddressSpace();
5202 }
5203
5204 // Methods for support type inquiry through isa, cast, and dyn_cast:
5205 static bool classof(const Instruction *I) {
5206 return I->getOpcode() == PtrToInt;
5207 }
5208 static bool classof(const Value *V) {
5209 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5210 }
5211};
5212
5213//===----------------------------------------------------------------------===//
5214// BitCastInst Class
5215//===----------------------------------------------------------------------===//
5216
5217/// This class represents a no-op cast from one type to another.
5218class BitCastInst : public CastInst {
5219protected:
5220 // Note: Instruction needs to be a friend here to call cloneImpl.
5221 friend class Instruction;
5222
5223 /// Clone an identical BitCastInst.
5224 BitCastInst *cloneImpl() const;
5225
5226public:
5227 /// Constructor with insert-before-instruction semantics
5228 BitCastInst(
5229 Value *S, ///< The value to be casted
5230 Type *Ty, ///< The type to casted to
5231 const Twine &NameStr = "", ///< A name for the new instruction
5232 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5233 );
5234
5235 /// Constructor with insert-at-end-of-block semantics
5236 BitCastInst(
5237 Value *S, ///< The value to be casted
5238 Type *Ty, ///< The type to casted to
5239 const Twine &NameStr, ///< A name for the new instruction
5240 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5241 );
5242
5243 // Methods for support type inquiry through isa, cast, and dyn_cast:
5244 static bool classof(const Instruction *I) {
5245 return I->getOpcode() == BitCast;
5246 }
5247 static bool classof(const Value *V) {
5248 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5249 }
5250};
5251
5252//===----------------------------------------------------------------------===//
5253// AddrSpaceCastInst Class
5254//===----------------------------------------------------------------------===//
5255
5256/// This class represents a conversion between pointers from one address space
5257/// to another.
5258class AddrSpaceCastInst : public CastInst {
5259protected:
5260 // Note: Instruction needs to be a friend here to call cloneImpl.
5261 friend class Instruction;
5262
5263 /// Clone an identical AddrSpaceCastInst.
5264 AddrSpaceCastInst *cloneImpl() const;
5265
5266public:
5267 /// Constructor with insert-before-instruction semantics
5268 AddrSpaceCastInst(
5269 Value *S, ///< The value to be casted
5270 Type *Ty, ///< The type to casted to
5271 const Twine &NameStr = "", ///< A name for the new instruction
5272 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5273 );
5274
5275 /// Constructor with insert-at-end-of-block semantics
5276 AddrSpaceCastInst(
5277 Value *S, ///< The value to be casted
5278 Type *Ty, ///< The type to casted to
5279 const Twine &NameStr, ///< A name for the new instruction
5280 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5281 );
5282
5283 // Methods for support type inquiry through isa, cast, and dyn_cast:
5284 static bool classof(const Instruction *I) {
5285 return I->getOpcode() == AddrSpaceCast;
5286 }
5287 static bool classof(const Value *V) {
5288 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5289 }
5290
5291 /// Gets the pointer operand.
5292 Value *getPointerOperand() {
5293 return getOperand(0);
5294 }
5295
5296 /// Gets the pointer operand.
5297 const Value *getPointerOperand() const {
5298 return getOperand(0);
5299 }
5300
5301 /// Gets the operand index of the pointer operand.
5302 static unsigned getPointerOperandIndex() {
5303 return 0U;
5304 }
5305
5306 /// Returns the address space of the pointer operand.
5307 unsigned getSrcAddressSpace() const {
5308 return getPointerOperand()->getType()->getPointerAddressSpace();
5309 }
5310
5311 /// Returns the address space of the result.
5312 unsigned getDestAddressSpace() const {
5313 return getType()->getPointerAddressSpace();
5314 }
5315};
5316
5317/// A helper function that returns the pointer operand of a load or store
5318/// instruction. Returns nullptr if not load or store.
5319inline const Value *getLoadStorePointerOperand(const Value *V) {
5320 if (auto *Load = dyn_cast<LoadInst>(V))
5321 return Load->getPointerOperand();
5322 if (auto *Store = dyn_cast<StoreInst>(V))
5323 return Store->getPointerOperand();
5324 return nullptr;
5325}
5326inline Value *getLoadStorePointerOperand(Value *V) {
5327 return const_cast<Value *>(
5328 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5329}
5330
5331/// A helper function that returns the pointer operand of a load, store
5332/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5333inline const Value *getPointerOperand(const Value *V) {
5334 if (auto *Ptr = getLoadStorePointerOperand(V))
5335 return Ptr;
5336 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5337 return Gep->getPointerOperand();
5338 return nullptr;
5339}
5340inline Value *getPointerOperand(Value *V) {
5341 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5342}
5343
5344/// A helper function that returns the alignment of load or store instruction.
5345inline Align getLoadStoreAlignment(Value *I) {
5346 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "llvm/include/llvm/IR/Instructions.h", 5347, __extension__ __PRETTY_FUNCTION__
))
5347 "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "llvm/include/llvm/IR/Instructions.h", 5347, __extension__ __PRETTY_FUNCTION__
))
;
5348 if (auto *LI = dyn_cast<LoadInst>(I))
5349 return LI->getAlign();
5350 return cast<StoreInst>(I)->getAlign();
5351}
5352
5353/// A helper function that returns the address space of the pointer operand of
5354/// load or store instruction.
5355inline unsigned getLoadStoreAddressSpace(Value *I) {
5356 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "llvm/include/llvm/IR/Instructions.h", 5357, __extension__ __PRETTY_FUNCTION__
))
5357 "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "llvm/include/llvm/IR/Instructions.h", 5357, __extension__ __PRETTY_FUNCTION__
))
;
5358 if (auto *LI = dyn_cast<LoadInst>(I))
5359 return LI->getPointerAddressSpace();
5360 return cast<StoreInst>(I)->getPointerAddressSpace();
5361}
5362
5363/// A helper function that returns the type of a load or store instruction.
5364inline Type *getLoadStoreType(Value *I) {
5365 assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "llvm/include/llvm/IR/Instructions.h", 5366, __extension__ __PRETTY_FUNCTION__
))
5366 "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa<
StoreInst>(I)) && "Expected Load or Store instruction"
) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\""
, "llvm/include/llvm/IR/Instructions.h", 5366, __extension__ __PRETTY_FUNCTION__
))
;
5367 if (auto *LI = dyn_cast<LoadInst>(I))
5368 return LI->getType();
5369 return cast<StoreInst>(I)->getValueOperand()->getType();
5370}
5371
5372//===----------------------------------------------------------------------===//
5373// FreezeInst Class
5374//===----------------------------------------------------------------------===//
5375
5376/// This class represents a freeze function that returns random concrete
5377/// value if an operand is either a poison value or an undef value
5378class FreezeInst : public UnaryInstruction {
5379protected:
5380 // Note: Instruction needs to be a friend here to call cloneImpl.
5381 friend class Instruction;
5382
5383 /// Clone an identical FreezeInst
5384 FreezeInst *cloneImpl() const;
5385
5386public:
5387 explicit FreezeInst(Value *S,
5388 const Twine &NameStr = "",
5389 Instruction *InsertBefore = nullptr);
5390 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5391
5392 // Methods for support type inquiry through isa, cast, and dyn_cast:
5393 static inline bool classof(const Instruction *I) {
5394 return I->getOpcode() == Freeze;
5395 }
5396 static inline bool classof(const Value *V) {
5397 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5398 }
5399};
5400
5401} // end namespace llvm
5402
5403#endif // LLVM_IR_INSTRUCTIONS_H