Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 964, column 29
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name CodeExtractor.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Transforms/Utils -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include 
-internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Transforms/Utils -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-13-111025-38230-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp

1//===- CodeExtractor.cpp - Pull code region into a new function -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the interface to tear out a code region, such as an
10// individual loop or a parallel section, into a new function, replacing it with
11// a call to the new function.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/CodeExtractor.h"
16#include "llvm/ADT/ArrayRef.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/Optional.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
21#include "llvm/ADT/SmallPtrSet.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/Analysis/AssumptionCache.h"
24#include "llvm/Analysis/BlockFrequencyInfo.h"
25#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
26#include "llvm/Analysis/BranchProbabilityInfo.h"
27#include "llvm/Analysis/LoopInfo.h"
28#include "llvm/IR/Argument.h"
29#include "llvm/IR/Attributes.h"
30#include "llvm/IR/BasicBlock.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DIBuilder.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DebugInfoMetadata.h"
37#include "llvm/IR/DerivedTypes.h"
38#include "llvm/IR/Dominators.h"
39#include "llvm/IR/Function.h"
40#include "llvm/IR/GlobalValue.h"
41#include "llvm/IR/InstIterator.h"
42#include "llvm/IR/InstrTypes.h"
43#include "llvm/IR/Instruction.h"
44#include "llvm/IR/Instructions.h"
45#include "llvm/IR/IntrinsicInst.h"
46#include "llvm/IR/Intrinsics.h"
47#include "llvm/IR/LLVMContext.h"
48#include "llvm/IR/MDBuilder.h"
49#include "llvm/IR/Module.h"
50#include "llvm/IR/PatternMatch.h"
51#include "llvm/IR/Type.h"
52#include "llvm/IR/User.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/Verifier.h"
55#include "llvm/Pass.h"
56#include "llvm/Support/BlockFrequency.h"
57#include "llvm/Support/BranchProbability.h"
58#include "llvm/Support/Casting.h"
59#include "llvm/Support/CommandLine.h"
60#include "llvm/Support/Debug.h"
61#include "llvm/Support/ErrorHandling.h"
62#include "llvm/Support/raw_ostream.h"
63#include "llvm/Transforms/Utils/BasicBlockUtils.h"
64#include "llvm/Transforms/Utils/Local.h"
65#include <cassert>
66#include <cstdint>
67#include <iterator>
68#include <map>
69#include <set>
70#include <utility>
71#include <vector>
72
73using namespace llvm;
74using namespace llvm::PatternMatch;
75using ProfileCount = Function::ProfileCount;
76
77#define DEBUG_TYPE"code-extractor" "code-extractor"
78
79// Provide a command-line option to aggregate function arguments into a struct
80// for functions produced by the code extractor. This is useful when converting
81// extracted functions to pthread-based code, as only one argument (void*) can
82// be passed in to pthread_create().
83static cl::opt<bool>
84AggregateArgsOpt("aggregate-extracted-args", cl::Hidden,
85 cl::desc("Aggregate arguments to code-extracted functions"));
86
87/// Test whether a block is valid for extraction.
88static bool isBlockValidForExtraction(const BasicBlock &BB,
89 const SetVector<BasicBlock *> &Result,
90 bool AllowVarArgs, bool AllowAlloca) {
91 // taking the address of a basic block moved to another function is illegal
92 if (BB.hasAddressTaken())
93 return false;
94
95 // don't hoist code that uses another basicblock address, as it's likely to
96 // lead to unexpected behavior, like cross-function jumps
97 SmallPtrSet<User const *, 16> Visited;
98 SmallVector<User const *, 16> ToVisit;
99
100 for (Instruction const &Inst : BB)
101 ToVisit.push_back(&Inst);
102
103 while (!ToVisit.empty()) {
104 User const *Curr = ToVisit.pop_back_val();
105 if (!Visited.insert(Curr).second)
106 continue;
107 if (isa<BlockAddress const>(Curr))
108 return false; // even a reference to self is likely to be not compatible
109
110 if (isa<Instruction>(Curr) && cast<Instruction>(Curr)->getParent() != &BB)
111 continue;
112
113 for (auto const &U : Curr->operands()) {
114 if (auto *UU = dyn_cast<User>(U))
115 ToVisit.push_back(UU);
116 }
117 }
118
119 // If explicitly requested, allow vastart and alloca. For invoke instructions
120 // verify that extraction is valid.
121 for (BasicBlock::const_iterator I = BB.begin(), E = BB.end(); I != E; ++I) {
122 if (isa<AllocaInst>(I)) {
123 if (!AllowAlloca)
124 return false;
125 continue;
126 }
127
128 if (const auto *II = dyn_cast<InvokeInst>(I)) {
129 // Unwind destination (either a landingpad, catchswitch, or cleanuppad)
130 // must be a part of the subgraph which is being extracted.
131 if (auto *UBB = II->getUnwindDest())
132 if (!Result.count(UBB))
133 return false;
134 continue;
135 }
136
137 // All catch handlers of a catchswitch instruction as well as the unwind
138 // destination must be in the subgraph.
139 if (const auto *CSI = dyn_cast<CatchSwitchInst>(I)) {
140 if (auto *UBB = CSI->getUnwindDest())
141 if (!Result.count(UBB))
142 return false;
143 for (auto *HBB : CSI->handlers())
144 if (!Result.count(const_cast<BasicBlock*>(HBB)))
145 return false;
146 continue;
147 }
148
149 // Make sure that entire catch handler is within subgraph. It is sufficient
150 // to check that catch return's block is in the list.
151 if (const auto *CPI = dyn_cast<CatchPadInst>(I)) {
152 for (const auto *U : CPI->users())
153 if (const auto *CRI = dyn_cast<CatchReturnInst>(U))
154 if (!Result.count(const_cast<BasicBlock*>(CRI->getParent())))
155 return false;
156 continue;
157 }
158
159 // And do similar checks for cleanup handler - the entire handler must be
160 // in subgraph which is going to be extracted. For cleanup return should
161 // additionally check that the unwind destination is also in the subgraph.
162 if (const auto *CPI = dyn_cast<CleanupPadInst>(I)) {
163 for (const auto *U : CPI->users())
164 if (const auto *CRI = dyn_cast<CleanupReturnInst>(U))
165 if (!Result.count(const_cast<BasicBlock*>(CRI->getParent())))
166 return false;
167 continue;
168 }
169 if (const auto *CRI = dyn_cast<CleanupReturnInst>(I)) {
170 if (auto *UBB = CRI->getUnwindDest())
171 if (!Result.count(UBB))
172 return false;
173 continue;
174 }
175
176 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
177 if (const Function *F = CI->getCalledFunction()) {
178 auto IID = F->getIntrinsicID();
179 if (IID == Intrinsic::vastart) {
180 if (AllowVarArgs)
181 continue;
182 else
183 return false;
184 }
185
186 // Currently, we miscompile outlined copies of eh_typid_for. There are
187 // proposals for fixing this in llvm.org/PR39545.
188 if (IID == Intrinsic::eh_typeid_for)
189 return false;
190 }
191 }
192 }
193
194 return true;
195}
196
197/// Build a set of blocks to extract if the input blocks are viable.
198static SetVector<BasicBlock *>
199buildExtractionBlockSet(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
200 bool AllowVarArgs, bool AllowAlloca) {
201 assert(!BBs.empty() && "The set of blocks to extract must be non-empty")(static_cast <bool> (!BBs.empty() && "The set of blocks to extract must be non-empty"
) ? void (0) : __assert_fail ("!BBs.empty() && \"The set of blocks to extract must be non-empty\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 201, __extension__ __PRETTY_FUNCTION__))
;
202 SetVector<BasicBlock *> Result;
203
204 // Loop over the blocks, adding them to our set-vector, and aborting with an
205 // empty set if we encounter invalid blocks.
206 for (BasicBlock *BB : BBs) {
207 // If this block is dead, don't process it.
208 if (DT && !DT->isReachableFromEntry(BB))
209 continue;
210
211 if (!Result.insert(BB))
212 llvm_unreachable("Repeated basic blocks in extraction input")::llvm::llvm_unreachable_internal("Repeated basic blocks in extraction input"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 212)
;
213 }
214
215 LLVM_DEBUG(dbgs() << "Region front block: " << Result.front()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Region front block: " <<
Result.front()->getName() << '\n'; } } while (false
)
216 << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Region front block: " <<
Result.front()->getName() << '\n'; } } while (false
)
;
217
218 for (auto *BB : Result) {
219 if (!isBlockValidForExtraction(*BB, Result, AllowVarArgs, AllowAlloca))
220 return {};
221
222 // Make sure that the first block is not a landing pad.
223 if (BB == Result.front()) {
224 if (BB->isEHPad()) {
225 LLVM_DEBUG(dbgs() << "The first block cannot be an unwind block\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "The first block cannot be an unwind block\n"
; } } while (false)
;
226 return {};
227 }
228 continue;
229 }
230
231 // All blocks other than the first must not have predecessors outside of
232 // the subgraph which is being extracted.
233 for (auto *PBB : predecessors(BB))
234 if (!Result.count(PBB)) {
235 LLVM_DEBUG(dbgs() << "No blocks in this region may have entries from "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "No blocks in this region may have entries from "
"outside the region except for the first block!\n" << "Problematic source BB: "
<< BB->getName() << "\n" << "Problematic destination BB: "
<< PBB->getName() << "\n"; } } while (false)
236 "outside the region except for the first block!\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "No blocks in this region may have entries from "
"outside the region except for the first block!\n" << "Problematic source BB: "
<< BB->getName() << "\n" << "Problematic destination BB: "
<< PBB->getName() << "\n"; } } while (false)
237 << "Problematic source BB: " << BB->getName() << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "No blocks in this region may have entries from "
"outside the region except for the first block!\n" << "Problematic source BB: "
<< BB->getName() << "\n" << "Problematic destination BB: "
<< PBB->getName() << "\n"; } } while (false)
238 << "Problematic destination BB: " << PBB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "No blocks in this region may have entries from "
"outside the region except for the first block!\n" << "Problematic source BB: "
<< BB->getName() << "\n" << "Problematic destination BB: "
<< PBB->getName() << "\n"; } } while (false)
239 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "No blocks in this region may have entries from "
"outside the region except for the first block!\n" << "Problematic source BB: "
<< BB->getName() << "\n" << "Problematic destination BB: "
<< PBB->getName() << "\n"; } } while (false)
;
240 return {};
241 }
242 }
243
244 return Result;
245}
246
247CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
248 bool AggregateArgs, BlockFrequencyInfo *BFI,
249 BranchProbabilityInfo *BPI, AssumptionCache *AC,
250 bool AllowVarArgs, bool AllowAlloca,
251 std::string Suffix)
252 : DT(DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
253 BPI(BPI), AC(AC), AllowVarArgs(AllowVarArgs),
254 Blocks(buildExtractionBlockSet(BBs, DT, AllowVarArgs, AllowAlloca)),
255 Suffix(Suffix) {}
256
257CodeExtractor::CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs,
258 BlockFrequencyInfo *BFI,
259 BranchProbabilityInfo *BPI, AssumptionCache *AC,
260 std::string Suffix)
261 : DT(&DT), AggregateArgs(AggregateArgs || AggregateArgsOpt), BFI(BFI),
262 BPI(BPI), AC(AC), AllowVarArgs(false),
263 Blocks(buildExtractionBlockSet(L.getBlocks(), &DT,
264 /* AllowVarArgs */ false,
265 /* AllowAlloca */ false)),
266 Suffix(Suffix) {}
267
268/// definedInRegion - Return true if the specified value is defined in the
269/// extracted region.
270static bool definedInRegion(const SetVector<BasicBlock *> &Blocks, Value *V) {
271 if (Instruction *I = dyn_cast<Instruction>(V))
272 if (Blocks.count(I->getParent()))
273 return true;
274 return false;
275}
276
277/// definedInCaller - Return true if the specified value is defined in the
278/// function being code extracted, but not in the region being extracted.
279/// These values must be passed in as live-ins to the function.
280static bool definedInCaller(const SetVector<BasicBlock *> &Blocks, Value *V) {
281 if (isa<Argument>(V)) return true;
282 if (Instruction *I = dyn_cast<Instruction>(V))
283 if (!Blocks.count(I->getParent()))
284 return true;
285 return false;
286}
287
288static BasicBlock *getCommonExitBlock(const SetVector<BasicBlock *> &Blocks) {
289 BasicBlock *CommonExitBlock = nullptr;
290 auto hasNonCommonExitSucc = [&](BasicBlock *Block) {
291 for (auto *Succ : successors(Block)) {
292 // Internal edges, ok.
293 if (Blocks.count(Succ))
294 continue;
295 if (!CommonExitBlock) {
296 CommonExitBlock = Succ;
297 continue;
298 }
299 if (CommonExitBlock != Succ)
300 return true;
301 }
302 return false;
303 };
304
305 if (any_of(Blocks, hasNonCommonExitSucc))
306 return nullptr;
307
308 return CommonExitBlock;
309}
310
311CodeExtractorAnalysisCache::CodeExtractorAnalysisCache(Function &F) {
312 for (BasicBlock &BB : F) {
313 for (Instruction &II : BB.instructionsWithoutDebug())
314 if (auto *AI = dyn_cast<AllocaInst>(&II))
315 Allocas.push_back(AI);
316
317 findSideEffectInfoForBlock(BB);
318 }
319}
320
321void CodeExtractorAnalysisCache::findSideEffectInfoForBlock(BasicBlock &BB) {
322 for (Instruction &II : BB.instructionsWithoutDebug()) {
323 unsigned Opcode = II.getOpcode();
324 Value *MemAddr = nullptr;
325 switch (Opcode) {
326 case Instruction::Store:
327 case Instruction::Load: {
328 if (Opcode == Instruction::Store) {
329 StoreInst *SI = cast<StoreInst>(&II);
330 MemAddr = SI->getPointerOperand();
331 } else {
332 LoadInst *LI = cast<LoadInst>(&II);
333 MemAddr = LI->getPointerOperand();
334 }
335 // Global variable can not be aliased with locals.
336 if (isa<Constant>(MemAddr))
337 break;
338 Value *Base = MemAddr->stripInBoundsConstantOffsets();
339 if (!isa<AllocaInst>(Base)) {
340 SideEffectingBlocks.insert(&BB);
341 return;
342 }
343 BaseMemAddrs[&BB].insert(Base);
344 break;
345 }
346 default: {
347 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(&II);
348 if (IntrInst) {
349 if (IntrInst->isLifetimeStartOrEnd())
350 break;
351 SideEffectingBlocks.insert(&BB);
352 return;
353 }
354 // Treat all the other cases conservatively if it has side effects.
355 if (II.mayHaveSideEffects()) {
356 SideEffectingBlocks.insert(&BB);
357 return;
358 }
359 }
360 }
361 }
362}
363
364bool CodeExtractorAnalysisCache::doesBlockContainClobberOfAddr(
365 BasicBlock &BB, AllocaInst *Addr) const {
366 if (SideEffectingBlocks.count(&BB))
367 return true;
368 auto It = BaseMemAddrs.find(&BB);
369 if (It != BaseMemAddrs.end())
370 return It->second.count(Addr);
371 return false;
372}
373
374bool CodeExtractor::isLegalToShrinkwrapLifetimeMarkers(
375 const CodeExtractorAnalysisCache &CEAC, Instruction *Addr) const {
376 AllocaInst *AI = cast<AllocaInst>(Addr->stripInBoundsConstantOffsets());
377 Function *Func = (*Blocks.begin())->getParent();
378 for (BasicBlock &BB : *Func) {
379 if (Blocks.count(&BB))
380 continue;
381 if (CEAC.doesBlockContainClobberOfAddr(BB, AI))
382 return false;
383 }
384 return true;
385}
386
387BasicBlock *
388CodeExtractor::findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock) {
389 BasicBlock *SinglePredFromOutlineRegion = nullptr;
390 assert(!Blocks.count(CommonExitBlock) &&(static_cast <bool> (!Blocks.count(CommonExitBlock) &&
"Expect a block outside the region!") ? void (0) : __assert_fail
("!Blocks.count(CommonExitBlock) && \"Expect a block outside the region!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 391, __extension__ __PRETTY_FUNCTION__))
391 "Expect a block outside the region!")(static_cast <bool> (!Blocks.count(CommonExitBlock) &&
"Expect a block outside the region!") ? void (0) : __assert_fail
("!Blocks.count(CommonExitBlock) && \"Expect a block outside the region!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 391, __extension__ __PRETTY_FUNCTION__))
;
392 for (auto *Pred : predecessors(CommonExitBlock)) {
393 if (!Blocks.count(Pred))
394 continue;
395 if (!SinglePredFromOutlineRegion) {
396 SinglePredFromOutlineRegion = Pred;
397 } else if (SinglePredFromOutlineRegion != Pred) {
398 SinglePredFromOutlineRegion = nullptr;
399 break;
400 }
401 }
402
403 if (SinglePredFromOutlineRegion)
404 return SinglePredFromOutlineRegion;
405
406#ifndef NDEBUG
407 auto getFirstPHI = [](BasicBlock *BB) {
408 BasicBlock::iterator I = BB->begin();
409 PHINode *FirstPhi = nullptr;
410 while (I != BB->end()) {
411 PHINode *Phi = dyn_cast<PHINode>(I);
412 if (!Phi)
413 break;
414 if (!FirstPhi) {
415 FirstPhi = Phi;
416 break;
417 }
418 }
419 return FirstPhi;
420 };
421 // If there are any phi nodes, the single pred either exists or has already
422 // be created before code extraction.
423 assert(!getFirstPHI(CommonExitBlock) && "Phi not expected")(static_cast <bool> (!getFirstPHI(CommonExitBlock) &&
"Phi not expected") ? void (0) : __assert_fail ("!getFirstPHI(CommonExitBlock) && \"Phi not expected\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 423, __extension__ __PRETTY_FUNCTION__))
;
424#endif
425
426 BasicBlock *NewExitBlock = CommonExitBlock->splitBasicBlock(
427 CommonExitBlock->getFirstNonPHI()->getIterator());
428
429 for (BasicBlock *Pred :
430 llvm::make_early_inc_range(predecessors(CommonExitBlock))) {
431 if (Blocks.count(Pred))
432 continue;
433 Pred->getTerminator()->replaceUsesOfWith(CommonExitBlock, NewExitBlock);
434 }
435 // Now add the old exit block to the outline region.
436 Blocks.insert(CommonExitBlock);
437 return CommonExitBlock;
438}
439
440// Find the pair of life time markers for address 'Addr' that are either
441// defined inside the outline region or can legally be shrinkwrapped into the
442// outline region. If there are not other untracked uses of the address, return
443// the pair of markers if found; otherwise return a pair of nullptr.
444CodeExtractor::LifetimeMarkerInfo
445CodeExtractor::getLifetimeMarkers(const CodeExtractorAnalysisCache &CEAC,
446 Instruction *Addr,
447 BasicBlock *ExitBlock) const {
448 LifetimeMarkerInfo Info;
449
450 for (User *U : Addr->users()) {
451 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(U);
452 if (IntrInst) {
453 // We don't model addresses with multiple start/end markers, but the
454 // markers do not need to be in the region.
455 if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_start) {
456 if (Info.LifeStart)
457 return {};
458 Info.LifeStart = IntrInst;
459 continue;
460 }
461 if (IntrInst->getIntrinsicID() == Intrinsic::lifetime_end) {
462 if (Info.LifeEnd)
463 return {};
464 Info.LifeEnd = IntrInst;
465 continue;
466 }
467 // At this point, permit debug uses outside of the region.
468 // This is fixed in a later call to fixupDebugInfoPostExtraction().
469 if (isa<DbgInfoIntrinsic>(IntrInst))
470 continue;
471 }
472 // Find untracked uses of the address, bail.
473 if (!definedInRegion(Blocks, U))
474 return {};
475 }
476
477 if (!Info.LifeStart || !Info.LifeEnd)
478 return {};
479
480 Info.SinkLifeStart = !definedInRegion(Blocks, Info.LifeStart);
481 Info.HoistLifeEnd = !definedInRegion(Blocks, Info.LifeEnd);
482 // Do legality check.
483 if ((Info.SinkLifeStart || Info.HoistLifeEnd) &&
484 !isLegalToShrinkwrapLifetimeMarkers(CEAC, Addr))
485 return {};
486
487 // Check to see if we have a place to do hoisting, if not, bail.
488 if (Info.HoistLifeEnd && !ExitBlock)
489 return {};
490
491 return Info;
492}
493
494void CodeExtractor::findAllocas(const CodeExtractorAnalysisCache &CEAC,
495 ValueSet &SinkCands, ValueSet &HoistCands,
496 BasicBlock *&ExitBlock) const {
497 Function *Func = (*Blocks.begin())->getParent();
498 ExitBlock = getCommonExitBlock(Blocks);
499
500 auto moveOrIgnoreLifetimeMarkers =
501 [&](const LifetimeMarkerInfo &LMI) -> bool {
502 if (!LMI.LifeStart)
503 return false;
504 if (LMI.SinkLifeStart) {
505 LLVM_DEBUG(dbgs() << "Sinking lifetime.start: " << *LMI.LifeStartdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Sinking lifetime.start: "
<< *LMI.LifeStart << "\n"; } } while (false)
506 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Sinking lifetime.start: "
<< *LMI.LifeStart << "\n"; } } while (false)
;
507 SinkCands.insert(LMI.LifeStart);
508 }
509 if (LMI.HoistLifeEnd) {
510 LLVM_DEBUG(dbgs() << "Hoisting lifetime.end: " << *LMI.LifeEnd << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Hoisting lifetime.end: "
<< *LMI.LifeEnd << "\n"; } } while (false)
;
511 HoistCands.insert(LMI.LifeEnd);
512 }
513 return true;
514 };
515
516 // Look up allocas in the original function in CodeExtractorAnalysisCache, as
517 // this is much faster than walking all the instructions.
518 for (AllocaInst *AI : CEAC.getAllocas()) {
519 BasicBlock *BB = AI->getParent();
520 if (Blocks.count(BB))
521 continue;
522
523 // As a prior call to extractCodeRegion() may have shrinkwrapped the alloca,
524 // check whether it is actually still in the original function.
525 Function *AIFunc = BB->getParent();
526 if (AIFunc != Func)
527 continue;
528
529 LifetimeMarkerInfo MarkerInfo = getLifetimeMarkers(CEAC, AI, ExitBlock);
530 bool Moved = moveOrIgnoreLifetimeMarkers(MarkerInfo);
531 if (Moved) {
532 LLVM_DEBUG(dbgs() << "Sinking alloca: " << *AI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Sinking alloca: " <<
*AI << "\n"; } } while (false)
;
533 SinkCands.insert(AI);
534 continue;
535 }
536
537 // Find bitcasts in the outlined region that have lifetime marker users
538 // outside that region. Replace the lifetime marker use with an
539 // outside region bitcast to avoid unnecessary alloca/reload instructions
540 // and extra lifetime markers.
541 SmallVector<Instruction *, 2> LifetimeBitcastUsers;
542 for (User *U : AI->users()) {
543 if (!definedInRegion(Blocks, U))
544 continue;
545
546 if (U->stripInBoundsConstantOffsets() != AI)
547 continue;
548
549 Instruction *Bitcast = cast<Instruction>(U);
550 for (User *BU : Bitcast->users()) {
551 IntrinsicInst *IntrInst = dyn_cast<IntrinsicInst>(BU);
552 if (!IntrInst)
553 continue;
554
555 if (!IntrInst->isLifetimeStartOrEnd())
556 continue;
557
558 if (definedInRegion(Blocks, IntrInst))
559 continue;
560
561 LLVM_DEBUG(dbgs() << "Replace use of extracted region bitcast"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Replace use of extracted region bitcast"
<< *Bitcast << " in out-of-region lifetime marker "
<< *IntrInst << "\n"; } } while (false)
562 << *Bitcast << " in out-of-region lifetime marker "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Replace use of extracted region bitcast"
<< *Bitcast << " in out-of-region lifetime marker "
<< *IntrInst << "\n"; } } while (false)
563 << *IntrInst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Replace use of extracted region bitcast"
<< *Bitcast << " in out-of-region lifetime marker "
<< *IntrInst << "\n"; } } while (false)
;
564 LifetimeBitcastUsers.push_back(IntrInst);
565 }
566 }
567
568 for (Instruction *I : LifetimeBitcastUsers) {
569 Module *M = AIFunc->getParent();
570 LLVMContext &Ctx = M->getContext();
571 auto *Int8PtrTy = Type::getInt8PtrTy(Ctx);
572 CastInst *CastI =
573 CastInst::CreatePointerCast(AI, Int8PtrTy, "lt.cast", I);
574 I->replaceUsesOfWith(I->getOperand(1), CastI);
575 }
576
577 // Follow any bitcasts.
578 SmallVector<Instruction *, 2> Bitcasts;
579 SmallVector<LifetimeMarkerInfo, 2> BitcastLifetimeInfo;
580 for (User *U : AI->users()) {
581 if (U->stripInBoundsConstantOffsets() == AI) {
582 Instruction *Bitcast = cast<Instruction>(U);
583 LifetimeMarkerInfo LMI = getLifetimeMarkers(CEAC, Bitcast, ExitBlock);
584 if (LMI.LifeStart) {
585 Bitcasts.push_back(Bitcast);
586 BitcastLifetimeInfo.push_back(LMI);
587 continue;
588 }
589 }
590
591 // Found unknown use of AI.
592 if (!definedInRegion(Blocks, U)) {
593 Bitcasts.clear();
594 break;
595 }
596 }
597
598 // Either no bitcasts reference the alloca or there are unknown uses.
599 if (Bitcasts.empty())
600 continue;
601
602 LLVM_DEBUG(dbgs() << "Sinking alloca (via bitcast): " << *AI << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Sinking alloca (via bitcast): "
<< *AI << "\n"; } } while (false)
;
603 SinkCands.insert(AI);
604 for (unsigned I = 0, E = Bitcasts.size(); I != E; ++I) {
605 Instruction *BitcastAddr = Bitcasts[I];
606 const LifetimeMarkerInfo &LMI = BitcastLifetimeInfo[I];
607 assert(LMI.LifeStart &&(static_cast <bool> (LMI.LifeStart && "Unsafe to sink bitcast without lifetime markers"
) ? void (0) : __assert_fail ("LMI.LifeStart && \"Unsafe to sink bitcast without lifetime markers\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 608, __extension__ __PRETTY_FUNCTION__))
608 "Unsafe to sink bitcast without lifetime markers")(static_cast <bool> (LMI.LifeStart && "Unsafe to sink bitcast without lifetime markers"
) ? void (0) : __assert_fail ("LMI.LifeStart && \"Unsafe to sink bitcast without lifetime markers\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 608, __extension__ __PRETTY_FUNCTION__))
;
609 moveOrIgnoreLifetimeMarkers(LMI);
610 if (!definedInRegion(Blocks, BitcastAddr)) {
611 LLVM_DEBUG(dbgs() << "Sinking bitcast-of-alloca: " << *BitcastAddrdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Sinking bitcast-of-alloca: "
<< *BitcastAddr << "\n"; } } while (false)
612 << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "Sinking bitcast-of-alloca: "
<< *BitcastAddr << "\n"; } } while (false)
;
613 SinkCands.insert(BitcastAddr);
614 }
615 }
616 }
617}
618
619bool CodeExtractor::isEligible() const {
620 if (Blocks.empty())
621 return false;
622 BasicBlock *Header = *Blocks.begin();
623 Function *F = Header->getParent();
624
625 // For functions with varargs, check that varargs handling is only done in the
626 // outlined function, i.e vastart and vaend are only used in outlined blocks.
627 if (AllowVarArgs && F->getFunctionType()->isVarArg()) {
628 auto containsVarArgIntrinsic = [](const Instruction &I) {
629 if (const CallInst *CI = dyn_cast<CallInst>(&I))
630 if (const Function *Callee = CI->getCalledFunction())
631 return Callee->getIntrinsicID() == Intrinsic::vastart ||
632 Callee->getIntrinsicID() == Intrinsic::vaend;
633 return false;
634 };
635
636 for (auto &BB : *F) {
637 if (Blocks.count(&BB))
638 continue;
639 if (llvm::any_of(BB, containsVarArgIntrinsic))
640 return false;
641 }
642 }
643 return true;
644}
645
646void CodeExtractor::findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
647 const ValueSet &SinkCands) const {
648 for (BasicBlock *BB : Blocks) {
649 // If a used value is defined outside the region, it's an input. If an
650 // instruction is used outside the region, it's an output.
651 for (Instruction &II : *BB) {
652 for (auto &OI : II.operands()) {
653 Value *V = OI;
654 if (!SinkCands.count(V) && definedInCaller(Blocks, V))
655 Inputs.insert(V);
656 }
657
658 for (User *U : II.users())
659 if (!definedInRegion(Blocks, U)) {
660 Outputs.insert(&II);
661 break;
662 }
663 }
664 }
665}
666
667/// severSplitPHINodesOfEntry - If a PHI node has multiple inputs from outside
668/// of the region, we need to split the entry block of the region so that the
669/// PHI node is easier to deal with.
670void CodeExtractor::severSplitPHINodesOfEntry(BasicBlock *&Header) {
671 unsigned NumPredsFromRegion = 0;
672 unsigned NumPredsOutsideRegion = 0;
673
674 if (Header != &Header->getParent()->getEntryBlock()) {
675 PHINode *PN = dyn_cast<PHINode>(Header->begin());
676 if (!PN) return; // No PHI nodes.
677
678 // If the header node contains any PHI nodes, check to see if there is more
679 // than one entry from outside the region. If so, we need to sever the
680 // header block into two.
681 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
682 if (Blocks.count(PN->getIncomingBlock(i)))
683 ++NumPredsFromRegion;
684 else
685 ++NumPredsOutsideRegion;
686
687 // If there is one (or fewer) predecessor from outside the region, we don't
688 // need to do anything special.
689 if (NumPredsOutsideRegion <= 1) return;
690 }
691
692 // Otherwise, we need to split the header block into two pieces: one
693 // containing PHI nodes merging values from outside of the region, and a
694 // second that contains all of the code for the block and merges back any
695 // incoming values from inside of the region.
696 BasicBlock *NewBB = SplitBlock(Header, Header->getFirstNonPHI(), DT);
697
698 // We only want to code extract the second block now, and it becomes the new
699 // header of the region.
700 BasicBlock *OldPred = Header;
701 Blocks.remove(OldPred);
702 Blocks.insert(NewBB);
703 Header = NewBB;
704
705 // Okay, now we need to adjust the PHI nodes and any branches from within the
706 // region to go to the new header block instead of the old header block.
707 if (NumPredsFromRegion) {
708 PHINode *PN = cast<PHINode>(OldPred->begin());
709 // Loop over all of the predecessors of OldPred that are in the region,
710 // changing them to branch to NewBB instead.
711 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
712 if (Blocks.count(PN->getIncomingBlock(i))) {
713 Instruction *TI = PN->getIncomingBlock(i)->getTerminator();
714 TI->replaceUsesOfWith(OldPred, NewBB);
715 }
716
717 // Okay, everything within the region is now branching to the right block, we
718 // just have to update the PHI nodes now, inserting PHI nodes into NewBB.
719 BasicBlock::iterator AfterPHIs;
720 for (AfterPHIs = OldPred->begin(); isa<PHINode>(AfterPHIs); ++AfterPHIs) {
721 PHINode *PN = cast<PHINode>(AfterPHIs);
722 // Create a new PHI node in the new region, which has an incoming value
723 // from OldPred of PN.
724 PHINode *NewPN = PHINode::Create(PN->getType(), 1 + NumPredsFromRegion,
725 PN->getName() + ".ce", &NewBB->front());
726 PN->replaceAllUsesWith(NewPN);
727 NewPN->addIncoming(PN, OldPred);
728
729 // Loop over all of the incoming value in PN, moving them to NewPN if they
730 // are from the extracted region.
731 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
732 if (Blocks.count(PN->getIncomingBlock(i))) {
733 NewPN->addIncoming(PN->getIncomingValue(i), PN->getIncomingBlock(i));
734 PN->removeIncomingValue(i);
735 --i;
736 }
737 }
738 }
739 }
740}
741
742/// severSplitPHINodesOfExits - if PHI nodes in exit blocks have inputs from
743/// outlined region, we split these PHIs on two: one with inputs from region
744/// and other with remaining incoming blocks; then first PHIs are placed in
745/// outlined region.
746void CodeExtractor::severSplitPHINodesOfExits(
747 const SmallPtrSetImpl<BasicBlock *> &Exits) {
748 for (BasicBlock *ExitBB : Exits) {
749 BasicBlock *NewBB = nullptr;
750
751 for (PHINode &PN : ExitBB->phis()) {
752 // Find all incoming values from the outlining region.
753 SmallVector<unsigned, 2> IncomingVals;
754 for (unsigned i = 0; i < PN.getNumIncomingValues(); ++i)
755 if (Blocks.count(PN.getIncomingBlock(i)))
756 IncomingVals.push_back(i);
757
758 // Do not process PHI if there is one (or fewer) predecessor from region.
759 // If PHI has exactly one predecessor from region, only this one incoming
760 // will be replaced on codeRepl block, so it should be safe to skip PHI.
761 if (IncomingVals.size() <= 1)
762 continue;
763
764 // Create block for new PHIs and add it to the list of outlined if it
765 // wasn't done before.
766 if (!NewBB) {
767 NewBB = BasicBlock::Create(ExitBB->getContext(),
768 ExitBB->getName() + ".split",
769 ExitBB->getParent(), ExitBB);
770 SmallVector<BasicBlock *, 4> Preds(predecessors(ExitBB));
771 for (BasicBlock *PredBB : Preds)
772 if (Blocks.count(PredBB))
773 PredBB->getTerminator()->replaceUsesOfWith(ExitBB, NewBB);
774 BranchInst::Create(ExitBB, NewBB);
775 Blocks.insert(NewBB);
776 }
777
778 // Split this PHI.
779 PHINode *NewPN =
780 PHINode::Create(PN.getType(), IncomingVals.size(),
781 PN.getName() + ".ce", NewBB->getFirstNonPHI());
782 for (unsigned i : IncomingVals)
783 NewPN->addIncoming(PN.getIncomingValue(i), PN.getIncomingBlock(i));
784 for (unsigned i : reverse(IncomingVals))
785 PN.removeIncomingValue(i, false);
786 PN.addIncoming(NewPN, NewBB);
787 }
788 }
789}
790
791void CodeExtractor::splitReturnBlocks() {
792 for (BasicBlock *Block : Blocks)
793 if (ReturnInst *RI = dyn_cast<ReturnInst>(Block->getTerminator())) {
794 BasicBlock *New =
795 Block->splitBasicBlock(RI->getIterator(), Block->getName() + ".ret");
796 if (DT) {
797 // Old dominates New. New node dominates all other nodes dominated
798 // by Old.
799 DomTreeNode *OldNode = DT->getNode(Block);
800 SmallVector<DomTreeNode *, 8> Children(OldNode->begin(),
801 OldNode->end());
802
803 DomTreeNode *NewNode = DT->addNewBlock(New, Block);
804
805 for (DomTreeNode *I : Children)
806 DT->changeImmediateDominator(I, NewNode);
807 }
808 }
809}
810
811/// constructFunction - make a function based on inputs and outputs, as follows:
812/// f(in0, ..., inN, out0, ..., outN)
813Function *CodeExtractor::constructFunction(const ValueSet &inputs,
814 const ValueSet &outputs,
815 BasicBlock *header,
816 BasicBlock *newRootNode,
817 BasicBlock *newHeader,
818 Function *oldFunction,
819 Module *M) {
820 LLVM_DEBUG(dbgs() << "inputs: " << inputs.size() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "inputs: " << inputs
.size() << "\n"; } } while (false)
;
821 LLVM_DEBUG(dbgs() << "outputs: " << outputs.size() << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "outputs: " << outputs
.size() << "\n"; } } while (false)
;
822
823 // This function returns unsigned, outputs will go back by reference.
824 switch (NumExitBlocks) {
825 case 0:
826 case 1: RetTy = Type::getVoidTy(header->getContext()); break;
827 case 2: RetTy = Type::getInt1Ty(header->getContext()); break;
828 default: RetTy = Type::getInt16Ty(header->getContext()); break;
829 }
830
831 std::vector<Type *> paramTy;
832
833 // Add the types of the input values to the function's argument list
834 for (Value *value : inputs) {
835 LLVM_DEBUG(dbgs() << "value used in func: " << *value << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "value used in func: " <<
*value << "\n"; } } while (false)
;
836 paramTy.push_back(value->getType());
837 }
838
839 // Add the types of the output values to the function's argument list.
840 for (Value *output : outputs) {
841 LLVM_DEBUG(dbgs() << "instr used in func: " << *output << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { dbgs() << "instr used in func: " <<
*output << "\n"; } } while (false)
;
842 if (AggregateArgs)
843 paramTy.push_back(output->getType());
844 else
845 paramTy.push_back(PointerType::getUnqual(output->getType()));
846 }
847
848 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { { dbgs() << "Function type: " <<
*RetTy << " f("; for (Type *i : paramTy) dbgs() <<
*i << ", "; dbgs() << ")\n"; }; } } while (false
)
849 dbgs() << "Function type: " << *RetTy << " f(";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { { dbgs() << "Function type: " <<
*RetTy << " f("; for (Type *i : paramTy) dbgs() <<
*i << ", "; dbgs() << ")\n"; }; } } while (false
)
850 for (Type *i : paramTy)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { { dbgs() << "Function type: " <<
*RetTy << " f("; for (Type *i : paramTy) dbgs() <<
*i << ", "; dbgs() << ")\n"; }; } } while (false
)
851 dbgs() << *i << ", ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { { dbgs() << "Function type: " <<
*RetTy << " f("; for (Type *i : paramTy) dbgs() <<
*i << ", "; dbgs() << ")\n"; }; } } while (false
)
852 dbgs() << ")\n";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { { dbgs() << "Function type: " <<
*RetTy << " f("; for (Type *i : paramTy) dbgs() <<
*i << ", "; dbgs() << ")\n"; }; } } while (false
)
853 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { { dbgs() << "Function type: " <<
*RetTy << " f("; for (Type *i : paramTy) dbgs() <<
*i << ", "; dbgs() << ")\n"; }; } } while (false
)
;
854
855 StructType *StructTy = nullptr;
856 if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
857 StructTy = StructType::get(M->getContext(), paramTy);
858 paramTy.clear();
859 paramTy.push_back(PointerType::getUnqual(StructTy));
860 }
861 FunctionType *funcType =
862 FunctionType::get(RetTy, paramTy,
863 AllowVarArgs && oldFunction->isVarArg());
864
865 std::string SuffixToUse =
866 Suffix.empty()
867 ? (header->getName().empty() ? "extracted" : header->getName().str())
868 : Suffix;
869 // Create the new function
870 Function *newFunction = Function::Create(
871 funcType, GlobalValue::InternalLinkage, oldFunction->getAddressSpace(),
872 oldFunction->getName() + "." + SuffixToUse, M);
873 // If the old function is no-throw, so is the new one.
874 if (oldFunction->doesNotThrow())
875 newFunction->setDoesNotThrow();
876
877 // Inherit the uwtable attribute if we need to.
878 if (oldFunction->hasUWTable())
879 newFunction->setHasUWTable();
880
881 // Inherit all of the target dependent attributes and white-listed
882 // target independent attributes.
883 // (e.g. If the extracted region contains a call to an x86.sse
884 // instruction we need to make sure that the extracted region has the
885 // "target-features" attribute allowing it to be lowered.
886 // FIXME: This should be changed to check to see if a specific
887 // attribute can not be inherited.
888 for (const auto &Attr : oldFunction->getAttributes().getFnAttributes()) {
889 if (Attr.isStringAttribute()) {
890 if (Attr.getKindAsString() == "thunk")
891 continue;
892 } else
893 switch (Attr.getKindAsEnum()) {
894 // Those attributes cannot be propagated safely. Explicitly list them
895 // here so we get a warning if new attributes are added. This list also
896 // includes non-function attributes.
897 case Attribute::Alignment:
898 case Attribute::AllocSize:
899 case Attribute::ArgMemOnly:
900 case Attribute::Builtin:
901 case Attribute::ByVal:
902 case Attribute::Convergent:
903 case Attribute::Dereferenceable:
904 case Attribute::DereferenceableOrNull:
905 case Attribute::InAlloca:
906 case Attribute::InReg:
907 case Attribute::InaccessibleMemOnly:
908 case Attribute::InaccessibleMemOrArgMemOnly:
909 case Attribute::JumpTable:
910 case Attribute::Naked:
911 case Attribute::Nest:
912 case Attribute::NoAlias:
913 case Attribute::NoBuiltin:
914 case Attribute::NoCapture:
915 case Attribute::NoMerge:
916 case Attribute::NoReturn:
917 case Attribute::NoSync:
918 case Attribute::NoUndef:
919 case Attribute::None:
920 case Attribute::NonNull:
921 case Attribute::Preallocated:
922 case Attribute::ReadNone:
923 case Attribute::ReadOnly:
924 case Attribute::Returned:
925 case Attribute::ReturnsTwice:
926 case Attribute::SExt:
927 case Attribute::Speculatable:
928 case Attribute::StackAlignment:
929 case Attribute::StructRet:
930 case Attribute::SwiftError:
931 case Attribute::SwiftSelf:
932 case Attribute::SwiftAsync:
933 case Attribute::WillReturn:
934 case Attribute::WriteOnly:
935 case Attribute::ZExt:
936 case Attribute::ImmArg:
937 case Attribute::ByRef:
938 case Attribute::EndAttrKinds:
939 case Attribute::EmptyKey:
940 case Attribute::TombstoneKey:
941 continue;
942 // Those attributes should be safe to propagate to the extracted function.
943 case Attribute::AlwaysInline:
944 case Attribute::Cold:
945 case Attribute::Hot:
946 case Attribute::NoRecurse:
947 case Attribute::InlineHint:
948 case Attribute::MinSize:
949 case Attribute::NoCallback:
950 case Attribute::NoDuplicate:
951 case Attribute::NoFree:
952 case Attribute::NoImplicitFloat:
953 case Attribute::NoInline:
954 case Attribute::NonLazyBind:
955 case Attribute::NoRedZone:
956 case Attribute::NoUnwind:
957 case Attribute::NoSanitizeCoverage:
958 case Attribute::NullPointerIsValid:
959 case Attribute::OptForFuzzing:
960 case Attribute::OptimizeNone:
961 case Attribute::OptimizeForSize:
962 case Attribute::SafeStack:
963 case Attribute::ShadowCallStack:
964 case Attribute::SanitizeAddress:
965 case Attribute::SanitizeMemory:
966 case Attribute::SanitizeThread:
967 case Attribute::SanitizeHWAddress:
968 case Attribute::SanitizeMemTag:
969 case Attribute::SpeculativeLoadHardening:
970 case Attribute::StackProtect:
971 case Attribute::StackProtectReq:
972 case Attribute::StackProtectStrong:
973 case Attribute::StrictFP:
974 case Attribute::UWTable:
975 case Attribute::VScaleRange:
976 case Attribute::NoCfCheck:
977 case Attribute::MustProgress:
978 case Attribute::NoProfile:
979 break;
980 }
981
982 newFunction->addFnAttr(Attr);
983 }
984 newFunction->getBasicBlockList().push_back(newRootNode);
985
986 // Create an iterator to name all of the arguments we inserted.
987 Function::arg_iterator AI = newFunction->arg_begin();
988
989 // Rewrite all users of the inputs in the extracted region to use the
990 // arguments (or appropriate addressing into struct) instead.
991 for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
992 Value *RewriteVal;
993 if (AggregateArgs) {
994 Value *Idx[2];
995 Idx[0] = Constant::getNullValue(Type::getInt32Ty(header->getContext()));
996 Idx[1] = ConstantInt::get(Type::getInt32Ty(header->getContext()), i);
997 Instruction *TI = newFunction->begin()->getTerminator();
998 GetElementPtrInst *GEP = GetElementPtrInst::Create(
999 StructTy, &*AI, Idx, "gep_" + inputs[i]->getName(), TI);
1000 RewriteVal = new LoadInst(StructTy->getElementType(i), GEP,
1001 "loadgep_" + inputs[i]->getName(), TI);
1002 } else
1003 RewriteVal = &*AI++;
1004
1005 std::vector<User *> Users(inputs[i]->user_begin(), inputs[i]->user_end());
1006 for (User *use : Users)
1007 if (Instruction *inst = dyn_cast<Instruction>(use))
1008 if (Blocks.count(inst->getParent()))
1009 inst->replaceUsesOfWith(inputs[i], RewriteVal);
1010 }
1011
1012 // Set names for input and output arguments.
1013 if (!AggregateArgs) {
1014 AI = newFunction->arg_begin();
1015 for (unsigned i = 0, e = inputs.size(); i != e; ++i, ++AI)
1016 AI->setName(inputs[i]->getName());
1017 for (unsigned i = 0, e = outputs.size(); i != e; ++i, ++AI)
1018 AI->setName(outputs[i]->getName()+".out");
1019 }
1020
1021 // Rewrite branches to basic blocks outside of the loop to new dummy blocks
1022 // within the new function. This must be done before we lose track of which
1023 // blocks were originally in the code region.
1024 std::vector<User *> Users(header->user_begin(), header->user_end());
1025 for (auto &U : Users)
1026 // The BasicBlock which contains the branch is not in the region
1027 // modify the branch target to a new block
1028 if (Instruction *I = dyn_cast<Instruction>(U))
1029 if (I->isTerminator() && I->getFunction() == oldFunction &&
1030 !Blocks.count(I->getParent()))
1031 I->replaceUsesOfWith(header, newHeader);
1032
1033 return newFunction;
1034}
1035
1036/// Erase lifetime.start markers which reference inputs to the extraction
1037/// region, and insert the referenced memory into \p LifetimesStart.
1038///
1039/// The extraction region is defined by a set of blocks (\p Blocks), and a set
1040/// of allocas which will be moved from the caller function into the extracted
1041/// function (\p SunkAllocas).
1042static void eraseLifetimeMarkersOnInputs(const SetVector<BasicBlock *> &Blocks,
1043 const SetVector<Value *> &SunkAllocas,
1044 SetVector<Value *> &LifetimesStart) {
1045 for (BasicBlock *BB : Blocks) {
1046 for (auto It = BB->begin(), End = BB->end(); It != End;) {
1047 auto *II = dyn_cast<IntrinsicInst>(&*It);
1048 ++It;
1049 if (!II || !II->isLifetimeStartOrEnd())
1050 continue;
1051
1052 // Get the memory operand of the lifetime marker. If the underlying
1053 // object is a sunk alloca, or is otherwise defined in the extraction
1054 // region, the lifetime marker must not be erased.
1055 Value *Mem = II->getOperand(1)->stripInBoundsOffsets();
1056 if (SunkAllocas.count(Mem) || definedInRegion(Blocks, Mem))
1057 continue;
1058
1059 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1060 LifetimesStart.insert(Mem);
1061 II->eraseFromParent();
1062 }
1063 }
1064}
1065
1066/// Insert lifetime start/end markers surrounding the call to the new function
1067/// for objects defined in the caller.
1068static void insertLifetimeMarkersSurroundingCall(
1069 Module *M, ArrayRef<Value *> LifetimesStart, ArrayRef<Value *> LifetimesEnd,
1070 CallInst *TheCall) {
1071 LLVMContext &Ctx = M->getContext();
1072 auto Int8PtrTy = Type::getInt8PtrTy(Ctx);
1073 auto NegativeOne = ConstantInt::getSigned(Type::getInt64Ty(Ctx), -1);
1074 Instruction *Term = TheCall->getParent()->getTerminator();
1075
1076 // The memory argument to a lifetime marker must be a i8*. Cache any bitcasts
1077 // needed to satisfy this requirement so they may be reused.
1078 DenseMap<Value *, Value *> Bitcasts;
1079
1080 // Emit lifetime markers for the pointers given in \p Objects. Insert the
1081 // markers before the call if \p InsertBefore, and after the call otherwise.
1082 auto insertMarkers = [&](Function *MarkerFunc, ArrayRef<Value *> Objects,
1083 bool InsertBefore) {
1084 for (Value *Mem : Objects) {
1085 assert((!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() ==(static_cast <bool> ((!isa<Instruction>(Mem) || cast
<Instruction>(Mem)->getFunction() == TheCall->getFunction
()) && "Input memory not defined in original function"
) ? void (0) : __assert_fail ("(!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() == TheCall->getFunction()) && \"Input memory not defined in original function\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1087, __extension__ __PRETTY_FUNCTION__))
1086 TheCall->getFunction()) &&(static_cast <bool> ((!isa<Instruction>(Mem) || cast
<Instruction>(Mem)->getFunction() == TheCall->getFunction
()) && "Input memory not defined in original function"
) ? void (0) : __assert_fail ("(!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() == TheCall->getFunction()) && \"Input memory not defined in original function\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1087, __extension__ __PRETTY_FUNCTION__))
1087 "Input memory not defined in original function")(static_cast <bool> ((!isa<Instruction>(Mem) || cast
<Instruction>(Mem)->getFunction() == TheCall->getFunction
()) && "Input memory not defined in original function"
) ? void (0) : __assert_fail ("(!isa<Instruction>(Mem) || cast<Instruction>(Mem)->getFunction() == TheCall->getFunction()) && \"Input memory not defined in original function\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1087, __extension__ __PRETTY_FUNCTION__))
;
1088 Value *&MemAsI8Ptr = Bitcasts[Mem];
1089 if (!MemAsI8Ptr) {
1090 if (Mem->getType() == Int8PtrTy)
1091 MemAsI8Ptr = Mem;
1092 else
1093 MemAsI8Ptr =
1094 CastInst::CreatePointerCast(Mem, Int8PtrTy, "lt.cast", TheCall);
1095 }
1096
1097 auto Marker = CallInst::Create(MarkerFunc, {NegativeOne, MemAsI8Ptr});
1098 if (InsertBefore)
1099 Marker->insertBefore(TheCall);
1100 else
1101 Marker->insertBefore(Term);
1102 }
1103 };
1104
1105 if (!LifetimesStart.empty()) {
1106 auto StartFn = llvm::Intrinsic::getDeclaration(
1107 M, llvm::Intrinsic::lifetime_start, Int8PtrTy);
1108 insertMarkers(StartFn, LifetimesStart, /*InsertBefore=*/true);
1109 }
1110
1111 if (!LifetimesEnd.empty()) {
1112 auto EndFn = llvm::Intrinsic::getDeclaration(
1113 M, llvm::Intrinsic::lifetime_end, Int8PtrTy);
1114 insertMarkers(EndFn, LifetimesEnd, /*InsertBefore=*/false);
1115 }
1116}
1117
1118/// emitCallAndSwitchStatement - This method sets up the caller side by adding
1119/// the call instruction, splitting any PHI nodes in the header block as
1120/// necessary.
1121CallInst *CodeExtractor::emitCallAndSwitchStatement(Function *newFunction,
1122 BasicBlock *codeReplacer,
1123 ValueSet &inputs,
1124 ValueSet &outputs) {
1125 // Emit a call to the new function, passing in: *pointer to struct (if
1126 // aggregating parameters), or plan inputs and allocated memory for outputs
1127 std::vector<Value *> params, StructValues, ReloadOutputs, Reloads;
1128
1129 Module *M = newFunction->getParent();
1130 LLVMContext &Context = M->getContext();
1131 const DataLayout &DL = M->getDataLayout();
1132 CallInst *call = nullptr;
1133
1134 // Add inputs as params, or to be filled into the struct
1135 unsigned ArgNo = 0;
1136 SmallVector<unsigned, 1> SwiftErrorArgs;
1137 for (Value *input : inputs) {
1138 if (AggregateArgs)
1139 StructValues.push_back(input);
1140 else {
1141 params.push_back(input);
1142 if (input->isSwiftError())
1143 SwiftErrorArgs.push_back(ArgNo);
1144 }
1145 ++ArgNo;
1146 }
1147
1148 // Create allocas for the outputs
1149 for (Value *output : outputs) {
1150 if (AggregateArgs) {
1151 StructValues.push_back(output);
1152 } else {
1153 AllocaInst *alloca =
1154 new AllocaInst(output->getType(), DL.getAllocaAddrSpace(),
1155 nullptr, output->getName() + ".loc",
1156 &codeReplacer->getParent()->front().front());
1157 ReloadOutputs.push_back(alloca);
1158 params.push_back(alloca);
1159 }
1160 }
1161
1162 StructType *StructArgTy = nullptr;
1163 AllocaInst *Struct = nullptr;
1
'Struct' initialized to a null pointer value
1164 if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
2
Assuming field 'AggregateArgs' is true
3
Assuming the condition is false
4
Taking false branch
1165 std::vector<Type *> ArgTypes;
1166 for (Value *V : StructValues)
1167 ArgTypes.push_back(V->getType());
1168
1169 // Allocate a struct at the beginning of this function
1170 StructArgTy = StructType::get(newFunction->getContext(), ArgTypes);
1171 Struct = new AllocaInst(StructArgTy, DL.getAllocaAddrSpace(), nullptr,
1172 "structArg",
1173 &codeReplacer->getParent()->front().front());
1174 params.push_back(Struct);
1175
1176 for (unsigned i = 0, e = inputs.size(); i != e; ++i) {
1177 Value *Idx[2];
1178 Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
1179 Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), i);
1180 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1181 StructArgTy, Struct, Idx, "gep_" + StructValues[i]->getName());
1182 codeReplacer->getInstList().push_back(GEP);
1183 new StoreInst(StructValues[i], GEP, codeReplacer);
1184 }
1185 }
1186
1187 // Emit the call to the function
1188 call = CallInst::Create(newFunction, params,
1189 NumExitBlocks > 1 ? "targetBlock" : "");
5
Assuming field 'NumExitBlocks' is <= 1
6
'?' condition is false
1190 // Add debug location to the new call, if the original function has debug
1191 // info. In that case, the terminator of the entry block of the extracted
1192 // function contains the first debug location of the extracted function,
1193 // set in extractCodeRegion.
1194 if (codeReplacer->getParent()->getSubprogram()) {
7
Assuming the condition is false
8
Taking false branch
1195 if (auto DL = newFunction->getEntryBlock().getTerminator()->getDebugLoc())
1196 call->setDebugLoc(DL);
1197 }
1198 codeReplacer->getInstList().push_back(call);
1199
1200 // Set swifterror parameter attributes.
1201 for (unsigned SwiftErrArgNo : SwiftErrorArgs) {
9
Assuming '__begin1' is equal to '__end1'
1202 call->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
1203 newFunction->addParamAttr(SwiftErrArgNo, Attribute::SwiftError);
1204 }
1205
1206 Function::arg_iterator OutputArgBegin = newFunction->arg_begin();
1207 unsigned FirstOut = inputs.size();
1208 if (!AggregateArgs
9.1
Field 'AggregateArgs' is true
9.1
Field 'AggregateArgs' is true
)
10
Taking false branch
1209 std::advance(OutputArgBegin, inputs.size());
1210
1211 // Reload the outputs passed in by reference.
1212 for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
11
Assuming 'i' is not equal to 'e'
12
Loop condition is true. Entering loop body
1213 Value *Output = nullptr;
1214 if (AggregateArgs
12.1
Field 'AggregateArgs' is true
12.1
Field 'AggregateArgs' is true
) {
13
Taking true branch
1215 Value *Idx[2];
1216 Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
1217 Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
1218 GetElementPtrInst *GEP = GetElementPtrInst::Create(
15
Calling 'GetElementPtrInst::Create'
1219 StructArgTy, Struct, Idx, "gep_reload_" + outputs[i]->getName());
14
Passing null pointer value via 2nd parameter 'Ptr'
1220 codeReplacer->getInstList().push_back(GEP);
1221 Output = GEP;
1222 } else {
1223 Output = ReloadOutputs[i];
1224 }
1225 LoadInst *load = new LoadInst(outputs[i]->getType(), Output,
1226 outputs[i]->getName() + ".reload",
1227 codeReplacer);
1228 Reloads.push_back(load);
1229 std::vector<User *> Users(outputs[i]->user_begin(), outputs[i]->user_end());
1230 for (unsigned u = 0, e = Users.size(); u != e; ++u) {
1231 Instruction *inst = cast<Instruction>(Users[u]);
1232 if (!Blocks.count(inst->getParent()))
1233 inst->replaceUsesOfWith(outputs[i], load);
1234 }
1235 }
1236
1237 // Now we can emit a switch statement using the call as a value.
1238 SwitchInst *TheSwitch =
1239 SwitchInst::Create(Constant::getNullValue(Type::getInt16Ty(Context)),
1240 codeReplacer, 0, codeReplacer);
1241
1242 // Since there may be multiple exits from the original region, make the new
1243 // function return an unsigned, switch on that number. This loop iterates
1244 // over all of the blocks in the extracted region, updating any terminator
1245 // instructions in the to-be-extracted region that branch to blocks that are
1246 // not in the region to be extracted.
1247 std::map<BasicBlock *, BasicBlock *> ExitBlockMap;
1248
1249 unsigned switchVal = 0;
1250 for (BasicBlock *Block : Blocks) {
1251 Instruction *TI = Block->getTerminator();
1252 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
1253 if (!Blocks.count(TI->getSuccessor(i))) {
1254 BasicBlock *OldTarget = TI->getSuccessor(i);
1255 // add a new basic block which returns the appropriate value
1256 BasicBlock *&NewTarget = ExitBlockMap[OldTarget];
1257 if (!NewTarget) {
1258 // If we don't already have an exit stub for this non-extracted
1259 // destination, create one now!
1260 NewTarget = BasicBlock::Create(Context,
1261 OldTarget->getName() + ".exitStub",
1262 newFunction);
1263 unsigned SuccNum = switchVal++;
1264
1265 Value *brVal = nullptr;
1266 switch (NumExitBlocks) {
1267 case 0:
1268 case 1: break; // No value needed.
1269 case 2: // Conditional branch, return a bool
1270 brVal = ConstantInt::get(Type::getInt1Ty(Context), !SuccNum);
1271 break;
1272 default:
1273 brVal = ConstantInt::get(Type::getInt16Ty(Context), SuccNum);
1274 break;
1275 }
1276
1277 ReturnInst::Create(Context, brVal, NewTarget);
1278
1279 // Update the switch instruction.
1280 TheSwitch->addCase(ConstantInt::get(Type::getInt16Ty(Context),
1281 SuccNum),
1282 OldTarget);
1283 }
1284
1285 // rewrite the original branch instruction with this new target
1286 TI->setSuccessor(i, NewTarget);
1287 }
1288 }
1289
1290 // Store the arguments right after the definition of output value.
1291 // This should be proceeded after creating exit stubs to be ensure that invoke
1292 // result restore will be placed in the outlined function.
1293 Function::arg_iterator OAI = OutputArgBegin;
1294 for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
1295 auto *OutI = dyn_cast<Instruction>(outputs[i]);
1296 if (!OutI)
1297 continue;
1298
1299 // Find proper insertion point.
1300 BasicBlock::iterator InsertPt;
1301 // In case OutI is an invoke, we insert the store at the beginning in the
1302 // 'normal destination' BB. Otherwise we insert the store right after OutI.
1303 if (auto *InvokeI = dyn_cast<InvokeInst>(OutI))
1304 InsertPt = InvokeI->getNormalDest()->getFirstInsertionPt();
1305 else if (auto *Phi = dyn_cast<PHINode>(OutI))
1306 InsertPt = Phi->getParent()->getFirstInsertionPt();
1307 else
1308 InsertPt = std::next(OutI->getIterator());
1309
1310 Instruction *InsertBefore = &*InsertPt;
1311 assert((InsertBefore->getFunction() == newFunction ||(static_cast <bool> ((InsertBefore->getFunction() ==
newFunction || Blocks.count(InsertBefore->getParent())) &&
"InsertPt should be in new function") ? void (0) : __assert_fail
("(InsertBefore->getFunction() == newFunction || Blocks.count(InsertBefore->getParent())) && \"InsertPt should be in new function\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1313, __extension__ __PRETTY_FUNCTION__))
1312 Blocks.count(InsertBefore->getParent())) &&(static_cast <bool> ((InsertBefore->getFunction() ==
newFunction || Blocks.count(InsertBefore->getParent())) &&
"InsertPt should be in new function") ? void (0) : __assert_fail
("(InsertBefore->getFunction() == newFunction || Blocks.count(InsertBefore->getParent())) && \"InsertPt should be in new function\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1313, __extension__ __PRETTY_FUNCTION__))
1313 "InsertPt should be in new function")(static_cast <bool> ((InsertBefore->getFunction() ==
newFunction || Blocks.count(InsertBefore->getParent())) &&
"InsertPt should be in new function") ? void (0) : __assert_fail
("(InsertBefore->getFunction() == newFunction || Blocks.count(InsertBefore->getParent())) && \"InsertPt should be in new function\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1313, __extension__ __PRETTY_FUNCTION__))
;
1314 assert(OAI != newFunction->arg_end() &&(static_cast <bool> (OAI != newFunction->arg_end() &&
"Number of output arguments should match " "the amount of defined values"
) ? void (0) : __assert_fail ("OAI != newFunction->arg_end() && \"Number of output arguments should match \" \"the amount of defined values\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1316, __extension__ __PRETTY_FUNCTION__))
1315 "Number of output arguments should match "(static_cast <bool> (OAI != newFunction->arg_end() &&
"Number of output arguments should match " "the amount of defined values"
) ? void (0) : __assert_fail ("OAI != newFunction->arg_end() && \"Number of output arguments should match \" \"the amount of defined values\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1316, __extension__ __PRETTY_FUNCTION__))
1316 "the amount of defined values")(static_cast <bool> (OAI != newFunction->arg_end() &&
"Number of output arguments should match " "the amount of defined values"
) ? void (0) : __assert_fail ("OAI != newFunction->arg_end() && \"Number of output arguments should match \" \"the amount of defined values\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1316, __extension__ __PRETTY_FUNCTION__))
;
1317 if (AggregateArgs) {
1318 Value *Idx[2];
1319 Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
1320 Idx[1] = ConstantInt::get(Type::getInt32Ty(Context), FirstOut + i);
1321 GetElementPtrInst *GEP = GetElementPtrInst::Create(
1322 StructArgTy, &*OAI, Idx, "gep_" + outputs[i]->getName(),
1323 InsertBefore);
1324 new StoreInst(outputs[i], GEP, InsertBefore);
1325 // Since there should be only one struct argument aggregating
1326 // all the output values, we shouldn't increment OAI, which always
1327 // points to the struct argument, in this case.
1328 } else {
1329 new StoreInst(outputs[i], &*OAI, InsertBefore);
1330 ++OAI;
1331 }
1332 }
1333
1334 // Now that we've done the deed, simplify the switch instruction.
1335 Type *OldFnRetTy = TheSwitch->getParent()->getParent()->getReturnType();
1336 switch (NumExitBlocks) {
1337 case 0:
1338 // There are no successors (the block containing the switch itself), which
1339 // means that previously this was the last part of the function, and hence
1340 // this should be rewritten as a `ret'
1341
1342 // Check if the function should return a value
1343 if (OldFnRetTy->isVoidTy()) {
1344 ReturnInst::Create(Context, nullptr, TheSwitch); // Return void
1345 } else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
1346 // return what we have
1347 ReturnInst::Create(Context, TheSwitch->getCondition(), TheSwitch);
1348 } else {
1349 // Otherwise we must have code extracted an unwind or something, just
1350 // return whatever we want.
1351 ReturnInst::Create(Context,
1352 Constant::getNullValue(OldFnRetTy), TheSwitch);
1353 }
1354
1355 TheSwitch->eraseFromParent();
1356 break;
1357 case 1:
1358 // Only a single destination, change the switch into an unconditional
1359 // branch.
1360 BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch);
1361 TheSwitch->eraseFromParent();
1362 break;
1363 case 2:
1364 BranchInst::Create(TheSwitch->getSuccessor(1), TheSwitch->getSuccessor(2),
1365 call, TheSwitch);
1366 TheSwitch->eraseFromParent();
1367 break;
1368 default:
1369 // Otherwise, make the default destination of the switch instruction be one
1370 // of the other successors.
1371 TheSwitch->setCondition(call);
1372 TheSwitch->setDefaultDest(TheSwitch->getSuccessor(NumExitBlocks));
1373 // Remove redundant case
1374 TheSwitch->removeCase(SwitchInst::CaseIt(TheSwitch, NumExitBlocks-1));
1375 break;
1376 }
1377
1378 // Insert lifetime markers around the reloads of any output values. The
1379 // allocas output values are stored in are only in-use in the codeRepl block.
1380 insertLifetimeMarkersSurroundingCall(M, ReloadOutputs, ReloadOutputs, call);
1381
1382 return call;
1383}
1384
1385void CodeExtractor::moveCodeToFunction(Function *newFunction) {
1386 Function *oldFunc = (*Blocks.begin())->getParent();
1387 Function::BasicBlockListType &oldBlocks = oldFunc->getBasicBlockList();
1388 Function::BasicBlockListType &newBlocks = newFunction->getBasicBlockList();
1389
1390 for (BasicBlock *Block : Blocks) {
1391 // Delete the basic block from the old function, and the list of blocks
1392 oldBlocks.remove(Block);
1393
1394 // Insert this basic block into the new function
1395 newBlocks.push_back(Block);
1396 }
1397}
1398
1399void CodeExtractor::calculateNewCallTerminatorWeights(
1400 BasicBlock *CodeReplacer,
1401 DenseMap<BasicBlock *, BlockFrequency> &ExitWeights,
1402 BranchProbabilityInfo *BPI) {
1403 using Distribution = BlockFrequencyInfoImplBase::Distribution;
1404 using BlockNode = BlockFrequencyInfoImplBase::BlockNode;
1405
1406 // Update the branch weights for the exit block.
1407 Instruction *TI = CodeReplacer->getTerminator();
1408 SmallVector<unsigned, 8> BranchWeights(TI->getNumSuccessors(), 0);
1409
1410 // Block Frequency distribution with dummy node.
1411 Distribution BranchDist;
1412
1413 SmallVector<BranchProbability, 4> EdgeProbabilities(
1414 TI->getNumSuccessors(), BranchProbability::getUnknown());
1415
1416 // Add each of the frequencies of the successors.
1417 for (unsigned i = 0, e = TI->getNumSuccessors(); i < e; ++i) {
1418 BlockNode ExitNode(i);
1419 uint64_t ExitFreq = ExitWeights[TI->getSuccessor(i)].getFrequency();
1420 if (ExitFreq != 0)
1421 BranchDist.addExit(ExitNode, ExitFreq);
1422 else
1423 EdgeProbabilities[i] = BranchProbability::getZero();
1424 }
1425
1426 // Check for no total weight.
1427 if (BranchDist.Total == 0) {
1428 BPI->setEdgeProbability(CodeReplacer, EdgeProbabilities);
1429 return;
1430 }
1431
1432 // Normalize the distribution so that they can fit in unsigned.
1433 BranchDist.normalize();
1434
1435 // Create normalized branch weights and set the metadata.
1436 for (unsigned I = 0, E = BranchDist.Weights.size(); I < E; ++I) {
1437 const auto &Weight = BranchDist.Weights[I];
1438
1439 // Get the weight and update the current BFI.
1440 BranchWeights[Weight.TargetNode.Index] = Weight.Amount;
1441 BranchProbability BP(Weight.Amount, BranchDist.Total);
1442 EdgeProbabilities[Weight.TargetNode.Index] = BP;
1443 }
1444 BPI->setEdgeProbability(CodeReplacer, EdgeProbabilities);
1445 TI->setMetadata(
1446 LLVMContext::MD_prof,
1447 MDBuilder(TI->getContext()).createBranchWeights(BranchWeights));
1448}
1449
1450/// Erase debug info intrinsics which refer to values in \p F but aren't in
1451/// \p F.
1452static void eraseDebugIntrinsicsWithNonLocalRefs(Function &F) {
1453 for (Instruction &I : instructions(F)) {
1454 SmallVector<DbgVariableIntrinsic *, 4> DbgUsers;
1455 findDbgUsers(DbgUsers, &I);
1456 for (DbgVariableIntrinsic *DVI : DbgUsers)
1457 if (DVI->getFunction() != &F)
1458 DVI->eraseFromParent();
1459 }
1460}
1461
1462/// Fix up the debug info in the old and new functions by pointing line
1463/// locations and debug intrinsics to the new subprogram scope, and by deleting
1464/// intrinsics which point to values outside of the new function.
1465static void fixupDebugInfoPostExtraction(Function &OldFunc, Function &NewFunc,
1466 CallInst &TheCall) {
1467 DISubprogram *OldSP = OldFunc.getSubprogram();
1468 LLVMContext &Ctx = OldFunc.getContext();
1469
1470 if (!OldSP) {
1471 // Erase any debug info the new function contains.
1472 stripDebugInfo(NewFunc);
1473 // Make sure the old function doesn't contain any non-local metadata refs.
1474 eraseDebugIntrinsicsWithNonLocalRefs(NewFunc);
1475 return;
1476 }
1477
1478 // Create a subprogram for the new function. Leave out a description of the
1479 // function arguments, as the parameters don't correspond to anything at the
1480 // source level.
1481 assert(OldSP->getUnit() && "Missing compile unit for subprogram")(static_cast <bool> (OldSP->getUnit() && "Missing compile unit for subprogram"
) ? void (0) : __assert_fail ("OldSP->getUnit() && \"Missing compile unit for subprogram\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1481, __extension__ __PRETTY_FUNCTION__))
;
1482 DIBuilder DIB(*OldFunc.getParent(), /*AllowUnresolved=*/false,
1483 OldSP->getUnit());
1484 auto SPType = DIB.createSubroutineType(DIB.getOrCreateTypeArray(None));
1485 DISubprogram::DISPFlags SPFlags = DISubprogram::SPFlagDefinition |
1486 DISubprogram::SPFlagOptimized |
1487 DISubprogram::SPFlagLocalToUnit;
1488 auto NewSP = DIB.createFunction(
1489 OldSP->getUnit(), NewFunc.getName(), NewFunc.getName(), OldSP->getFile(),
1490 /*LineNo=*/0, SPType, /*ScopeLine=*/0, DINode::FlagZero, SPFlags);
1491 NewFunc.setSubprogram(NewSP);
1492
1493 // Debug intrinsics in the new function need to be updated in one of two
1494 // ways:
1495 // 1) They need to be deleted, because they describe a value in the old
1496 // function.
1497 // 2) They need to point to fresh metadata, e.g. because they currently
1498 // point to a variable in the wrong scope.
1499 SmallDenseMap<DINode *, DINode *> RemappedMetadata;
1500 SmallVector<Instruction *, 4> DebugIntrinsicsToDelete;
1501 for (Instruction &I : instructions(NewFunc)) {
1502 auto *DII = dyn_cast<DbgInfoIntrinsic>(&I);
1503 if (!DII)
1504 continue;
1505
1506 // Point the intrinsic to a fresh label within the new function.
1507 if (auto *DLI = dyn_cast<DbgLabelInst>(&I)) {
1508 DILabel *OldLabel = DLI->getLabel();
1509 DINode *&NewLabel = RemappedMetadata[OldLabel];
1510 if (!NewLabel)
1511 NewLabel = DILabel::get(Ctx, NewSP, OldLabel->getName(),
1512 OldLabel->getFile(), OldLabel->getLine());
1513 DLI->setArgOperand(0, MetadataAsValue::get(Ctx, NewLabel));
1514 continue;
1515 }
1516
1517 auto IsInvalidLocation = [&NewFunc](Value *Location) {
1518 // Location is invalid if it isn't a constant or an instruction, or is an
1519 // instruction but isn't in the new function.
1520 if (!Location ||
1521 (!isa<Constant>(Location) && !isa<Instruction>(Location)))
1522 return true;
1523 Instruction *LocationInst = dyn_cast<Instruction>(Location);
1524 return LocationInst && LocationInst->getFunction() != &NewFunc;
1525 };
1526
1527 auto *DVI = cast<DbgVariableIntrinsic>(DII);
1528 // If any of the used locations are invalid, delete the intrinsic.
1529 if (any_of(DVI->location_ops(), IsInvalidLocation)) {
1530 DebugIntrinsicsToDelete.push_back(DVI);
1531 continue;
1532 }
1533
1534 // Point the intrinsic to a fresh variable within the new function.
1535 DILocalVariable *OldVar = DVI->getVariable();
1536 DINode *&NewVar = RemappedMetadata[OldVar];
1537 if (!NewVar)
1538 NewVar = DIB.createAutoVariable(
1539 NewSP, OldVar->getName(), OldVar->getFile(), OldVar->getLine(),
1540 OldVar->getType(), /*AlwaysPreserve=*/false, DINode::FlagZero,
1541 OldVar->getAlignInBits());
1542 DVI->setVariable(cast<DILocalVariable>(NewVar));
1543 }
1544 for (auto *DII : DebugIntrinsicsToDelete)
1545 DII->eraseFromParent();
1546 DIB.finalizeSubprogram(NewSP);
1547
1548 // Fix up the scope information attached to the line locations in the new
1549 // function.
1550 for (Instruction &I : instructions(NewFunc)) {
1551 if (const DebugLoc &DL = I.getDebugLoc())
1552 I.setDebugLoc(DILocation::get(Ctx, DL.getLine(), DL.getCol(), NewSP));
1553
1554 // Loop info metadata may contain line locations. Fix them up.
1555 auto updateLoopInfoLoc = [&Ctx, NewSP](Metadata *MD) -> Metadata * {
1556 if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1557 return DILocation::get(Ctx, Loc->getLine(), Loc->getColumn(), NewSP,
1558 nullptr);
1559 return MD;
1560 };
1561 updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);
1562 }
1563 if (!TheCall.getDebugLoc())
1564 TheCall.setDebugLoc(DILocation::get(Ctx, 0, 0, OldSP));
1565
1566 eraseDebugIntrinsicsWithNonLocalRefs(NewFunc);
1567}
1568
1569Function *
1570CodeExtractor::extractCodeRegion(const CodeExtractorAnalysisCache &CEAC) {
1571 if (!isEligible())
1572 return nullptr;
1573
1574 // Assumption: this is a single-entry code region, and the header is the first
1575 // block in the region.
1576 BasicBlock *header = *Blocks.begin();
1577 Function *oldFunction = header->getParent();
1578
1579 // Calculate the entry frequency of the new function before we change the root
1580 // block.
1581 BlockFrequency EntryFreq;
1582 if (BFI) {
1583 assert(BPI && "Both BPI and BFI are required to preserve profile info")(static_cast <bool> (BPI && "Both BPI and BFI are required to preserve profile info"
) ? void (0) : __assert_fail ("BPI && \"Both BPI and BFI are required to preserve profile info\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1583, __extension__ __PRETTY_FUNCTION__))
;
1584 for (BasicBlock *Pred : predecessors(header)) {
1585 if (Blocks.count(Pred))
1586 continue;
1587 EntryFreq +=
1588 BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, header);
1589 }
1590 }
1591
1592 // Remove @llvm.assume calls that will be moved to the new function from the
1593 // old function's assumption cache.
1594 for (BasicBlock *Block : Blocks) {
1595 for (auto It = Block->begin(), End = Block->end(); It != End;) {
1596 Instruction *I = &*It;
1597 ++It;
1598
1599 if (auto *AI = dyn_cast<AssumeInst>(I)) {
1600 if (AC)
1601 AC->unregisterAssumption(AI);
1602 AI->eraseFromParent();
1603 }
1604 }
1605 }
1606
1607 // If we have any return instructions in the region, split those blocks so
1608 // that the return is not in the region.
1609 splitReturnBlocks();
1610
1611 // Calculate the exit blocks for the extracted region and the total exit
1612 // weights for each of those blocks.
1613 DenseMap<BasicBlock *, BlockFrequency> ExitWeights;
1614 SmallPtrSet<BasicBlock *, 1> ExitBlocks;
1615 for (BasicBlock *Block : Blocks) {
1616 for (BasicBlock *Succ : successors(Block)) {
1617 if (!Blocks.count(Succ)) {
1618 // Update the branch weight for this successor.
1619 if (BFI) {
1620 BlockFrequency &BF = ExitWeights[Succ];
1621 BF += BFI->getBlockFreq(Block) * BPI->getEdgeProbability(Block, Succ);
1622 }
1623 ExitBlocks.insert(Succ);
1624 }
1625 }
1626 }
1627 NumExitBlocks = ExitBlocks.size();
1628
1629 // If we have to split PHI nodes of the entry or exit blocks, do so now.
1630 severSplitPHINodesOfEntry(header);
1631 severSplitPHINodesOfExits(ExitBlocks);
1632
1633 // This takes place of the original loop
1634 BasicBlock *codeReplacer = BasicBlock::Create(header->getContext(),
1635 "codeRepl", oldFunction,
1636 header);
1637
1638 // The new function needs a root node because other nodes can branch to the
1639 // head of the region, but the entry node of a function cannot have preds.
1640 BasicBlock *newFuncRoot = BasicBlock::Create(header->getContext(),
1641 "newFuncRoot");
1642 auto *BranchI = BranchInst::Create(header);
1643 // If the original function has debug info, we have to add a debug location
1644 // to the new branch instruction from the artificial entry block.
1645 // We use the debug location of the first instruction in the extracted
1646 // blocks, as there is no other equivalent line in the source code.
1647 if (oldFunction->getSubprogram()) {
1648 any_of(Blocks, [&BranchI](const BasicBlock *BB) {
1649 return any_of(*BB, [&BranchI](const Instruction &I) {
1650 if (!I.getDebugLoc())
1651 return false;
1652 BranchI->setDebugLoc(I.getDebugLoc());
1653 return true;
1654 });
1655 });
1656 }
1657 newFuncRoot->getInstList().push_back(BranchI);
1658
1659 ValueSet inputs, outputs, SinkingCands, HoistingCands;
1660 BasicBlock *CommonExit = nullptr;
1661 findAllocas(CEAC, SinkingCands, HoistingCands, CommonExit);
1662 assert(HoistingCands.empty() || CommonExit)(static_cast <bool> (HoistingCands.empty() || CommonExit
) ? void (0) : __assert_fail ("HoistingCands.empty() || CommonExit"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1662, __extension__ __PRETTY_FUNCTION__))
;
1663
1664 // Find inputs to, outputs from the code region.
1665 findInputsOutputs(inputs, outputs, SinkingCands);
1666
1667 // Now sink all instructions which only have non-phi uses inside the region.
1668 // Group the allocas at the start of the block, so that any bitcast uses of
1669 // the allocas are well-defined.
1670 AllocaInst *FirstSunkAlloca = nullptr;
1671 for (auto *II : SinkingCands) {
1672 if (auto *AI = dyn_cast<AllocaInst>(II)) {
1673 AI->moveBefore(*newFuncRoot, newFuncRoot->getFirstInsertionPt());
1674 if (!FirstSunkAlloca)
1675 FirstSunkAlloca = AI;
1676 }
1677 }
1678 assert((SinkingCands.empty() || FirstSunkAlloca) &&(static_cast <bool> ((SinkingCands.empty() || FirstSunkAlloca
) && "Did not expect a sink candidate without any allocas"
) ? void (0) : __assert_fail ("(SinkingCands.empty() || FirstSunkAlloca) && \"Did not expect a sink candidate without any allocas\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1679, __extension__ __PRETTY_FUNCTION__))
1679 "Did not expect a sink candidate without any allocas")(static_cast <bool> ((SinkingCands.empty() || FirstSunkAlloca
) && "Did not expect a sink candidate without any allocas"
) ? void (0) : __assert_fail ("(SinkingCands.empty() || FirstSunkAlloca) && \"Did not expect a sink candidate without any allocas\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1679, __extension__ __PRETTY_FUNCTION__))
;
1680 for (auto *II : SinkingCands) {
1681 if (!isa<AllocaInst>(II)) {
1682 cast<Instruction>(II)->moveAfter(FirstSunkAlloca);
1683 }
1684 }
1685
1686 if (!HoistingCands.empty()) {
1687 auto *HoistToBlock = findOrCreateBlockForHoisting(CommonExit);
1688 Instruction *TI = HoistToBlock->getTerminator();
1689 for (auto *II : HoistingCands)
1690 cast<Instruction>(II)->moveBefore(TI);
1691 }
1692
1693 // Collect objects which are inputs to the extraction region and also
1694 // referenced by lifetime start markers within it. The effects of these
1695 // markers must be replicated in the calling function to prevent the stack
1696 // coloring pass from merging slots which store input objects.
1697 ValueSet LifetimesStart;
1698 eraseLifetimeMarkersOnInputs(Blocks, SinkingCands, LifetimesStart);
1699
1700 // Construct new function based on inputs/outputs & add allocas for all defs.
1701 Function *newFunction =
1702 constructFunction(inputs, outputs, header, newFuncRoot, codeReplacer,
1703 oldFunction, oldFunction->getParent());
1704
1705 // Update the entry count of the function.
1706 if (BFI) {
1707 auto Count = BFI->getProfileCountFromFreq(EntryFreq.getFrequency());
1708 if (Count.hasValue())
1709 newFunction->setEntryCount(
1710 ProfileCount(Count.getValue(), Function::PCT_Real)); // FIXME
1711 BFI->setBlockFreq(codeReplacer, EntryFreq.getFrequency());
1712 }
1713
1714 CallInst *TheCall =
1715 emitCallAndSwitchStatement(newFunction, codeReplacer, inputs, outputs);
1716
1717 moveCodeToFunction(newFunction);
1718
1719 // Replicate the effects of any lifetime start/end markers which referenced
1720 // input objects in the extraction region by placing markers around the call.
1721 insertLifetimeMarkersSurroundingCall(
1722 oldFunction->getParent(), LifetimesStart.getArrayRef(), {}, TheCall);
1723
1724 // Propagate personality info to the new function if there is one.
1725 if (oldFunction->hasPersonalityFn())
1726 newFunction->setPersonalityFn(oldFunction->getPersonalityFn());
1727
1728 // Update the branch weights for the exit block.
1729 if (BFI && NumExitBlocks > 1)
1730 calculateNewCallTerminatorWeights(codeReplacer, ExitWeights, BPI);
1731
1732 // Loop over all of the PHI nodes in the header and exit blocks, and change
1733 // any references to the old incoming edge to be the new incoming edge.
1734 for (BasicBlock::iterator I = header->begin(); isa<PHINode>(I); ++I) {
1735 PHINode *PN = cast<PHINode>(I);
1736 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
1737 if (!Blocks.count(PN->getIncomingBlock(i)))
1738 PN->setIncomingBlock(i, newFuncRoot);
1739 }
1740
1741 for (BasicBlock *ExitBB : ExitBlocks)
1742 for (PHINode &PN : ExitBB->phis()) {
1743 Value *IncomingCodeReplacerVal = nullptr;
1744 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
1745 // Ignore incoming values from outside of the extracted region.
1746 if (!Blocks.count(PN.getIncomingBlock(i)))
1747 continue;
1748
1749 // Ensure that there is only one incoming value from codeReplacer.
1750 if (!IncomingCodeReplacerVal) {
1751 PN.setIncomingBlock(i, codeReplacer);
1752 IncomingCodeReplacerVal = PN.getIncomingValue(i);
1753 } else
1754 assert(IncomingCodeReplacerVal == PN.getIncomingValue(i) &&(static_cast <bool> (IncomingCodeReplacerVal == PN.getIncomingValue
(i) && "PHI has two incompatbile incoming values from codeRepl"
) ? void (0) : __assert_fail ("IncomingCodeReplacerVal == PN.getIncomingValue(i) && \"PHI has two incompatbile incoming values from codeRepl\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1755, __extension__ __PRETTY_FUNCTION__))
1755 "PHI has two incompatbile incoming values from codeRepl")(static_cast <bool> (IncomingCodeReplacerVal == PN.getIncomingValue
(i) && "PHI has two incompatbile incoming values from codeRepl"
) ? void (0) : __assert_fail ("IncomingCodeReplacerVal == PN.getIncomingValue(i) && \"PHI has two incompatbile incoming values from codeRepl\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Transforms/Utils/CodeExtractor.cpp"
, 1755, __extension__ __PRETTY_FUNCTION__))
;
1756 }
1757 }
1758
1759 fixupDebugInfoPostExtraction(*oldFunction, *newFunction, *TheCall);
1760
1761 // Mark the new function `noreturn` if applicable. Terminators which resume
1762 // exception propagation are treated as returning instructions. This is to
1763 // avoid inserting traps after calls to outlined functions which unwind.
1764 bool doesNotReturn = none_of(*newFunction, [](const BasicBlock &BB) {
1765 const Instruction *Term = BB.getTerminator();
1766 return isa<ReturnInst>(Term) || isa<ResumeInst>(Term);
1767 });
1768 if (doesNotReturn)
1769 newFunction->setDoesNotReturn();
1770
1771 LLVM_DEBUG(if (verifyFunction(*newFunction, &errs())) {do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (verifyFunction(*newFunction, &errs
())) { newFunction->dump(); report_fatal_error("verification of newFunction failed!"
); }; } } while (false)
1772 newFunction->dump();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (verifyFunction(*newFunction, &errs
())) { newFunction->dump(); report_fatal_error("verification of newFunction failed!"
); }; } } while (false)
1773 report_fatal_error("verification of newFunction failed!");do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (verifyFunction(*newFunction, &errs
())) { newFunction->dump(); report_fatal_error("verification of newFunction failed!"
); }; } } while (false)
1774 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (verifyFunction(*newFunction, &errs
())) { newFunction->dump(); report_fatal_error("verification of newFunction failed!"
); }; } } while (false)
;
1775 LLVM_DEBUG(if (verifyFunction(*oldFunction))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (verifyFunction(*oldFunction)) report_fatal_error
("verification of oldFunction failed!"); } } while (false)
1776 report_fatal_error("verification of oldFunction failed!"))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (verifyFunction(*oldFunction)) report_fatal_error
("verification of oldFunction failed!"); } } while (false)
;
1777 LLVM_DEBUG(if (AC && verifyAssumptionCache(*oldFunction, *newFunction, AC))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (AC && verifyAssumptionCache
(*oldFunction, *newFunction, AC)) report_fatal_error("Stale Asumption cache for old Function!"
); } } while (false)
1778 report_fatal_error("Stale Asumption cache for old Function!"))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("code-extractor")) { if (AC && verifyAssumptionCache
(*oldFunction, *newFunction, AC)) report_fatal_error("Stale Asumption cache for old Function!"
); } } while (false)
;
1779 return newFunction;
1780}
1781
1782bool CodeExtractor::verifyAssumptionCache(const Function &OldFunc,
1783 const Function &NewFunc,
1784 AssumptionCache *AC) {
1785 for (auto AssumeVH : AC->assumptions()) {
1786 auto *I = dyn_cast_or_null<CallInst>(AssumeVH);
1787 if (!I)
1788 continue;
1789
1790 // There shouldn't be any llvm.assume intrinsics in the new function.
1791 if (I->getFunction() != &OldFunc)
1792 return true;
1793
1794 // There shouldn't be any stale affected values in the assumption cache
1795 // that were previously in the old function, but that have now been moved
1796 // to the new function.
1797 for (auto AffectedValVH : AC->assumptionsFor(I->getOperand(0))) {
1798 auto *AffectedCI = dyn_cast_or_null<CallInst>(AffectedValVH);
1799 if (!AffectedCI)
1800 continue;
1801 if (AffectedCI->getFunction() != &OldFunc)
1802 return true;
1803 auto *AssumedInst = cast<Instruction>(AffectedCI->getOperand(0));
1804 if (AssumedInst->getFunction() != &OldFunc)
1805 return true;
1806 }
1807 }
1808 return false;
1809}

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60/// an instruction to allocate memory on the stack
61class AllocaInst : public UnaryInstruction {
62 Type *AllocatedType;
63
64 using AlignmentField = AlignmentBitfieldElementT<0>;
65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68 SwiftErrorField>(),
69 "Bitfields must be contiguous");
70
71protected:
72 // Note: Instruction needs to be a friend here to call cloneImpl.
73 friend class Instruction;
74
75 AllocaInst *cloneImpl() const;
76
77public:
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 Instruction *InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace,
86 const Twine &Name, BasicBlock *InsertAtEnd);
87
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name = "", Instruction *InsertBefore = nullptr);
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock *InsertAtEnd);
92
93 /// Return true if there is an allocation size parameter to the allocation
94 /// instruction that is not 1.
95 bool isArrayAllocation() const;
96
97 /// Get the number of elements allocated. For a simple allocation of a single
98 /// element, this will return a constant 1 value.
99 const Value *getArraySize() const { return getOperand(0); }
100 Value *getArraySize() { return getOperand(0); }
101
102 /// Overload to return most specific pointer type.
103 PointerType *getType() const {
104 return cast<PointerType>(Instruction::getType());
105 }
106
107 /// Get allocation size in bits. Returns None if size can't be determined,
108 /// e.g. in case of a VLA.
109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
110
111 /// Return the type that is being allocated by the instruction.
112 Type *getAllocatedType() const { return AllocatedType; }
113 /// for use only in special circumstances that need to generically
114 /// transform a whole instruction (eg: IR linking and vectorization).
115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116
117 /// Return the alignment of the memory that is being allocated by the
118 /// instruction.
119 Align getAlign() const {
120 return Align(1ULL << getSubclassData<AlignmentField>());
121 }
122
123 void setAlignment(Align Align) {
124 setSubclassData<AlignmentField>(Log2(Align));
125 }
126
127 // FIXME: Remove this one transition to Align is over.
128 unsigned getAlignment() const { return getAlign().value(); }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Packed bitfield layout inside Instruction's SubclassData:
  // [volatile:1][log2(alignment)][atomic ordering:3], declared contiguously.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  /// The alignment is stored as its log2 in the bitfield, so decode here.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Returns true if this load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Returns true if this load imposes no ordering stronger than Unordered
  /// and is not volatile.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
297
298//===----------------------------------------------------------------------===//
299// StoreInst Class
300//===----------------------------------------------------------------------===//
301
/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Packed bitfield layout inside Instruction's SubclassData; mirrors
  // LoadInst: [volatile:1][log2(alignment)][atomic ordering:3].
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  unsigned getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access; stored as log2 in the bitfield.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction.  May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// Returns true if this store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// Returns true if this store imposes no ordering stronger than Unordered
  /// and is not volatile.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // Operand 0 is the value being stored; operand 1 is the destination pointer.
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
432
// StoreInst has exactly two operands (value, pointer); this specialization
// lets the hung-off-uses machinery and the accessor macro below know that.
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

// Out-of-line definitions for the accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside StoreInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
438
439//===----------------------------------------------------------------------===//
440// FenceInst Class
441//===----------------------------------------------------------------------===//
442
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  // Only the atomic ordering lives in SubclassData; fences have no operands.
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t s) {
    return User::operator new(s, 0);
  }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction.  May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // Local 3-bit ordering element; cmpxchg stores two orderings (success and
  // failure) so it cannot reuse the single shared ordering field helper.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t s) {
    return User::operator new(s, 3);
  }

  // SubclassData layout:
  // [volatile:1][weak:1][success ordering:3][failure ordering:3][log2 align].
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// A success ordering must be a real atomic ordering.
  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  /// A failure ordering additionally may not carry release semantics, since
  /// no store happens on the failure path.
  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    // Success ordering is otherwise at least as strong as the failure one.
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Operands: 0 = pointer, 1 = expected (compare) value, 2 = new value.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction.  Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};
712
// AtomicCmpXchgInst has exactly three operands (pointer, compare, newval).
template <>
struct OperandTraits<AtomicCmpXchgInst> :
    public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

// Out-of-line definitions for the accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside AtomicCmpXchgInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
719
720//===----------------------------------------------------------------------===//
721// AtomicRMWInst Class
722//===----------------------------------------------------------------------===//
723
/// an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back.  Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make.  In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction.  These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    FIRST_BINOP = Xchg,
    LAST_BINOP = FSub,
    BAD_BINOP
  };

private:
  // Local bitfield element aliases sized for this instruction's fields.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t s) {
    return User::operator new(s, 2);
  }

  // SubclassData layout:
  // [volatile:1][ordering:3][binop:4][log2 align], declared contiguously.
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  static StringRef getOperationName(BinOp Op);

  /// Returns true for the floating-point flavors of atomicrmw.
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
      return true;
    default:
      return false;
    }
  }

  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Operands: 0 = pointer, 1 = value to combine with *pointer.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
908
// AtomicRMWInst has exactly two operands (pointer, value).
template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};

// Out-of-line definitions for the accessors declared by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside AtomicRMWInst.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
915
916//===----------------------------------------------------------------------===//
917// GetElementPtrInst Class
918//===----------------------------------------------------------------------===//
919
920// checkGEPType - Simple wrapper function to give a better assertion failure
921// message on bad indexes for a gep instruction.
922//
// Identity pass-through used by GetElementPtrInst::getGEPReturnType(): in
// debug builds it asserts that getIndexedType() produced a non-null type
// (i.e. the GEP index list was valid for the source element type), then
// returns the type unchanged. In NDEBUG builds it is a no-op wrapper.
// NOTE(review): the "(static_cast <bool> ...)" tail on the assert line is
// the analyzer dump's inline expansion of the assert() macro, not extra code.
923inline Type *checkGEPType(Type *Ty) {
924 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 924, __extension__ __PRETTY_FUNCTION__))
;
925 return Ty;
926}
927
928/// an instruction for type-safe pointer arithmetic to
929/// access elements of arrays and structs
930///
931class GetElementPtrInst : public Instruction {
932 Type *SourceElementType;
933 Type *ResultElementType;
934
935 GetElementPtrInst(const GetElementPtrInst &GEPI);
936
937 /// Constructors - Create a getelementptr instruction with a base pointer an
938 /// list of indices. The first ctor can optionally insert before an existing
939 /// instruction, the second appends the new instruction to the specified
940 /// BasicBlock.
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, Instruction *InsertBefore);
944 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
945 ArrayRef<Value *> IdxList, unsigned Values,
946 const Twine &NameStr, BasicBlock *InsertAtEnd);
947
948 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
949
950protected:
951 // Note: Instruction needs to be a friend here to call cloneImpl.
952 friend class Instruction;
953
954 GetElementPtrInst *cloneImpl() const;
955
956public:
957 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
958 ArrayRef<Value *> IdxList,
959 const Twine &NameStr = "",
960 Instruction *InsertBefore = nullptr) {
961 unsigned Values = 1 + unsigned(IdxList.size());
962 if (!PointeeType
15.1
'PointeeType' is null
15.1
'PointeeType' is null
) {
16
Taking true branch
963 PointeeType =
964 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
17
Called C++ object pointer is null
965 } else {
966 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
967 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
;
968 }
969 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
970 NameStr, InsertBefore);
971 }
972
973 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
974 ArrayRef<Value *> IdxList,
975 const Twine &NameStr,
976 BasicBlock *InsertAtEnd) {
977 unsigned Values = 1 + unsigned(IdxList.size());
978 if (!PointeeType) {
979 PointeeType =
980 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
981 } else {
982 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
983 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
;
984 }
985 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
986 NameStr, InsertAtEnd);
987 }
988
989 /// Create an "inbounds" getelementptr. See the documentation for the
990 /// "inbounds" flag in LangRef.html for details.
991 static GetElementPtrInst *CreateInBounds(Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr = "",
994 Instruction *InsertBefore = nullptr){
995 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
996 }
997
998 static GetElementPtrInst *
999 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
1000 const Twine &NameStr = "",
1001 Instruction *InsertBefore = nullptr) {
1002 GetElementPtrInst *GEP =
1003 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1004 GEP->setIsInBounds(true);
1005 return GEP;
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
1013 }
1014
1015 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1016 ArrayRef<Value *> IdxList,
1017 const Twine &NameStr,
1018 BasicBlock *InsertAtEnd) {
1019 GetElementPtrInst *GEP =
1020 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1021 GEP->setIsInBounds(true);
1022 return GEP;
1023 }
1024
1025 /// Transparently provide more efficient getOperand methods.
1026 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1027
1028 Type *getSourceElementType() const { return SourceElementType; }
1029
1030 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1031 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1032
1033 Type *getResultElementType() const {
1034 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
1035 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
;
1036 return ResultElementType;
1037 }
1038
1039 /// Returns the address space of this instruction's pointer type.
1040 unsigned getAddressSpace() const {
1041 // Note that this is always the same as the pointer operand's address space
1042 // and that is cheaper to compute, so cheat here.
1043 return getPointerAddressSpace();
1044 }
1045
1046 /// Returns the result type of a getelementptr with the given source
1047 /// element type and indexes.
1048 ///
1049 /// Null is returned if the indices are invalid for the specified
1050 /// source element type.
1051 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1052 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1053 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1054
1055 /// Return the type of the element at the given index of an indexable
1056 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1057 ///
1058 /// Returns null if the type can't be indexed, or the given index is not
1059 /// legal for the given type.
1060 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1061 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1062
1063 inline op_iterator idx_begin() { return op_begin()+1; }
1064 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1065 inline op_iterator idx_end() { return op_end(); }
1066 inline const_op_iterator idx_end() const { return op_end(); }
1067
1068 inline iterator_range<op_iterator> indices() {
1069 return make_range(idx_begin(), idx_end());
1070 }
1071
1072 inline iterator_range<const_op_iterator> indices() const {
1073 return make_range(idx_begin(), idx_end());
1074 }
1075
1076 Value *getPointerOperand() {
1077 return getOperand(0);
1078 }
1079 const Value *getPointerOperand() const {
1080 return getOperand(0);
1081 }
1082 static unsigned getPointerOperandIndex() {
1083 return 0U; // get index for modifying correct operand.
1084 }
1085
1086 /// Method to return the pointer operand as a
1087 /// PointerType.
1088 Type *getPointerOperandType() const {
1089 return getPointerOperand()->getType();
1090 }
1091
1092 /// Returns the address space of the pointer operand.
1093 unsigned getPointerAddressSpace() const {
1094 return getPointerOperandType()->getPointerAddressSpace();
1095 }
1096
1097 /// Returns the pointer type returned by the GEP
1098 /// instruction, which may be a vector of pointers.
1099 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1100 ArrayRef<Value *> IdxList) {
1101 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1102 Ptr->getType()->getPointerAddressSpace());
1103 // Vector GEP
1104 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1105 ElementCount EltCount = PtrVTy->getElementCount();
1106 return VectorType::get(PtrTy, EltCount);
1107 }
1108 for (Value *Index : IdxList)
1109 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1110 ElementCount EltCount = IndexVTy->getElementCount();
1111 return VectorType::get(PtrTy, EltCount);
1112 }
1113 // Scalar GEP
1114 return PtrTy;
1115 }
1116
1117 unsigned getNumIndices() const { // Note: always non-negative
1118 return getNumOperands() - 1;
1119 }
1120
1121 bool hasIndices() const {
1122 return getNumOperands() > 1;
1123 }
1124
1125 /// Return true if all of the indices of this GEP are
1126 /// zeros. If so, the result pointer and the first operand have the same
1127 /// value, just potentially different types.
1128 bool hasAllZeroIndices() const;
1129
1130 /// Return true if all of the indices of this GEP are
1131 /// constant integers. If so, the result pointer and the first operand have
1132 /// a constant offset between them.
1133 bool hasAllConstantIndices() const;
1134
1135 /// Set or clear the inbounds flag on this GEP instruction.
1136 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1137 void setIsInBounds(bool b = true);
1138
1139 /// Determine whether the GEP has the inbounds flag.
1140 bool isInBounds() const;
1141
1142 /// Accumulate the constant address offset of this GEP if possible.
1143 ///
1144 /// This routine accepts an APInt into which it will accumulate the constant
1145 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1146 /// all-constant, it returns false and the value of the offset APInt is
1147 /// undefined (it is *not* preserved!). The APInt passed into this routine
1148 /// must be at least as wide as the IntPtr type for the address space of
1149 /// the base GEP pointer.
1150 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1151
1152 // Methods for support type inquiry through isa, cast, and dyn_cast:
1153 static bool classof(const Instruction *I) {
1154 return (I->getOpcode() == Instruction::GetElementPtr);
1155 }
1156 static bool classof(const Value *V) {
1157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1158 }
1159};
1160
// OperandTraits specialization: a GEP is variadic with a minimum of 1 operand
// (the base pointer at index 0); the index operands follow at 1..N-1.
1161template <>
1162struct OperandTraits<GetElementPtrInst> :
1163 public VariadicOperandTraits<GetElementPtrInst, 1> {
1164};
1165
// Out-of-line definition of the insert-before-instruction GEP constructor.
// Computes the result pointer type from (PointeeType, Ptr, indices), places
// its operands at the tail of the allocation made by Create's placement new,
// caches the source/result element types, debug-asserts that the cached
// result element type matches the produced value type, then stores the
// pointer and index operands via init().
// NOTE(review): analyzer dump — the "(static_cast <bool> ... __assert_fail"
// fragment is the inline expansion of the assert() on lines 1175-1176.
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 Instruction *InsertBefore)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertBefore),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
;
1177 init(Ptr, IdxList, NameStr);
1178}
1179
// Out-of-line definition of the append-to-basic-block GEP constructor.
// Identical to the insert-before variant except the new instruction is
// appended to InsertAtEnd; see that constructor for the operand-placement
// and type-caching details.
// NOTE(review): analyzer dump — the "(static_cast <bool> ... __assert_fail"
// fragment is the inline expansion of the assert() on lines 1189-1190.
1180GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1181 ArrayRef<Value *> IdxList, unsigned Values,
1182 const Twine &NameStr,
1183 BasicBlock *InsertAtEnd)
1184 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1185 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1186 Values, InsertAtEnd),
1187 SourceElementType(PointeeType),
1188 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1189 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
1190 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
;
1191 init(Ptr, IdxList, NameStr);
1192}
1193
// Out-of-line transparent operand accessors for GetElementPtrInst (variadic:
// base pointer + indices). As above, the macro invocation is followed by its
// full preprocessor expansion because this is a static-analyzer source dump;
// the "(static_cast <bool> ... __assert_fail ...)" fragments are the expanded
// assert() range checks inside getOperand/setOperand.
1194DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1194, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1194, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1195
1196//===----------------------------------------------------------------------===//
1197// ICmpInst Class
1198//===----------------------------------------------------------------------===//
1199
1200/// This instruction compares its operands according to the predicate given
1201/// to the constructor. It only operates on integers or pointers. The operands
1202/// must be identical types.
1203/// Represent an integer comparison operator.
1204class ICmpInst: public CmpInst {
1205 void AssertOK() {
1206 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
1207 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
;
1208 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
1209 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
;
1210 // Check that the operands are the right type
1211 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1212 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1213 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
;
1214 }
1215
1216protected:
1217 // Note: Instruction needs to be a friend here to call cloneImpl.
1218 friend class Instruction;
1219
1220 /// Clone an identical ICmpInst
1221 ICmpInst *cloneImpl() const;
1222
1223public:
1224 /// Constructor with insert-before-instruction semantics.
1225 ICmpInst(
1226 Instruction *InsertBefore, ///< Where to insert
1227 Predicate pred, ///< The predicate to use for the comparison
1228 Value *LHS, ///< The left-hand-side of the expression
1229 Value *RHS, ///< The right-hand-side of the expression
1230 const Twine &NameStr = "" ///< Name of the instruction
1231 ) : CmpInst(makeCmpResultType(LHS->getType()),
1232 Instruction::ICmp, pred, LHS, RHS, NameStr,
1233 InsertBefore) {
1234#ifndef NDEBUG
1235 AssertOK();
1236#endif
1237 }
1238
1239 /// Constructor with insert-at-end semantics.
1240 ICmpInst(
1241 BasicBlock &InsertAtEnd, ///< Block to insert into.
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr,
1248 &InsertAtEnd) {
1249#ifndef NDEBUG
1250 AssertOK();
1251#endif
1252 }
1253
1254 /// Constructor with no-insertion semantics
1255 ICmpInst(
1256 Predicate pred, ///< The predicate to use for the comparison
1257 Value *LHS, ///< The left-hand-side of the expression
1258 Value *RHS, ///< The right-hand-side of the expression
1259 const Twine &NameStr = "" ///< Name of the instruction
1260 ) : CmpInst(makeCmpResultType(LHS->getType()),
1261 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1262#ifndef NDEBUG
1263 AssertOK();
1264#endif
1265 }
1266
1267 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1268 /// @returns the predicate that would be the result if the operand were
1269 /// regarded as signed.
1270 /// Return the signed version of the predicate
1271 Predicate getSignedPredicate() const {
1272 return getSignedPredicate(getPredicate());
1273 }
1274
1275 /// This is a static version that you can use without an instruction.
1276 /// Return the signed version of the predicate.
1277 static Predicate getSignedPredicate(Predicate pred);
1278
1279 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1280 /// @returns the predicate that would be the result if the operand were
1281 /// regarded as unsigned.
1282 /// Return the unsigned version of the predicate
1283 Predicate getUnsignedPredicate() const {
1284 return getUnsignedPredicate(getPredicate());
1285 }
1286
1287 /// This is a static version that you can use without an instruction.
1288 /// Return the unsigned version of the predicate.
1289 static Predicate getUnsignedPredicate(Predicate pred);
1290
1291 /// Return true if this predicate is either EQ or NE. This also
1292 /// tests for commutativity.
1293 static bool isEquality(Predicate P) {
1294 return P == ICMP_EQ || P == ICMP_NE;
1295 }
1296
1297 /// Return true if this predicate is either EQ or NE. This also
1298 /// tests for commutativity.
1299 bool isEquality() const {
1300 return isEquality(getPredicate());
1301 }
1302
1303 /// @returns true if the predicate of this ICmpInst is commutative
1304 /// Determine if this relation is commutative.
1305 bool isCommutative() const { return isEquality(); }
1306
1307 /// Return true if the predicate is relational (not EQ or NE).
1308 ///
1309 bool isRelational() const {
1310 return !isEquality();
1311 }
1312
1313 /// Return true if the predicate is relational (not EQ or NE).
1314 ///
1315 static bool isRelational(Predicate P) {
1316 return !isEquality(P);
1317 }
1318
1319 /// Return true if the predicate is SGT or UGT.
1320 ///
1321 static bool isGT(Predicate P) {
1322 return P == ICMP_SGT || P == ICMP_UGT;
1323 }
1324
1325 /// Return true if the predicate is SLT or ULT.
1326 ///
1327 static bool isLT(Predicate P) {
1328 return P == ICMP_SLT || P == ICMP_ULT;
1329 }
1330
1331 /// Return true if the predicate is SGE or UGE.
1332 ///
1333 static bool isGE(Predicate P) {
1334 return P == ICMP_SGE || P == ICMP_UGE;
1335 }
1336
1337 /// Return true if the predicate is SLE or ULE.
1338 ///
1339 static bool isLE(Predicate P) {
1340 return P == ICMP_SLE || P == ICMP_ULE;
1341 }
1342
1343 /// Exchange the two operands to this instruction in such a way that it does
1344 /// not modify the semantics of the instruction. The predicate value may be
1345 /// changed to retain the same result if the predicate is order dependent
1346 /// (e.g. ult).
1347 /// Swap operands and adjust predicate.
1348 void swapOperands() {
1349 setPredicate(getSwappedPredicate());
1350 Op<0>().swap(Op<1>());
1351 }
1352
1353 // Methods for support type inquiry through isa, cast, and dyn_cast:
1354 static bool classof(const Instruction *I) {
1355 return I->getOpcode() == Instruction::ICmp;
1356 }
1357 static bool classof(const Value *V) {
1358 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1359 }
1360};
1361
1362//===----------------------------------------------------------------------===//
1363// FCmpInst Class
1364//===----------------------------------------------------------------------===//
1365
1366/// This instruction compares its operands according to the predicate given
1367/// to the constructor. It only operates on floating point values or packed
1368/// vectors of floating point values. The operands must be identical types.
1369/// Represents a floating point comparison operator.
1370class FCmpInst: public CmpInst {
1371 void AssertOK() {
1372 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1372, __extension__ __PRETTY_FUNCTION__))
;
1373 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
1374 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
;
1375 // Check that the operands are the right type
1376 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
1377 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
;
1378 }
1379
1380protected:
1381 // Note: Instruction needs to be a friend here to call cloneImpl.
1382 friend class Instruction;
1383
1384 /// Clone an identical FCmpInst
1385 FCmpInst *cloneImpl() const;
1386
1387public:
1388 /// Constructor with insert-before-instruction semantics.
1389 FCmpInst(
1390 Instruction *InsertBefore, ///< Where to insert
1391 Predicate pred, ///< The predicate to use for the comparison
1392 Value *LHS, ///< The left-hand-side of the expression
1393 Value *RHS, ///< The right-hand-side of the expression
1394 const Twine &NameStr = "" ///< Name of the instruction
1395 ) : CmpInst(makeCmpResultType(LHS->getType()),
1396 Instruction::FCmp, pred, LHS, RHS, NameStr,
1397 InsertBefore) {
1398 AssertOK();
1399 }
1400
1401 /// Constructor with insert-at-end semantics.
1402 FCmpInst(
1403 BasicBlock &InsertAtEnd, ///< Block to insert into.
1404 Predicate pred, ///< The predicate to use for the comparison
1405 Value *LHS, ///< The left-hand-side of the expression
1406 Value *RHS, ///< The right-hand-side of the expression
1407 const Twine &NameStr = "" ///< Name of the instruction
1408 ) : CmpInst(makeCmpResultType(LHS->getType()),
1409 Instruction::FCmp, pred, LHS, RHS, NameStr,
1410 &InsertAtEnd) {
1411 AssertOK();
1412 }
1413
1414 /// Constructor with no-insertion semantics
1415 FCmpInst(
1416 Predicate Pred, ///< The predicate to use for the comparison
1417 Value *LHS, ///< The left-hand-side of the expression
1418 Value *RHS, ///< The right-hand-side of the expression
1419 const Twine &NameStr = "", ///< Name of the instruction
1420 Instruction *FlagsSource = nullptr
1421 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1422 RHS, NameStr, nullptr, FlagsSource) {
1423 AssertOK();
1424 }
1425
1426 /// @returns true if the predicate of this instruction is EQ or NE.
1427 /// Determine if this is an equality predicate.
1428 static bool isEquality(Predicate Pred) {
1429 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1430 Pred == FCMP_UNE;
1431 }
1432
1433 /// @returns true if the predicate of this instruction is EQ or NE.
1434 /// Determine if this is an equality predicate.
1435 bool isEquality() const { return isEquality(getPredicate()); }
1436
1437 /// @returns true if the predicate of this instruction is commutative.
1438 /// Determine if this is a commutative predicate.
1439 bool isCommutative() const {
1440 return isEquality() ||
1441 getPredicate() == FCMP_FALSE ||
1442 getPredicate() == FCMP_TRUE ||
1443 getPredicate() == FCMP_ORD ||
1444 getPredicate() == FCMP_UNO;
1445 }
1446
1447 /// @returns true if the predicate is relational (not EQ or NE).
1448 /// Determine if this a relational predicate.
1449 bool isRelational() const { return !isEquality(); }
1450
1451 /// Exchange the two operands to this instruction in such a way that it does
1452 /// not modify the semantics of the instruction. The predicate value may be
1453 /// changed to retain the same result if the predicate is order dependent
1454 /// (e.g. ult).
1455 /// Swap operands and adjust predicate.
1456 void swapOperands() {
1457 setPredicate(getSwappedPredicate());
1458 Op<0>().swap(Op<1>());
1459 }
1460
1461 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1462 static bool classof(const Instruction *I) {
1463 return I->getOpcode() == Instruction::FCmp;
1464 }
1465 static bool classof(const Value *V) {
1466 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1467 }
1468};
1469
1470//===----------------------------------------------------------------------===//
1471/// This class represents a function call, abstracting a target
1472/// machine's calling convention. This class uses low bit of the SubClassData
1473/// field to indicate whether or not this is a tail call. The rest of the bits
1474/// hold the calling convention of the call.
1475///
1476class CallInst : public CallBase {
1477 CallInst(const CallInst &CI);
1478
1479 /// Construct a CallInst given a range of arguments.
1480 /// Construct a CallInst from a range of arguments
1481 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1482 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1483 Instruction *InsertBefore);
1484
1485 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1486 const Twine &NameStr, Instruction *InsertBefore)
1487 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1488
1489 /// Construct a CallInst given a range of arguments.
1490 /// Construct a CallInst from a range of arguments
1491 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1492 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1493 BasicBlock *InsertAtEnd);
1494
1495 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1496 Instruction *InsertBefore);
1497
1498 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1499 BasicBlock *InsertAtEnd);
1500
1501 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1502 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1503 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1504
1505 /// Compute the number of operands to allocate.
1506 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1507 // We need one operand for the called function, plus the input operand
1508 // counts provided.
1509 return 1 + NumArgs + NumBundleInputs;
1510 }
1511
1512protected:
1513 // Note: Instruction needs to be a friend here to call cloneImpl.
1514 friend class Instruction;
1515
1516 CallInst *cloneImpl() const;
1517
1518public:
1519 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1520 Instruction *InsertBefore = nullptr) {
1521 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1522 }
1523
1524 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1525 const Twine &NameStr,
1526 Instruction *InsertBefore = nullptr) {
1527 return new (ComputeNumOperands(Args.size()))
1528 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1529 }
1530
1531 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1532 ArrayRef<OperandBundleDef> Bundles = None,
1533 const Twine &NameStr = "",
1534 Instruction *InsertBefore = nullptr) {
1535 const int NumOperands =
1536 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1537 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1538
1539 return new (NumOperands, DescriptorBytes)
1540 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1541 }
1542
1543 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1544 BasicBlock *InsertAtEnd) {
1545 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1546 }
1547
1548 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1549 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1550 return new (ComputeNumOperands(Args.size()))
1551 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1552 }
1553
1554 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1555 ArrayRef<OperandBundleDef> Bundles,
1556 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1557 const int NumOperands =
1558 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1559 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1560
1561 return new (NumOperands, DescriptorBytes)
1562 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1563 }
1564
1565 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1566 Instruction *InsertBefore = nullptr) {
1567 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1568 InsertBefore);
1569 }
1570
1571 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1572 ArrayRef<OperandBundleDef> Bundles = None,
1573 const Twine &NameStr = "",
1574 Instruction *InsertBefore = nullptr) {
1575 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1576 NameStr, InsertBefore);
1577 }
1578
1579 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1580 const Twine &NameStr,
1581 Instruction *InsertBefore = nullptr) {
1582 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1583 InsertBefore);
1584 }
1585
1586 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1587 BasicBlock *InsertAtEnd) {
1588 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1589 InsertAtEnd);
1590 }
1591
1592 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1593 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1594 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1595 InsertAtEnd);
1596 }
1597
1598 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1599 ArrayRef<OperandBundleDef> Bundles,
1600 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1601 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1602 NameStr, InsertAtEnd);
1603 }
1604
1605 /// Create a clone of \p CI with a different set of operand bundles and
1606 /// insert it before \p InsertPt.
1607 ///
1608 /// The returned call instruction is identical \p CI in every way except that
1609 /// the operand bundles for the new instruction are set to the operand bundles
1610 /// in \p Bundles.
1611 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1612 Instruction *InsertPt = nullptr);
1613
1614 /// Generate the IR for a call to malloc:
1615 /// 1. Compute the malloc call's argument as the specified type's size,
1616 /// possibly multiplied by the array size if the array size is not
1617 /// constant 1.
1618 /// 2. Call malloc with that argument.
1619 /// 3. Bitcast the result of the malloc call to the specified type.
1620 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1621 Type *AllocTy, Value *AllocSize,
1622 Value *ArraySize = nullptr,
1623 Function *MallocF = nullptr,
1624 const Twine &Name = "");
1625 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1626 Type *AllocTy, Value *AllocSize,
1627 Value *ArraySize = nullptr,
1628 Function *MallocF = nullptr,
1629 const Twine &Name = "");
1630 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1631 Type *AllocTy, Value *AllocSize,
1632 Value *ArraySize = nullptr,
1633 ArrayRef<OperandBundleDef> Bundles = None,
1634 Function *MallocF = nullptr,
1635 const Twine &Name = "");
1636 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1637 Type *AllocTy, Value *AllocSize,
1638 Value *ArraySize = nullptr,
1639 ArrayRef<OperandBundleDef> Bundles = None,
1640 Function *MallocF = nullptr,
1641 const Twine &Name = "");
1642 /// Generate the IR for a call to the builtin free function.
1643 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1644 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1645 static Instruction *CreateFree(Value *Source,
1646 ArrayRef<OperandBundleDef> Bundles,
1647 Instruction *InsertBefore);
1648 static Instruction *CreateFree(Value *Source,
1649 ArrayRef<OperandBundleDef> Bundles,
1650 BasicBlock *InsertAtEnd);
1651
1652 // Note that 'musttail' implies 'tail'.
1653 enum TailCallKind : unsigned {
1654 TCK_None = 0,
1655 TCK_Tail = 1,
1656 TCK_MustTail = 2,
1657 TCK_NoTail = 3,
1658 TCK_LAST = TCK_NoTail
1659 };
1660
1661 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1662 static_assert(
1663 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1664 "Bitfields must be contiguous");
1665
1666 TailCallKind getTailCallKind() const {
1667 return getSubclassData<TailCallKindField>();
1668 }
1669
1670 bool isTailCall() const {
1671 TailCallKind Kind = getTailCallKind();
1672 return Kind == TCK_Tail || Kind == TCK_MustTail;
1673 }
1674
1675 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1676
1677 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1678
1679 void setTailCallKind(TailCallKind TCK) {
1680 setSubclassData<TailCallKindField>(TCK);
1681 }
1682
1683 void setTailCall(bool IsTc = true) {
1684 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1685 }
1686
1687 /// Return true if the call can return twice
1688 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1689 void setCanReturnTwice() {
1690 addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
1691 }
1692
1693 // Methods for support type inquiry through isa, cast, and dyn_cast:
1694 static bool classof(const Instruction *I) {
1695 return I->getOpcode() == Instruction::Call;
1696 }
1697 static bool classof(const Value *V) {
1698 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1699 }
1700
1701 /// Updates profile metadata by scaling it by \p S / \p T.
1702 void updateProfWeight(uint64_t S, uint64_t T);
1703
1704private:
1705 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1706 // method so that subclasses cannot accidentally use it.
1707 template <typename Bitfield>
1708 void setSubclassData(typename Bitfield::Type Value) {
1709 Instruction::setSubclassData<Bitfield>(Value);
1710 }
1711};
1712
1713CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1714 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1715 BasicBlock *InsertAtEnd)
1716 : CallBase(Ty->getReturnType(), Instruction::Call,
1717 OperandTraits<CallBase>::op_end(this) -
1718 (Args.size() + CountBundleInputs(Bundles) + 1),
1719 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1720 InsertAtEnd) {
1721 init(Ty, Func, Args, Bundles, NameStr);
1722}
1723
1724CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1725 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1726 Instruction *InsertBefore)
1727 : CallBase(Ty->getReturnType(), Instruction::Call,
1728 OperandTraits<CallBase>::op_end(this) -
1729 (Args.size() + CountBundleInputs(Bundles) + 1),
1730 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1731 InsertBefore) {
1732 init(Ty, Func, Args, Bundles, NameStr);
1733}
1734
1735//===----------------------------------------------------------------------===//
1736// SelectInst Class
1737//===----------------------------------------------------------------------===//
1738
1739/// This class represents the LLVM 'select' instruction.
1740///
1741class SelectInst : public Instruction {
1742 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1743 Instruction *InsertBefore)
1744 : Instruction(S1->getType(), Instruction::Select,
1745 &Op<0>(), 3, InsertBefore) {
1746 init(C, S1, S2);
1747 setName(NameStr);
1748 }
1749
1750 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1751 BasicBlock *InsertAtEnd)
1752 : Instruction(S1->getType(), Instruction::Select,
1753 &Op<0>(), 3, InsertAtEnd) {
1754 init(C, S1, S2);
1755 setName(NameStr);
1756 }
1757
1758 void init(Value *C, Value *S1, Value *S2) {
1759 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) &&
"Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1759, __extension__ __PRETTY_FUNCTION__))
;
1760 Op<0>() = C;
1761 Op<1>() = S1;
1762 Op<2>() = S2;
1763 }
1764
1765protected:
1766 // Note: Instruction needs to be a friend here to call cloneImpl.
1767 friend class Instruction;
1768
1769 SelectInst *cloneImpl() const;
1770
1771public:
1772 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1773 const Twine &NameStr = "",
1774 Instruction *InsertBefore = nullptr,
1775 Instruction *MDFrom = nullptr) {
1776 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1777 if (MDFrom)
1778 Sel->copyMetadata(*MDFrom);
1779 return Sel;
1780 }
1781
1782 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1783 const Twine &NameStr,
1784 BasicBlock *InsertAtEnd) {
1785 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1786 }
1787
1788 const Value *getCondition() const { return Op<0>(); }
1789 const Value *getTrueValue() const { return Op<1>(); }
1790 const Value *getFalseValue() const { return Op<2>(); }
1791 Value *getCondition() { return Op<0>(); }
1792 Value *getTrueValue() { return Op<1>(); }
1793 Value *getFalseValue() { return Op<2>(); }
1794
1795 void setCondition(Value *V) { Op<0>() = V; }
1796 void setTrueValue(Value *V) { Op<1>() = V; }
1797 void setFalseValue(Value *V) { Op<2>() = V; }
1798
1799 /// Swap the true and false values of the select instruction.
1800 /// This doesn't swap prof metadata.
1801 void swapValues() { Op<1>().swap(Op<2>()); }
1802
1803 /// Return a string if the specified operands are invalid
1804 /// for a select operation, otherwise return null.
1805 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1806
1807 /// Transparently provide more efficient getOperand methods.
1808 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1809
1810 OtherOps getOpcode() const {
1811 return static_cast<OtherOps>(Instruction::getOpcode());
1812 }
1813
1814 // Methods for support type inquiry through isa, cast, and dyn_cast:
1815 static bool classof(const Instruction *I) {
1816 return I->getOpcode() == Instruction::Select;
1817 }
1818 static bool classof(const Value *V) {
1819 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1820 }
1821};
1822
1823template <>
1824struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1825};
1826
1827DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits
<SelectInst>::op_begin(this); } SelectInst::const_op_iterator
SelectInst::op_begin() const { return OperandTraits<SelectInst
>::op_begin(const_cast<SelectInst*>(this)); } SelectInst
::op_iterator SelectInst::op_end() { return OperandTraits<
SelectInst>::op_end(this); } SelectInst::const_op_iterator
SelectInst::op_end() const { return OperandTraits<SelectInst
>::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SelectInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1827, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SelectInst>::op_begin(const_cast
<SelectInst*>(this))[i_nocapture].get()); } void SelectInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SelectInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1827, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SelectInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SelectInst::getNumOperands() const { return OperandTraits
<SelectInst>::operands(this); } template <int Idx_nocapture
> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SelectInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
1828
1829//===----------------------------------------------------------------------===//
1830// VAArgInst Class
1831//===----------------------------------------------------------------------===//
1832
1833/// This class represents the va_arg llvm instruction, which returns
1834/// an argument of the specified type given a va_list and increments that list
1835///
1836class VAArgInst : public UnaryInstruction {
1837protected:
1838 // Note: Instruction needs to be a friend here to call cloneImpl.
1839 friend class Instruction;
1840
1841 VAArgInst *cloneImpl() const;
1842
1843public:
1844 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1845 Instruction *InsertBefore = nullptr)
1846 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1847 setName(NameStr);
1848 }
1849
1850 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1851 BasicBlock *InsertAtEnd)
1852 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1853 setName(NameStr);
1854 }
1855
1856 Value *getPointerOperand() { return getOperand(0); }
1857 const Value *getPointerOperand() const { return getOperand(0); }
1858 static unsigned getPointerOperandIndex() { return 0U; }
1859
1860 // Methods for support type inquiry through isa, cast, and dyn_cast:
1861 static bool classof(const Instruction *I) {
1862 return I->getOpcode() == VAArg;
1863 }
1864 static bool classof(const Value *V) {
1865 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1866 }
1867};
1868
1869//===----------------------------------------------------------------------===//
1870// ExtractElementInst Class
1871//===----------------------------------------------------------------------===//
1872
1873/// This instruction extracts a single (scalar)
1874/// element from a VectorType value
1875///
1876class ExtractElementInst : public Instruction {
1877 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878 Instruction *InsertBefore = nullptr);
1879 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880 BasicBlock *InsertAtEnd);
1881
1882protected:
1883 // Note: Instruction needs to be a friend here to call cloneImpl.
1884 friend class Instruction;
1885
1886 ExtractElementInst *cloneImpl() const;
1887
1888public:
1889 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890 const Twine &NameStr = "",
1891 Instruction *InsertBefore = nullptr) {
1892 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893 }
1894
1895 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896 const Twine &NameStr,
1897 BasicBlock *InsertAtEnd) {
1898 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899 }
1900
1901 /// Return true if an extractelement instruction can be
1902 /// formed with the specified operands.
1903 static bool isValidOperands(const Value *Vec, const Value *Idx);
1904
1905 Value *getVectorOperand() { return Op<0>(); }
1906 Value *getIndexOperand() { return Op<1>(); }
1907 const Value *getVectorOperand() const { return Op<0>(); }
1908 const Value *getIndexOperand() const { return Op<1>(); }
1909
1910 VectorType *getVectorOperandType() const {
1911 return cast<VectorType>(getVectorOperand()->getType());
1912 }
1913
1914 /// Transparently provide more efficient getOperand methods.
1915 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1916
1917 // Methods for support type inquiry through isa, cast, and dyn_cast:
1918 static bool classof(const Instruction *I) {
1919 return I->getOpcode() == Instruction::ExtractElement;
1920 }
1921 static bool classof(const Value *V) {
1922 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923 }
1924};
1925
1926template <>
1927struct OperandTraits<ExtractElementInst> :
1928 public FixedNumOperandTraits<ExtractElementInst, 2> {
1929};
1930
1931DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin(
) { return OperandTraits<ExtractElementInst>::op_begin(
this); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_begin() const { return OperandTraits<ExtractElementInst
>::op_begin(const_cast<ExtractElementInst*>(this)); }
ExtractElementInst::op_iterator ExtractElementInst::op_end()
{ return OperandTraits<ExtractElementInst>::op_end(this
); } ExtractElementInst::const_op_iterator ExtractElementInst
::op_end() const { return OperandTraits<ExtractElementInst
>::op_end(const_cast<ExtractElementInst*>(this)); } Value
*ExtractElementInst::getOperand(unsigned i_nocapture) const {
(static_cast <bool> (i_nocapture < OperandTraits<
ExtractElementInst>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1931, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ExtractElementInst>::op_begin
(const_cast<ExtractElementInst*>(this))[i_nocapture].get
()); } void ExtractElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ExtractElementInst>::operands(this)
&& "setOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1931, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ExtractElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ExtractElementInst::getNumOperands() const { return
OperandTraits<ExtractElementInst>::operands(this); } template
<int Idx_nocapture> Use &ExtractElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ExtractElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1932
1933//===----------------------------------------------------------------------===//
1934// InsertElementInst Class
1935//===----------------------------------------------------------------------===//
1936
1937/// This instruction inserts a single (scalar)
1938/// element into a VectorType value
1939///
1940class InsertElementInst : public Instruction {
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942 const Twine &NameStr = "",
1943 Instruction *InsertBefore = nullptr);
1944 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945 BasicBlock *InsertAtEnd);
1946
1947protected:
1948 // Note: Instruction needs to be a friend here to call cloneImpl.
1949 friend class Instruction;
1950
1951 InsertElementInst *cloneImpl() const;
1952
1953public:
1954 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955 const Twine &NameStr = "",
1956 Instruction *InsertBefore = nullptr) {
1957 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958 }
1959
1960 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961 const Twine &NameStr,
1962 BasicBlock *InsertAtEnd) {
1963 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964 }
1965
1966 /// Return true if an insertelement instruction can be
1967 /// formed with the specified operands.
1968 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969 const Value *Idx);
1970
1971 /// Overload to return most specific vector type.
1972 ///
1973 VectorType *getType() const {
1974 return cast<VectorType>(Instruction::getType());
1975 }
1976
1977 /// Transparently provide more efficient getOperand methods.
1978 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1979
1980 // Methods for support type inquiry through isa, cast, and dyn_cast:
1981 static bool classof(const Instruction *I) {
1982 return I->getOpcode() == Instruction::InsertElement;
1983 }
1984 static bool classof(const Value *V) {
1985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986 }
1987};
1988
1989template <>
1990struct OperandTraits<InsertElementInst> :
1991 public FixedNumOperandTraits<InsertElementInst, 3> {
1992};
1993
1994DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() {
return OperandTraits<InsertElementInst>::op_begin(this
); } InsertElementInst::const_op_iterator InsertElementInst::
op_begin() const { return OperandTraits<InsertElementInst>
::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst
::op_iterator InsertElementInst::op_end() { return OperandTraits
<InsertElementInst>::op_end(this); } InsertElementInst::
const_op_iterator InsertElementInst::op_end() const { return OperandTraits
<InsertElementInst>::op_end(const_cast<InsertElementInst
*>(this)); } Value *InsertElementInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<InsertElementInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1994, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertElementInst>::op_begin
(const_cast<InsertElementInst*>(this))[i_nocapture].get
()); } void InsertElementInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<InsertElementInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1994, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertElementInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertElementInst::getNumOperands() const { return
OperandTraits<InsertElementInst>::operands(this); } template
<int Idx_nocapture> Use &InsertElementInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &InsertElementInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
/// Sentinel shuffle-mask element value: -1 marks a result element whose value
/// is undefined.
constexpr int UndefMaskElem = -1;
2001
2002/// This instruction constructs a fixed permutation of two
2003/// input vectors.
2004///
2005/// For each element of the result vector, the shuffle mask selects an element
2006/// from one of the input vectors to copy to the result. Non-negative elements
2007/// in the mask represent an index into the concatenated pair of input vectors.
2008/// UndefMaskElem (-1) specifies that the result element is undefined.
2009///
2010/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011/// requirement may be relaxed in the future.
2012class ShuffleVectorInst : public Instruction {
2013 SmallVector<int, 4> ShuffleMask;
2014 Constant *ShuffleMaskForBitcode;
2015
2016protected:
2017 // Note: Instruction needs to be a friend here to call cloneImpl.
2018 friend class Instruction;
2019
2020 ShuffleVectorInst *cloneImpl() const;
2021
2022public:
2023 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2024 const Twine &NameStr = "",
2025 Instruction *InsertBefor = nullptr);
2026 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2027 const Twine &NameStr, BasicBlock *InsertAtEnd);
2028 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2029 const Twine &NameStr = "",
2030 Instruction *InsertBefor = nullptr);
2031 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2032 const Twine &NameStr, BasicBlock *InsertAtEnd);
2033
2034 void *operator new(size_t s) { return User::operator new(s, 2); }
2035
2036 /// Swap the operands and adjust the mask to preserve the semantics
2037 /// of the instruction.
2038 void commute();
2039
2040 /// Return true if a shufflevector instruction can be
2041 /// formed with the specified operands.
2042 static bool isValidOperands(const Value *V1, const Value *V2,
2043 const Value *Mask);
2044 static bool isValidOperands(const Value *V1, const Value *V2,
2045 ArrayRef<int> Mask);
2046
2047 /// Overload to return most specific vector type.
2048 ///
2049 VectorType *getType() const {
2050 return cast<VectorType>(Instruction::getType());
2051 }
2052
2053 /// Transparently provide more efficient getOperand methods.
2054 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2055
2056 /// Return the shuffle mask value of this instruction for the given element
2057 /// index. Return UndefMaskElem if the element is undef.
2058 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059
2060 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2061 /// elements of the mask are returned as UndefMaskElem.
2062 static void getShuffleMask(const Constant *Mask,
2063 SmallVectorImpl<int> &Result);
2064
2065 /// Return the mask for this instruction as a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2068 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2069 }
2070
2071 /// Return the mask for this instruction, for use in bitcode.
2072 ///
2073 /// TODO: This is temporary until we decide a new bitcode encoding for
2074 /// shufflevector.
2075 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2076
2077 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2078 Type *ResultTy);
2079
2080 void setShuffleMask(ArrayRef<int> Mask);
2081
2082 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083
2084 /// Return true if this shuffle returns a vector with a different number of
2085 /// elements than its source vectors.
2086 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088 bool changesLength() const {
2089 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090 ->getElementCount()
2091 .getKnownMinValue();
2092 unsigned NumMaskElts = ShuffleMask.size();
2093 return NumSourceElts != NumMaskElts;
2094 }
2095
2096 /// Return true if this shuffle returns a vector with a greater number of
2097 /// elements than its source vectors.
2098 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099 bool increasesLength() const {
2100 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101 ->getElementCount()
2102 .getKnownMinValue();
2103 unsigned NumMaskElts = ShuffleMask.size();
2104 return NumSourceElts < NumMaskElts;
2105 }
2106
2107 /// Return true if this shuffle mask chooses elements from exactly one source
2108 /// vector.
2109 /// Example: <7,5,undef,7>
2110 /// This assumes that vector operands are the same length as the mask.
2111 static bool isSingleSourceMask(ArrayRef<int> Mask);
2112 static bool isSingleSourceMask(const Constant *Mask) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2113, __extension__ __PRETTY_FUNCTION__))
;
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isSingleSourceMask(MaskAsInts);
2117 }
2118
2119 /// Return true if this shuffle chooses elements from exactly one source
2120 /// vector without changing the length of that vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isSingleSource() const {
2124 return !changesLength() && isSingleSourceMask(ShuffleMask);
2125 }
2126
2127 /// Return true if this shuffle mask chooses elements from exactly one source
2128 /// vector without lane crossings. A shuffle using this mask is not
2129 /// necessarily a no-op because it may change the number of elements from its
2130 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131 /// Example: <undef,undef,2,3>
2132 static bool isIdentityMask(ArrayRef<int> Mask);
2133 static bool isIdentityMask(const Constant *Mask) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2134, __extension__ __PRETTY_FUNCTION__))
;
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isIdentityMask(MaskAsInts);
2138 }
2139
2140 /// Return true if this shuffle chooses elements from exactly one source
2141 /// vector without lane crossings and does not change the number of elements
2142 /// from its input vectors.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144 bool isIdentity() const {
2145 return !changesLength() && isIdentityMask(ShuffleMask);
2146 }
2147
2148 /// Return true if this shuffle lengthens exactly one source vector with
2149 /// undefs in the high elements.
2150 bool isIdentityWithPadding() const;
2151
2152 /// Return true if this shuffle extracts the first N elements of exactly one
2153 /// source vector.
2154 bool isIdentityWithExtract() const;
2155
2156 /// Return true if this shuffle concatenates its 2 source vectors. This
2157 /// returns false if either input is undefined. In that case, the shuffle is
2158 /// is better classified as an identity with padding operation.
2159 bool isConcat() const;
2160
2161 /// Return true if this shuffle mask chooses elements from its source vectors
2162 /// without lane crossings. A shuffle using this mask would be
2163 /// equivalent to a vector select with a constant condition operand.
2164 /// Example: <4,1,6,undef>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// This assumes that vector operands are the same length as the mask
2168 /// (a length-changing shuffle can never be equivalent to a vector select).
2169 static bool isSelectMask(ArrayRef<int> Mask);
2170 static bool isSelectMask(const Constant *Mask) {
2171 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2171, __extension__ __PRETTY_FUNCTION__))
;
2172 SmallVector<int, 16> MaskAsInts;
2173 getShuffleMask(Mask, MaskAsInts);
2174 return isSelectMask(MaskAsInts);
2175 }
2176
2177 /// Return true if this shuffle chooses elements from its source vectors
2178 /// without lane crossings and all operands have the same number of elements.
2179 /// In other words, this shuffle is equivalent to a vector select with a
2180 /// constant condition operand.
2181 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182 /// This returns false if the mask does not choose from both input vectors.
2183 /// In that case, the shuffle is better classified as an identity shuffle.
2184 /// TODO: Optionally allow length-changing shuffles.
2185 bool isSelect() const {
2186 return !changesLength() && isSelectMask(ShuffleMask);
2187 }
2188
2189 /// Return true if this shuffle mask swaps the order of elements from exactly
2190 /// one source vector.
2191 /// Example: <7,6,undef,4>
2192 /// This assumes that vector operands are the same length as the mask.
2193 static bool isReverseMask(ArrayRef<int> Mask);
2194 static bool isReverseMask(const Constant *Mask) {
2195 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2195, __extension__ __PRETTY_FUNCTION__))
;
2196 SmallVector<int, 16> MaskAsInts;
2197 getShuffleMask(Mask, MaskAsInts);
2198 return isReverseMask(MaskAsInts);
2199 }
2200
2201 /// Return true if this shuffle swaps the order of elements from exactly
2202 /// one source vector.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204 /// TODO: Optionally allow length-changing shuffles.
2205 bool isReverse() const {
2206 return !changesLength() && isReverseMask(ShuffleMask);
2207 }
2208
2209 /// Return true if this shuffle mask chooses all elements with the same value
2210 /// as the first element of exactly one source vector.
2211 /// Example: <4,undef,undef,4>
2212 /// This assumes that vector operands are the same length as the mask.
2213 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214 static bool isZeroEltSplatMask(const Constant *Mask) {
2215 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2215, __extension__ __PRETTY_FUNCTION__))
;
2216 SmallVector<int, 16> MaskAsInts;
2217 getShuffleMask(Mask, MaskAsInts);
2218 return isZeroEltSplatMask(MaskAsInts);
2219 }
2220
2221 /// Return true if all elements of this shuffle are the same value as the
2222 /// first element of exactly one source vector without changing the length
2223 /// of that vector.
2224 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225 /// TODO: Optionally allow length-changing shuffles.
2226 /// TODO: Optionally allow splats from other elements.
2227 bool isZeroEltSplat() const {
2228 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229 }
2230
2231 /// Return true if this shuffle mask is a transpose mask.
2232 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233 /// even- or odd-numbered vector elements from two n-dimensional source
2234 /// vectors and write each result into consecutive elements of an
2235 /// n-dimensional destination vector. Two shuffles are necessary to complete
2236 /// the transpose, one for the even elements and another for the odd elements.
2237 /// This description closely follows how the TRN1 and TRN2 AArch64
2238 /// instructions operate.
2239 ///
2240 /// For example, a simple 2x2 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b >
2244 /// m1 = < c, d >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249 ///
2250 /// For matrices having greater than n columns, the resulting nx2 transposed
2251 /// matrix is stored in two result vectors such that one vector contains
2252 /// interleaved elements from all the even-numbered rows and the other vector
2253 /// contains interleaved elements from all the odd-numbered rows. For example,
2254 /// a 2x4 matrix can be transposed with:
2255 ///
2256 /// ; Original matrix
2257 /// m0 = < a, b, c, d >
2258 /// m1 = < e, f, g, h >
2259 ///
2260 /// ; Transposed matrix
2261 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263 static bool isTransposeMask(ArrayRef<int> Mask);
2264 static bool isTransposeMask(const Constant *Mask) {
2265 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2265, __extension__ __PRETTY_FUNCTION__))
;
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isTransposeMask(MaskAsInts);
2269 }
2270
2271 /// Return true if this shuffle transposes the elements of its inputs without
2272 /// changing the length of the vectors. This operation may also be known as a
2273 /// merge or interleave. See the description for isTransposeMask() for the
2274 /// exact specification.
2275 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276 bool isTranspose() const {
2277 return !changesLength() && isTransposeMask(ShuffleMask);
2278 }
2279
2280 /// Return true if this shuffle mask is an extract subvector mask.
2281 /// A valid extract subvector mask returns a smaller vector from a single
2282 /// source operand. The base extraction index is returned as well.
2283 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284 int &Index);
2285 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286 int &Index) {
2287 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2287, __extension__ __PRETTY_FUNCTION__))
;
2288 // Not possible to express a shuffle mask for a scalable vector for this
2289 // case.
2290 if (isa<ScalableVectorType>(Mask->getType()))
2291 return false;
2292 SmallVector<int, 16> MaskAsInts;
2293 getShuffleMask(Mask, MaskAsInts);
2294 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295 }
2296
2297 /// Return true if this shuffle mask is an extract subvector mask.
2298 bool isExtractSubvectorMask(int &Index) const {
2299 // Not possible to express a shuffle mask for a scalable vector for this
2300 // case.
2301 if (isa<ScalableVectorType>(getType()))
2302 return false;
2303
2304 int NumSrcElts =
2305 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2306 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2307 }
2308
2309 /// Change values in a shuffle permute mask assuming the two vector operands
2310 /// of length InVecNumElts have swapped position.
2311 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2312 unsigned InVecNumElts) {
2313 for (int &Idx : Mask) {
2314 if (Idx == -1)
2315 continue;
2316 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2317 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
2318 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
;
2319 }
2320 }
2321
2322 // Methods for support type inquiry through isa, cast, and dyn_cast:
2323 static bool classof(const Instruction *I) {
2324 return I->getOpcode() == Instruction::ShuffleVector;
2325 }
2326 static bool classof(const Value *V) {
2327 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2328 }
2329};
2330
2331template <>
2332struct OperandTraits<ShuffleVectorInst>
2333 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2334
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value):
// out-of-line definitions for the transparent operand accessors declared in
// the class body, all routed through OperandTraits<ShuffleVectorInst>.
ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
  return OperandTraits<ShuffleVectorInst>::op_begin(this);
}
ShuffleVectorInst::const_op_iterator ShuffleVectorInst::op_begin() const {
  return OperandTraits<ShuffleVectorInst>::op_begin(
      const_cast<ShuffleVectorInst *>(this));
}
ShuffleVectorInst::op_iterator ShuffleVectorInst::op_end() {
  return OperandTraits<ShuffleVectorInst>::op_end(this);
}
ShuffleVectorInst::const_op_iterator ShuffleVectorInst::op_end() const {
  return OperandTraits<ShuffleVectorInst>::op_end(
      const_cast<ShuffleVectorInst *>(this));
}
Value *ShuffleVectorInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<ShuffleVectorInst>::op_begin(
          const_cast<ShuffleVectorInst *>(this))[i_nocapture].get());
}
void ShuffleVectorInst::setOperand(unsigned i_nocapture,
                                   Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<ShuffleVectorInst>::op_begin(this)[i_nocapture] =
      Val_nocapture;
}
unsigned ShuffleVectorInst::getNumOperands() const {
  return OperandTraits<ShuffleVectorInst>::operands(this);
}
template <int Idx_nocapture> Use &ShuffleVectorInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &ShuffleVectorInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
2336
2337//===----------------------------------------------------------------------===//
2338// ExtractValueInst Class
2339//===----------------------------------------------------------------------===//
2340
2341/// This instruction extracts a struct member or array
2342/// element value from an aggregate value.
2343///
2344class ExtractValueInst : public UnaryInstruction {
2345 SmallVector<unsigned, 4> Indices;
2346
2347 ExtractValueInst(const ExtractValueInst &EVI);
2348
2349 /// Constructors - Create a extractvalue instruction with a base aggregate
2350 /// value and a list of indices. The first ctor can optionally insert before
2351 /// an existing instruction, the second appends the new instruction to the
2352 /// specified BasicBlock.
2353 inline ExtractValueInst(Value *Agg,
2354 ArrayRef<unsigned> Idxs,
2355 const Twine &NameStr,
2356 Instruction *InsertBefore);
2357 inline ExtractValueInst(Value *Agg,
2358 ArrayRef<unsigned> Idxs,
2359 const Twine &NameStr, BasicBlock *InsertAtEnd);
2360
2361 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2362
2363protected:
2364 // Note: Instruction needs to be a friend here to call cloneImpl.
2365 friend class Instruction;
2366
2367 ExtractValueInst *cloneImpl() const;
2368
2369public:
2370 static ExtractValueInst *Create(Value *Agg,
2371 ArrayRef<unsigned> Idxs,
2372 const Twine &NameStr = "",
2373 Instruction *InsertBefore = nullptr) {
2374 return new
2375 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2376 }
2377
2378 static ExtractValueInst *Create(Value *Agg,
2379 ArrayRef<unsigned> Idxs,
2380 const Twine &NameStr,
2381 BasicBlock *InsertAtEnd) {
2382 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2383 }
2384
2385 /// Returns the type of the element that would be extracted
2386 /// with an extractvalue instruction with the specified parameters.
2387 ///
2388 /// Null is returned if the indices are invalid for the specified type.
2389 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2390
2391 using idx_iterator = const unsigned*;
2392
2393 inline idx_iterator idx_begin() const { return Indices.begin(); }
2394 inline idx_iterator idx_end() const { return Indices.end(); }
2395 inline iterator_range<idx_iterator> indices() const {
2396 return make_range(idx_begin(), idx_end());
2397 }
2398
2399 Value *getAggregateOperand() {
2400 return getOperand(0);
2401 }
2402 const Value *getAggregateOperand() const {
2403 return getOperand(0);
2404 }
2405 static unsigned getAggregateOperandIndex() {
2406 return 0U; // get index for modifying correct operand
2407 }
2408
2409 ArrayRef<unsigned> getIndices() const {
2410 return Indices;
2411 }
2412
2413 unsigned getNumIndices() const {
2414 return (unsigned)Indices.size();
2415 }
2416
2417 bool hasIndices() const {
2418 return true;
2419 }
2420
2421 // Methods for support type inquiry through isa, cast, and dyn_cast:
2422 static bool classof(const Instruction *I) {
2423 return I->getOpcode() == Instruction::ExtractValue;
2424 }
2425 static bool classof(const Value *V) {
2426 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2427 }
2428};
2429
2430ExtractValueInst::ExtractValueInst(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr,
2433 Instruction *InsertBefore)
2434 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2435 ExtractValue, Agg, InsertBefore) {
2436 init(Idxs, NameStr);
2437}
2438
2439ExtractValueInst::ExtractValueInst(Value *Agg,
2440 ArrayRef<unsigned> Idxs,
2441 const Twine &NameStr,
2442 BasicBlock *InsertAtEnd)
2443 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2444 ExtractValue, Agg, InsertAtEnd) {
2445 init(Idxs, NameStr);
2446}
2447
2448//===----------------------------------------------------------------------===//
2449// InsertValueInst Class
2450//===----------------------------------------------------------------------===//
2451
2452/// This instruction inserts a struct field of array element
2453/// value into an aggregate value.
2454///
2455class InsertValueInst : public Instruction {
2456 SmallVector<unsigned, 4> Indices;
2457
2458 InsertValueInst(const InsertValueInst &IVI);
2459
2460 /// Constructors - Create a insertvalue instruction with a base aggregate
2461 /// value, a value to insert, and a list of indices. The first ctor can
2462 /// optionally insert before an existing instruction, the second appends
2463 /// the new instruction to the specified BasicBlock.
2464 inline InsertValueInst(Value *Agg, Value *Val,
2465 ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 Instruction *InsertBefore);
2468 inline InsertValueInst(Value *Agg, Value *Val,
2469 ArrayRef<unsigned> Idxs,
2470 const Twine &NameStr, BasicBlock *InsertAtEnd);
2471
2472 /// Constructors - These two constructors are convenience methods because one
2473 /// and two index insertvalue instructions are so common.
2474 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2475 const Twine &NameStr = "",
2476 Instruction *InsertBefore = nullptr);
2477 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2478 BasicBlock *InsertAtEnd);
2479
2480 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2481 const Twine &NameStr);
2482
2483protected:
2484 // Note: Instruction needs to be a friend here to call cloneImpl.
2485 friend class Instruction;
2486
2487 InsertValueInst *cloneImpl() const;
2488
2489public:
2490 // allocate space for exactly two operands
2491 void *operator new(size_t s) {
2492 return User::operator new(s, 2);
2493 }
2494
2495 static InsertValueInst *Create(Value *Agg, Value *Val,
2496 ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr = "",
2498 Instruction *InsertBefore = nullptr) {
2499 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2500 }
2501
2502 static InsertValueInst *Create(Value *Agg, Value *Val,
2503 ArrayRef<unsigned> Idxs,
2504 const Twine &NameStr,
2505 BasicBlock *InsertAtEnd) {
2506 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2507 }
2508
2509 /// Transparently provide more efficient getOperand methods.
2510 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2511
2512 using idx_iterator = const unsigned*;
2513
2514 inline idx_iterator idx_begin() const { return Indices.begin(); }
2515 inline idx_iterator idx_end() const { return Indices.end(); }
2516 inline iterator_range<idx_iterator> indices() const {
2517 return make_range(idx_begin(), idx_end());
2518 }
2519
2520 Value *getAggregateOperand() {
2521 return getOperand(0);
2522 }
2523 const Value *getAggregateOperand() const {
2524 return getOperand(0);
2525 }
2526 static unsigned getAggregateOperandIndex() {
2527 return 0U; // get index for modifying correct operand
2528 }
2529
2530 Value *getInsertedValueOperand() {
2531 return getOperand(1);
2532 }
2533 const Value *getInsertedValueOperand() const {
2534 return getOperand(1);
2535 }
2536 static unsigned getInsertedValueOperandIndex() {
2537 return 1U; // get index for modifying correct operand
2538 }
2539
2540 ArrayRef<unsigned> getIndices() const {
2541 return Indices;
2542 }
2543
2544 unsigned getNumIndices() const {
2545 return (unsigned)Indices.size();
2546 }
2547
2548 bool hasIndices() const {
2549 return true;
2550 }
2551
2552 // Methods for support type inquiry through isa, cast, and dyn_cast:
2553 static bool classof(const Instruction *I) {
2554 return I->getOpcode() == Instruction::InsertValue;
2555 }
2556 static bool classof(const Value *V) {
2557 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2558 }
2559};
2560
2561template <>
2562struct OperandTraits<InsertValueInst> :
2563 public FixedNumOperandTraits<InsertValueInst, 2> {
2564};
2565
2566InsertValueInst::InsertValueInst(Value *Agg,
2567 Value *Val,
2568 ArrayRef<unsigned> Idxs,
2569 const Twine &NameStr,
2570 Instruction *InsertBefore)
2571 : Instruction(Agg->getType(), InsertValue,
2572 OperandTraits<InsertValueInst>::op_begin(this),
2573 2, InsertBefore) {
2574 init(Agg, Val, Idxs, NameStr);
2575}
2576
2577InsertValueInst::InsertValueInst(Value *Agg,
2578 Value *Val,
2579 ArrayRef<unsigned> Idxs,
2580 const Twine &NameStr,
2581 BasicBlock *InsertAtEnd)
2582 : Instruction(Agg->getType(), InsertValue,
2583 OperandTraits<InsertValueInst>::op_begin(this),
2584 2, InsertAtEnd) {
2585 init(Agg, Val, Idxs, NameStr);
2586}
2587
// Expansion of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value):
// out-of-line definitions for the transparent operand accessors declared in
// the class body, all routed through OperandTraits<InsertValueInst>.
InsertValueInst::op_iterator InsertValueInst::op_begin() {
  return OperandTraits<InsertValueInst>::op_begin(this);
}
InsertValueInst::const_op_iterator InsertValueInst::op_begin() const {
  return OperandTraits<InsertValueInst>::op_begin(
      const_cast<InsertValueInst *>(this));
}
InsertValueInst::op_iterator InsertValueInst::op_end() {
  return OperandTraits<InsertValueInst>::op_end(this);
}
InsertValueInst::const_op_iterator InsertValueInst::op_end() const {
  return OperandTraits<InsertValueInst>::op_end(
      const_cast<InsertValueInst *>(this));
}
Value *InsertValueInst::getOperand(unsigned i_nocapture) const {
  assert(i_nocapture < OperandTraits<InsertValueInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<InsertValueInst>::op_begin(
          const_cast<InsertValueInst *>(this))[i_nocapture].get());
}
void InsertValueInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
  assert(i_nocapture < OperandTraits<InsertValueInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture;
}
unsigned InsertValueInst::getNumOperands() const {
  return OperandTraits<InsertValueInst>::operands(this);
}
template <int Idx_nocapture> Use &InsertValueInst::Op() {
  return this->OpFrom<Idx_nocapture>(this);
}
template <int Idx_nocapture> const Use &InsertValueInst::Op() const {
  return this->OpFrom<Idx_nocapture>(this);
}
2589
2590//===----------------------------------------------------------------------===//
2591// PHINode Class
2592//===----------------------------------------------------------------------===//
2593
2594// PHINode - The PHINode class is used to represent the magical mystical PHI
2595// node, that can not exist in nature, but can be synthesized in a computer
2596// scientist's overactive imagination.
2597//
// NOTE(review): this file is a clang scan-build annotated dump of
// llvm/include/llvm/IR/Instructions.h. assert() macros appear in their
// expanded form (static_cast<bool>(...) ? void(0) : __assert_fail(...)) and
// the original header's line numbers are fused into the text (e.g.
// "2598class"). Do not hand-edit tokens here; fix the real header instead.
//
// PHINode represents the LLVM IR 'phi' instruction. Its hung-off operands
// are the incoming values; the parallel list of incoming BasicBlock* is
// stored immediately after the reserved operand slots — see block_begin(),
// which is reinterpret_cast of op_begin() + ReservedSpace.
2598class PHINode : public Instruction {
 2599 /// The number of operands actually allocated. NumOperands is
 2600 /// the number actually in use.
 2601 unsigned ReservedSpace;
 2602
 2603 PHINode(const PHINode &PN);
 2604
 2605 explicit PHINode(Type *Ty, unsigned NumReservedValues,
 2606 const Twine &NameStr = "",
 2607 Instruction *InsertBefore = nullptr)
 2608 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
 2609 ReservedSpace(NumReservedValues) {
 2610 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2610, __extension__ __PRETTY_FUNCTION__))
 ;
 2611 setName(NameStr);
 2612 allocHungoffUses(ReservedSpace);
 2613 }
 2614
 2615 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
 2616 BasicBlock *InsertAtEnd)
 2617 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
 2618 ReservedSpace(NumReservedValues) {
 2619 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2619, __extension__ __PRETTY_FUNCTION__))
 ;
 2620 setName(NameStr);
 2621 allocHungoffUses(ReservedSpace);
 2622 }
 2623
 2624protected:
 2625 // Note: Instruction needs to be a friend here to call cloneImpl.
 2626 friend class Instruction;
 2627
 2628 PHINode *cloneImpl() const;
 2629
 2630 // allocHungoffUses - this is more complicated than the generic
 2631 // User::allocHungoffUses, because we have to allocate Uses for the incoming
 2632 // values and pointers to the incoming blocks, all in one allocation.
 2633 void allocHungoffUses(unsigned N) {
 2634 User::allocHungoffUses(N, /* IsPhi */ true);
 2635 }
 2636
 2637public:
 2638 /// Constructors - NumReservedValues is a hint for the number of incoming
 2639 /// edges that this phi node will have (use 0 if you really have no idea).
 2640 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
 2641 const Twine &NameStr = "",
 2642 Instruction *InsertBefore = nullptr) {
 2643 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
 2644 }
 2645
 2646 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
 2647 const Twine &NameStr, BasicBlock *InsertAtEnd) {
 2648 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
 2649 }
 2650
 2651 /// Provide fast operand accessors
 2652 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 2653
 2654 // Block iterator interface. This provides access to the list of incoming
 2655 // basic blocks, which parallels the list of incoming values.
 2656
 2657 using block_iterator = BasicBlock **;
 2658 using const_block_iterator = BasicBlock * const *;
 2659
// The incoming-block array lives directly after the ReservedSpace operand
// slots in the same hung-off allocation (see allocHungoffUses above), hence
// the reinterpret_cast of an operand pointer to a BasicBlock** here.
 2660 block_iterator block_begin() {
 2661 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
 2662 }
 2663
 2664 const_block_iterator block_begin() const {
 2665 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
 2666 }
 2667
 2668 block_iterator block_end() {
 2669 return block_begin() + getNumOperands();
 2670 }
 2671
 2672 const_block_iterator block_end() const {
 2673 return block_begin() + getNumOperands();
 2674 }
 2675
 2676 iterator_range<block_iterator> blocks() {
 2677 return make_range(block_begin(), block_end());
 2678 }
 2679
 2680 iterator_range<const_block_iterator> blocks() const {
 2681 return make_range(block_begin(), block_end());
 2682 }
 2683
 2684 op_range incoming_values() { return operands(); }
 2685
 2686 const_op_range incoming_values() const { return operands(); }
 2687
 2688 /// Return the number of incoming edges
 2689 ///
 2690 unsigned getNumIncomingValues() const { return getNumOperands(); }
 2691
 2692 /// Return incoming value number x
 2693 ///
 2694 Value *getIncomingValue(unsigned i) const {
 2695 return getOperand(i);
 2696 }
 2697 void setIncomingValue(unsigned i, Value *V) {
 2698 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2698, __extension__ __PRETTY_FUNCTION__))
 ;
 2699 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
 2700 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
 ;
 2701 setOperand(i, V);
 2702 }
 2703
 2704 static unsigned getOperandNumForIncomingValue(unsigned i) {
 2705 return i;
 2706 }
 2707
 2708 static unsigned getIncomingValueNumForOperand(unsigned i) {
 2709 return i;
 2710 }
 2711
 2712 /// Return incoming basic block number @p i.
 2713 ///
 2714 BasicBlock *getIncomingBlock(unsigned i) const {
 2715 return block_begin()[i];
 2716 }
 2717
 2718 /// Return incoming basic block corresponding
 2719 /// to an operand of the PHI.
 2720 ///
 2721 BasicBlock *getIncomingBlock(const Use &U) const {
 2722 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2722, __extension__ __PRETTY_FUNCTION__))
 ;
 2723 return getIncomingBlock(unsigned(&U - op_begin()));
 2724 }
 2725
 2726 /// Return incoming basic block corresponding
 2727 /// to value use iterator.
 2728 ///
 2729 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
 2730 return getIncomingBlock(I.getUse());
 2731 }
 2732
 2733 void setIncomingBlock(unsigned i, BasicBlock *BB) {
 2734 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2734, __extension__ __PRETTY_FUNCTION__))
 ;
 2735 block_begin()[i] = BB;
 2736 }
 2737
 2738 /// Replace every incoming basic block \p Old to basic block \p New.
 2739 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
 2740 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2740, __extension__ __PRETTY_FUNCTION__))
 ;
 2741 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
 2742 if (getIncomingBlock(Op) == Old)
 2743 setIncomingBlock(Op, New);
 2744 }
 2745
 2746 /// Add an incoming value to the end of the PHI list
 2747 ///
// Grows the hung-off allocation on demand (when all ReservedSpace slots are
// in use), then appends both the value and its incoming block.
 2748 void addIncoming(Value *V, BasicBlock *BB) {
 2749 if (getNumOperands() == ReservedSpace)
 2750 growOperands(); // Get more space!
 2751 // Initialize some new operands.
 2752 setNumHungOffUseOperands(getNumOperands() + 1);
 2753 setIncomingValue(getNumOperands() - 1, V);
 2754 setIncomingBlock(getNumOperands() - 1, BB);
 2755 }
 2756
 2757 /// Remove an incoming value. This is useful if a
 2758 /// predecessor basic block is deleted. The value removed is returned.
 2759 ///
 2760 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
 2761 /// is true), the PHI node is destroyed and any uses of it are replaced with
 2762 /// dummy values. The only time there should be zero incoming values to a PHI
 2763 /// node is when the block is dead, so this strategy is sound.
 2764 ///
 2765 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
 2766
 2767 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
 2768 int Idx = getBasicBlockIndex(BB);
 2769 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2769, __extension__ __PRETTY_FUNCTION__))
 ;
 2770 return removeIncomingValue(Idx, DeletePHIIfEmpty);
 2771 }
 2772
 2773 /// Return the first index of the specified basic
 2774 /// block in the value list for this PHI. Returns -1 if no instance.
 2775 ///
 2776 int getBasicBlockIndex(const BasicBlock *BB) const {
 2777 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
 2778 if (block_begin()[i] == BB)
 2779 return i;
 2780 return -1;
 2781 }
 2782
 2783 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
 2784 int Idx = getBasicBlockIndex(BB);
 2785 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2785, __extension__ __PRETTY_FUNCTION__))
 ;
 2786 return getIncomingValue(Idx);
 2787 }
 2788
 2789 /// Set every incoming value(s) for block \p BB to \p V.
 2790 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
 2791 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2791, __extension__ __PRETTY_FUNCTION__))
 ;
 2792 bool Found = false;
 2793 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
 2794 if (getIncomingBlock(Op) == BB) {
 2795 Found = true;
 2796 setIncomingValue(Op, V);
 2797 }
 2798 (void)Found;
 2799 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2799, __extension__ __PRETTY_FUNCTION__))
 ;
 2800 }
 2801
 2802 /// If the specified PHI node always merges together the
 2803 /// same value, return the value, otherwise return null.
 2804 Value *hasConstantValue() const;
 2805
 2806 /// Whether the specified PHI node always merges
 2807 /// together the same value, assuming undefs are equal to a unique
 2808 /// non-undef value.
 2809 bool hasConstantOrUndefValue() const;
 2810
 2811 /// If the PHI node is complete which means all of its parent's predecessors
 2812 /// have incoming value in this PHI, return true, otherwise return false.
 2813 bool isComplete() const {
 2814 return llvm::all_of(predecessors(getParent()),
 2815 [this](const BasicBlock *Pred) {
 2816 return getBasicBlockIndex(Pred) >= 0;
 2817 });
 2818 }
 2819
 2820 /// Methods for support type inquiry through isa, cast, and dyn_cast:
 2821 static bool classof(const Instruction *I) {
 2822 return I->getOpcode() == Instruction::PHI;
 2823 }
 2824 static bool classof(const Value *V) {
 2825 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 2826 }
 2827
 2828private:
 2829 void growOperands();
 2830};
2831
// Operand-traits specialization: PHINode stores its operands "hung off"
// (in a separately allocated array, resizable via growOperands) rather than
// co-allocated with the object. The template argument presumably sets the
// minimum reserved operand count — TODO confirm against HungoffOperandTraits.
2832template <>
2833struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2834};
2835
// Expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value): out-of-line
// definitions of op_begin/op_end, range-checked getOperand/setOperand,
// getNumOperands, and the compile-time-indexed Op<N>() accessors, all
// forwarding to OperandTraits<PHINode>. Shown here in macro-expanded,
// scan-build-annotated form; the "2836" fragments are fused line numbers.
2836DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits
<PHINode>::op_begin(this); } PHINode::const_op_iterator
PHINode::op_begin() const { return OperandTraits<PHINode>
::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator
PHINode::op_end() { return OperandTraits<PHINode>::op_end
(this); } PHINode::const_op_iterator PHINode::op_end() const {
return OperandTraits<PHINode>::op_end(const_cast<PHINode
*>(this)); } Value *PHINode::getOperand(unsigned i_nocapture
) const { (static_cast <bool> (i_nocapture < OperandTraits
<PHINode>::operands(this) && "getOperand() out of range!"
) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2836, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<PHINode>::op_begin(const_cast
<PHINode*>(this))[i_nocapture].get()); } void PHINode::
setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<PHINode>::
operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2836, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned
PHINode::getNumOperands() const { return OperandTraits<PHINode
>::operands(this); } template <int Idx_nocapture> Use
&PHINode::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
PHINode::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
2837
2838//===----------------------------------------------------------------------===//
2839// LandingPadInst Class
2840//===----------------------------------------------------------------------===//
2841
2842//===---------------------------------------------------------------------------
2843/// The landingpad instruction holds all of the information
2844/// necessary to generate correct exception handling. The landingpad instruction
2845/// cannot be moved from the top of a landing pad block, which itself is
2846/// accessible only from the 'unwind' edge of an invoke. This uses the
2847/// SubclassData field in Value to store whether or not the landingpad is a
2848/// cleanup.
2849///
// NOTE(review): scan-build annotated dump — macros are expanded and original
// line numbers are fused into the text; do not hand-edit tokens here.
//
// LandingPadInst represents the IR 'landingpad' instruction. Its operands
// are the clause constants (catch or filter); the cleanup flag lives in the
// Value subclass-data bitfield (CleanupField). A clause is classified as a
// filter iff its type is an ArrayType (see isCatch/isFilter below).
2850class LandingPadInst : public Instruction {
 2851 using CleanupField = BoolBitfieldElementT<0>;
 2852
 2853 /// The number of operands actually allocated. NumOperands is
 2854 /// the number actually in use.
 2855 unsigned ReservedSpace;
 2856
 2857 LandingPadInst(const LandingPadInst &LP);
 2858
 2859public:
 2860 enum ClauseType { Catch, Filter };
 2861
 2862private:
 2863 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
 2864 const Twine &NameStr, Instruction *InsertBefore);
 2865 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
 2866 const Twine &NameStr, BasicBlock *InsertAtEnd);
 2867
 2868 // Allocate space for exactly zero operands.
 2869 void *operator new(size_t s) {
 2870 return User::operator new(s);
 2871 }
 2872
 2873 void growOperands(unsigned Size);
 2874 void init(unsigned NumReservedValues, const Twine &NameStr);
 2875
 2876protected:
 2877 // Note: Instruction needs to be a friend here to call cloneImpl.
 2878 friend class Instruction;
 2879
 2880 LandingPadInst *cloneImpl() const;
 2881
 2882public:
 2883 /// Constructors - NumReservedClauses is a hint for the number of incoming
 2884 /// clauses that this landingpad will have (use 0 if you really have no idea).
 2885 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
 2886 const Twine &NameStr = "",
 2887 Instruction *InsertBefore = nullptr);
 2888 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
 2889 const Twine &NameStr, BasicBlock *InsertAtEnd);
 2890
 2891 /// Provide fast operand accessors
 2892 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 2893
 2894 /// Return 'true' if this landingpad instruction is a
 2895 /// cleanup. I.e., it should be run when unwinding even if its landing pad
 2896 /// doesn't catch the exception.
 2897 bool isCleanup() const { return getSubclassData<CleanupField>(); }
 2898
 2899 /// Indicate that this landingpad instruction is a cleanup.
 2900 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
 2901
 2902 /// Add a catch or filter clause to the landing pad.
 2903 void addClause(Constant *ClauseVal);
 2904
 2905 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
 2906 /// determine what type of clause this is.
 2907 Constant *getClause(unsigned Idx) const {
 2908 return cast<Constant>(getOperandList()[Idx]);
 2909 }
 2910
 2911 /// Return 'true' if the clause and index Idx is a catch clause.
 2912 bool isCatch(unsigned Idx) const {
 2913 return !isa<ArrayType>(getOperandList()[Idx]->getType());
 2914 }
 2915
 2916 /// Return 'true' if the clause and index Idx is a filter clause.
 2917 bool isFilter(unsigned Idx) const {
 2918 return isa<ArrayType>(getOperandList()[Idx]->getType());
 2919 }
 2920
 2921 /// Get the number of clauses for this landing pad.
 2922 unsigned getNumClauses() const { return getNumOperands(); }
 2923
 2924 /// Grow the size of the operand list to accommodate the new
 2925 /// number of clauses.
 2926 void reserveClauses(unsigned Size) { growOperands(Size); }
 2927
 2928 // Methods for support type inquiry through isa, cast, and dyn_cast:
 2929 static bool classof(const Instruction *I) {
 2930 return I->getOpcode() == Instruction::LandingPad;
 2931 }
 2932 static bool classof(const Value *V) {
 2933 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 2934 }
 2935};
2936
// Operand-traits specialization: LandingPadInst also uses hung-off operands
// (clause list grows via growOperands); see the matching PHINode traits above
// for the general pattern.
2937template <>
2938struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2939};
2940
// Expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value):
// op_begin/op_end, range-checked getOperand/setOperand, getNumOperands and
// Op<N>() forwarding to OperandTraits<LandingPadInst>. Macro-expanded,
// scan-build-annotated form; fused line numbers are dump artifacts.
2941DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return
OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst
::const_op_iterator LandingPadInst::op_begin() const { return
OperandTraits<LandingPadInst>::op_begin(const_cast<
LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst
::op_end() { return OperandTraits<LandingPadInst>::op_end
(this); } LandingPadInst::const_op_iterator LandingPadInst::op_end
() const { return OperandTraits<LandingPadInst>::op_end
(const_cast<LandingPadInst*>(this)); } Value *LandingPadInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<LandingPadInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2941, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<LandingPadInst>::op_begin(
const_cast<LandingPadInst*>(this))[i_nocapture].get());
} void LandingPadInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<LandingPadInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2941, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
LandingPadInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned LandingPadInst::getNumOperands() const { return OperandTraits
<LandingPadInst>::operands(this); } template <int Idx_nocapture
> Use &LandingPadInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &LandingPadInst::Op() const { return this->
OpFrom<Idx_nocapture>(this); }
2942
2943//===----------------------------------------------------------------------===//
2944// ReturnInst Class
2945//===----------------------------------------------------------------------===//
2946
2947//===---------------------------------------------------------------------------
2948/// Return a value (possibly void), from a function. Execution
2949/// does not continue in this function any longer.
2950///
// NOTE(review): scan-build annotated dump — macros are expanded and original
// line numbers are fused into the text; do not hand-edit tokens here.
//
// ReturnInst represents the IR 'ret' instruction: zero operands for
// 'ret void', one operand (the returned value) otherwise. As a terminator
// it has no successors; the private getSuccessor/setSuccessor overrides are
// unreachable by construction.
2951class ReturnInst : public Instruction {
 2952 ReturnInst(const ReturnInst &RI);
 2953
 2954private:
 2955 // ReturnInst constructors:
 2956 // ReturnInst() - 'ret void' instruction
 2957 // ReturnInst( null) - 'ret void' instruction
 2958 // ReturnInst(Value* X) - 'ret X' instruction
 2959 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
 2960 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
 2961 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
 2962 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
 2963 //
 2964 // NOTE: If the Value* passed is of type void then the constructor behaves as
 2965 // if it was passed NULL.
 2966 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
 2967 Instruction *InsertBefore = nullptr);
 2968 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
 2969 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
 2970
 2971protected:
 2972 // Note: Instruction needs to be a friend here to call cloneImpl.
 2973 friend class Instruction;
 2974
 2975 ReturnInst *cloneImpl() const;
 2976
 2977public:
// new(!!retVal): placement-new with the operand count — 1 slot when a
// return value is present, 0 for 'ret void'.
 2978 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
 2979 Instruction *InsertBefore = nullptr) {
 2980 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
 2981 }
 2982
 2983 static ReturnInst* Create(LLVMContext &C, Value *retVal,
 2984 BasicBlock *InsertAtEnd) {
 2985 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
 2986 }
 2987
 2988 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
 2989 return new(0) ReturnInst(C, InsertAtEnd);
 2990 }
 2991
 2992 /// Provide fast operand accessors
 2993 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 2994
 2995 /// Convenience accessor. Returns null if there is no return value.
 2996 Value *getReturnValue() const {
 2997 return getNumOperands() != 0 ? getOperand(0) : nullptr;
 2998 }
 2999
 3000 unsigned getNumSuccessors() const { return 0; }
 3001
 3002 // Methods for support type inquiry through isa, cast, and dyn_cast:
 3003 static bool classof(const Instruction *I) {
 3004 return (I->getOpcode() == Instruction::Ret);
 3005 }
 3006 static bool classof(const Value *V) {
 3007 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 3008 }
 3009
 3010private:
 3011 BasicBlock *getSuccessor(unsigned idx) const {
 3012 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3012)
;
 3013 }
 3014
 3015 void setSuccessor(unsigned idx, BasicBlock *B) {
 3016 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3016)
;
 3017 }
 3018};
3019
// Operand-traits specialization: ReturnInst has a variable operand count
// (0 or 1 — see ReturnInst::Create's placement-new argument), hence
// VariadicOperandTraits rather than a fixed count.
3020template <>
3021struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3022};
3023
// Expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value):
// op_begin/op_end, range-checked getOperand/setOperand, getNumOperands and
// Op<N>() forwarding to OperandTraits<ReturnInst>. Macro-expanded,
// scan-build-annotated form; fused line numbers are dump artifacts.
3024DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits
<ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator
ReturnInst::op_begin() const { return OperandTraits<ReturnInst
>::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst
::op_iterator ReturnInst::op_end() { return OperandTraits<
ReturnInst>::op_end(this); } ReturnInst::const_op_iterator
ReturnInst::op_end() const { return OperandTraits<ReturnInst
>::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<ReturnInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3024, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ReturnInst>::op_begin(const_cast
<ReturnInst*>(this))[i_nocapture].get()); } void ReturnInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<ReturnInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3024, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ReturnInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned ReturnInst::getNumOperands() const { return OperandTraits
<ReturnInst>::operands(this); } template <int Idx_nocapture
> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
ReturnInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3025
3026//===----------------------------------------------------------------------===//
3027// BranchInst Class
3028//===----------------------------------------------------------------------===//
3029
3030//===---------------------------------------------------------------------------
3031/// Conditional or Unconditional Branch instruction.
3032///
// NOTE(review): scan-build annotated dump — macros are expanded and original
// line numbers are fused into the text; do not hand-edit tokens here.
//
// BranchInst represents the IR 'br' instruction. Operand layout is
// [Cond, FalseDest,] TrueDest (1 operand for unconditional, 3 for
// conditional), so accessors index backwards from the end via Op<-1>()..
// Op<-3>() to avoid branching on conditionality.
3033class BranchInst : public Instruction {
 3034 /// Ops list - Branches are strange. The operands are ordered:
 3035 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
 3036 /// they don't have to check for cond/uncond branchness. These are mostly
 3037 /// accessed relative from op_end().
 3038 BranchInst(const BranchInst &BI);
 3039 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
 3040 // BranchInst(BB *B) - 'br B'
 3041 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
 3042 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
 3043 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
 3044 // BranchInst(BB* B, BB *I) - 'br B' insert at end
 3045 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
 3046 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
 3047 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
 3048 Instruction *InsertBefore = nullptr);
 3049 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
 3050 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
 3051 BasicBlock *InsertAtEnd);
 3052
 3053 void AssertOK();
 3054
 3055protected:
 3056 // Note: Instruction needs to be a friend here to call cloneImpl.
 3057 friend class Instruction;
 3058
 3059 BranchInst *cloneImpl() const;
 3060
 3061public:
 3062 /// Iterator type that casts an operand to a basic block.
 3063 ///
 3064 /// This only makes sense because the successors are stored as adjacent
 3065 /// operands for branch instructions.
 3066 struct succ_op_iterator
 3067 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
 3068 std::random_access_iterator_tag, BasicBlock *,
 3069 ptrdiff_t, BasicBlock *, BasicBlock *> {
 3070 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
 3071
 3072 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
 3073 BasicBlock *operator->() const { return operator*(); }
 3074 };
 3075
 3076 /// The const version of `succ_op_iterator`.
 3077 struct const_succ_op_iterator
 3078 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
 3079 std::random_access_iterator_tag,
 3080 const BasicBlock *, ptrdiff_t, const BasicBlock *,
 3081 const BasicBlock *> {
 3082 explicit const_succ_op_iterator(const_value_op_iterator I)
 3083 : iterator_adaptor_base(I) {}
 3084
 3085 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
 3086 const BasicBlock *operator->() const { return operator*(); }
 3087 };
 3088
// new(1)/new(3): placement-new with the operand count matching the
// unconditional (1) vs conditional (3) operand layout described above.
 3089 static BranchInst *Create(BasicBlock *IfTrue,
 3090 Instruction *InsertBefore = nullptr) {
 3091 return new(1) BranchInst(IfTrue, InsertBefore);
 3092 }
 3093
 3094 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
 3095 Value *Cond, Instruction *InsertBefore = nullptr) {
 3096 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
 3097 }
 3098
 3099 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
 3100 return new(1) BranchInst(IfTrue, InsertAtEnd);
 3101 }
 3102
 3103 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
 3104 Value *Cond, BasicBlock *InsertAtEnd) {
 3105 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
 3106 }
 3107
 3108 /// Transparently provide more efficient getOperand methods.
 3109 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
 3110
 3111 bool isUnconditional() const { return getNumOperands() == 1; }
 3112 bool isConditional() const { return getNumOperands() == 3; }
 3113
 3114 Value *getCondition() const {
 3115 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3115, __extension__ __PRETTY_FUNCTION__))
 ;
 3116 return Op<-3>();
 3117 }
 3118
 3119 void setCondition(Value *V) {
 3120 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3120, __extension__ __PRETTY_FUNCTION__))
 ;
 3121 Op<-3>() = V;
 3122 }
 3123
 3124 unsigned getNumSuccessors() const { return 1+isConditional(); }
 3125
// Successors are the trailing operands: Op<-1>() is the true destination,
// Op<-2>() (conditional only) the false destination, hence (&Op<-1>() - i).
 3126 BasicBlock *getSuccessor(unsigned i) const {
 3127 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3127, __extension__ __PRETTY_FUNCTION__))
 ;
 3128 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
 3129 }
 3130
 3131 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
 3132 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3132, __extension__ __PRETTY_FUNCTION__))
 ;
 3133 *(&Op<-1>() - idx) = NewSucc;
 3134 }
 3135
 3136 /// Swap the successors of this branch instruction.
 3137 ///
 3138 /// Swaps the successors of the branch instruction. This also swaps any
 3139 /// branch weight metadata associated with the instruction so that it
 3140 /// continues to map correctly to each operand.
 3141 void swapSuccessors();
 3142
 3143 iterator_range<succ_op_iterator> successors() {
 3144 return make_range(
 3145 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
 3146 succ_op_iterator(value_op_end()));
 3147 }
 3148
 3149 iterator_range<const_succ_op_iterator> successors() const {
 3150 return make_range(const_succ_op_iterator(
 3151 std::next(value_op_begin(), isConditional() ? 1 : 0)),
 3152 const_succ_op_iterator(value_op_end()));
 3153 }
 3154
 3155 // Methods for support type inquiry through isa, cast, and dyn_cast:
 3156 static bool classof(const Instruction *I) {
 3157 return (I->getOpcode() == Instruction::Br);
 3158 }
 3159 static bool classof(const Value *V) {
 3160 return isa<Instruction>(V) && classof(cast<Instruction>(V));
 3161 }
 3162};
3163
// Operand-traits specialization: BranchInst has a variable operand count
// (1 for unconditional, 3 for conditional — see BranchInst::Create's
// placement-new argument), hence VariadicOperandTraits.
3164template <>
3165struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3166};
3167
// Expanded DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value):
// op_begin/op_end, range-checked getOperand/setOperand, getNumOperands and
// Op<N>() forwarding to OperandTraits<BranchInst>. Macro-expanded,
// scan-build-annotated form; fused line numbers are dump artifacts.
3168DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits
<BranchInst>::op_begin(this); } BranchInst::const_op_iterator
BranchInst::op_begin() const { return OperandTraits<BranchInst
>::op_begin(const_cast<BranchInst*>(this)); } BranchInst
::op_iterator BranchInst::op_end() { return OperandTraits<
BranchInst>::op_end(this); } BranchInst::const_op_iterator
BranchInst::op_end() const { return OperandTraits<BranchInst
>::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<BranchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3168, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<BranchInst>::op_begin(const_cast
<BranchInst*>(this))[i_nocapture].get()); } void BranchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<BranchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3168, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
BranchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned BranchInst::getNumOperands() const { return OperandTraits
<BranchInst>::operands(this); } template <int Idx_nocapture
> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
BranchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3169
3170//===----------------------------------------------------------------------===//
3171// SwitchInst Class
3172//===----------------------------------------------------------------------===//
3173
3174//===---------------------------------------------------------------------------
3175/// Multiway switch
3176///
3177class SwitchInst : public Instruction {
3178 unsigned ReservedSpace;
3179
3180 // Operand[0] = Value to switch on
3181 // Operand[1] = Default basic block destination
3182 // Operand[2n ] = Value to match
3183 // Operand[2n+1] = BasicBlock to go to on match
3184 SwitchInst(const SwitchInst &SI);
3185
3186 /// Create a new switch instruction, specifying a value to switch on and a
3187 /// default destination. The number of additional cases can be specified here
3188 /// to make memory allocation more efficient. This constructor can also
3189 /// auto-insert before another instruction.
3190 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3191 Instruction *InsertBefore);
3192
3193 /// Create a new switch instruction, specifying a value to switch on and a
3194 /// default destination. The number of additional cases can be specified here
3195 /// to make memory allocation more efficient. This constructor also
3196 /// auto-inserts at the end of the specified BasicBlock.
3197 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3198 BasicBlock *InsertAtEnd);
3199
3200 // allocate space for exactly zero operands
 // (Operands live in separately-allocated "hung off" storage — see the
 // OperandTraits<SwitchInst> : HungoffOperandTraits<2> specialization —
 // so the instruction itself needs no co-allocated operand space and
 // growOperands() can reallocate as cases are added.)
3201 void *operator new(size_t s) {
3202 return User::operator new(s);
3203 }
3204
3205 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3206 void growOperands();
3207
3208protected:
3209 // Note: Instruction needs to be a friend here to call cloneImpl.
3210 friend class Instruction;
3211
3212 SwitchInst *cloneImpl() const;
3213
3214public:
3215 // -2
 // Sentinel case index used by case_default(); it never collides with a
 // real case index and is checked for explicitly by the case handles.
3216 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3217
3218 template <typename CaseHandleT> class CaseIteratorImpl;
3219
3220 /// A handle to a particular switch case. It exposes a convenient interface
3221 /// to both the case value and the successor block.
3222 ///
3223 /// We define this as a template and instantiate it to form both a const and
3224 /// non-const handle.
3225 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3226 class CaseHandleImpl {
3227 // Directly befriend both const and non-const iterators.
3228 friend class SwitchInst::CaseIteratorImpl<
3229 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3230
3231 protected:
3232 // Expose the switch type we're parameterized with to the iterator.
3233 using SwitchInstType = SwitchInstT;
3234
3235 SwitchInstT *SI;
3236 ptrdiff_t Index;
3237
3238 CaseHandleImpl() = default;
3239 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3240
3241 public:
3242 /// Resolves case value for current case.
3243 ConstantIntT *getCaseValue() const {
3244 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3245, __extension__ __PRETTY_FUNCTION__))
3245 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3245, __extension__ __PRETTY_FUNCTION__))
;
 // Case values sit at even operand slots starting at 2 (see the
 // operand-layout comment at the top of the class).
3246 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3247 }
3248
3249 /// Resolves successor for current case.
3250 BasicBlockT *getCaseSuccessor() const {
3251 assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
3252 (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
3253 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases
() || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3253, __extension__ __PRETTY_FUNCTION__))
;
3254 return SI->getSuccessor(getSuccessorIndex());
3255 }
3256
3257 /// Returns number of current case.
3258 unsigned getCaseIndex() const { return Index; }
3259
3260 /// Returns successor index for current case successor.
3261 unsigned getSuccessorIndex() const {
3262 assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
3263 (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
3264 "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex
|| (unsigned)Index < SI->getNumCases()) && "Index out the number of cases."
) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3264, __extension__ __PRETTY_FUNCTION__))
;
 // Successor 0 is the default destination; case i maps to successor i+1.
3265 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3266 }
3267
3268 bool operator==(const CaseHandleImpl &RHS) const {
3269 assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3269, __extension__ __PRETTY_FUNCTION__))
;
3270 return Index == RHS.Index;
3271 }
3272 };
3273
3274 using ConstCaseHandle =
3275 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3276
3277 class CaseHandle
3278 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3279 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3280
3281 public:
3282 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3283
3284 /// Sets the new value for current case.
3285 void setValue(ConstantInt *V) {
3286 assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3287, __extension__ __PRETTY_FUNCTION__))
3287 "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases
() && "Index out the number of cases.") ? void (0) : __assert_fail
("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3287, __extension__ __PRETTY_FUNCTION__))
;
3288 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3289 }
3290
3291 /// Sets the new successor for current case.
3292 void setSuccessor(BasicBlock *S) {
3293 SI->setSuccessor(getSuccessorIndex(), S);
3294 }
3295 };
3296
3297 template <typename CaseHandleT>
3298 class CaseIteratorImpl
3299 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3300 std::random_access_iterator_tag,
3301 CaseHandleT> {
3302 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3303
3304 CaseHandleT Case;
3305
3306 public:
3307 /// Default constructed iterator is in an invalid state until assigned to
3308 /// a case for a particular switch.
3309 CaseIteratorImpl() = default;
3310
3311 /// Initializes case iterator for given SwitchInst and for given
3312 /// case number.
3313 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3314
3315 /// Initializes case iterator for given SwitchInst and for given
3316 /// successor index.
3317 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3318 unsigned SuccessorIndex) {
3319 assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3320, __extension__ __PRETTY_FUNCTION__))
3320 "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors
() && "Successor index # out of range!") ? void (0) :
__assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3320, __extension__ __PRETTY_FUNCTION__))
;
 // Inverse of CaseHandleImpl::getSuccessorIndex(): successor 0 is the
 // default destination, successor i (i > 0) is case i-1.
3321 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3322 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3323 }
3324
3325 /// Support converting to the const variant. This will be a no-op for const
3326 /// variant.
3327 operator CaseIteratorImpl<ConstCaseHandle>() const {
3328 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3329 }
3330
3331 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3332 // Check index correctness after addition.
3333 // Note: Index == getNumCases() means end().
3334 assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
3335 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
3336 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && (
unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3336, __extension__ __PRETTY_FUNCTION__))
;
3337 Case.Index += N;
3338 return *this;
3339 }
3340 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3341 // Check index correctness after subtraction.
3342 // Note: Case.Index == getNumCases() means end().
3343 assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
3344 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
3345 "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && (
unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
"Case.Index out the number of cases.") ? void (0) : __assert_fail
("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3345, __extension__ __PRETTY_FUNCTION__))
;
3346 Case.Index -= N;
3347 return *this;
3348 }
3349 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3350 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3350, __extension__ __PRETTY_FUNCTION__))
;
3351 return Case.Index - RHS.Case.Index;
3352 }
3353 bool operator==(const CaseIteratorImpl &RHS) const {
3354 return Case == RHS.Case;
3355 }
3356 bool operator<(const CaseIteratorImpl &RHS) const {
3357 assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators."
) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3357, __extension__ __PRETTY_FUNCTION__))
;
3358 return Case.Index < RHS.Case.Index;
3359 }
3360 CaseHandleT &operator*() { return Case; }
3361 const CaseHandleT &operator*() const { return Case; }
3362 };
3363
3364 using CaseIt = CaseIteratorImpl<CaseHandle>;
3365 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3366
3367 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3368 unsigned NumCases,
3369 Instruction *InsertBefore = nullptr) {
3370 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3371 }
3372
3373 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3374 unsigned NumCases, BasicBlock *InsertAtEnd) {
3375 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3376 }
3377
3378 /// Provide fast operand accessors
3379 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3380
3381 // Accessor Methods for Switch stmt
3382 Value *getCondition() const { return getOperand(0); }
3383 void setCondition(Value *V) { setOperand(0, V); }
3384
3385 BasicBlock *getDefaultDest() const {
3386 return cast<BasicBlock>(getOperand(1));
3387 }
3388
3389 void setDefaultDest(BasicBlock *DefaultCase) {
3390 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3391 }
3392
3393 /// Return the number of 'cases' in this switch instruction, excluding the
3394 /// default case.
 // Operands come in pairs; subtracting 1 drops the (condition, default) pair.
3395 unsigned getNumCases() const {
3396 return getNumOperands()/2 - 1;
3397 }
3398
3399 /// Returns a read/write iterator that points to the first case in the
3400 /// SwitchInst.
3401 CaseIt case_begin() {
3402 return CaseIt(this, 0);
3403 }
3404
3405 /// Returns a read-only iterator that points to the first case in the
3406 /// SwitchInst.
3407 ConstCaseIt case_begin() const {
3408 return ConstCaseIt(this, 0);
3409 }
3410
3411 /// Returns a read/write iterator that points one past the last in the
3412 /// SwitchInst.
3413 CaseIt case_end() {
3414 return CaseIt(this, getNumCases());
3415 }
3416
3417 /// Returns a read-only iterator that points one past the last in the
3418 /// SwitchInst.
3419 ConstCaseIt case_end() const {
3420 return ConstCaseIt(this, getNumCases());
3421 }
3422
3423 /// Iteration adapter for range-for loops.
3424 iterator_range<CaseIt> cases() {
3425 return make_range(case_begin(), case_end());
3426 }
3427
3428 /// Constant iteration adapter for range-for loops.
3429 iterator_range<ConstCaseIt> cases() const {
3430 return make_range(case_begin(), case_end());
3431 }
3432
3433 /// Returns an iterator that points to the default case.
3434 /// Note: this iterator allows to resolve successor only. Attempt
3435 /// to resolve case value causes an assertion.
3436 /// Also note, that increment and decrement also causes an assertion and
3437 /// makes iterator invalid.
3438 CaseIt case_default() {
3439 return CaseIt(this, DefaultPseudoIndex);
3440 }
3441 ConstCaseIt case_default() const {
3442 return ConstCaseIt(this, DefaultPseudoIndex);
3443 }
3444
3445 /// Search all of the case values for the specified constant. If it is
3446 /// explicitly handled, return the case iterator of it, otherwise return
3447 /// default case iterator to indicate that it is handled by the default
3448 /// handler.
3449 CaseIt findCaseValue(const ConstantInt *C) {
3450 CaseIt I = llvm::find_if(
3451 cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
3452 if (I != case_end())
3453 return I;
3454
3455 return case_default();
3456 }
3457 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3458 ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
3459 return Case.getCaseValue() == C;
3460 });
3461 if (I != case_end())
3462 return I;
3463
3464 return case_default();
3465 }
3466
3467 /// Finds the unique case value for a given successor. Returns null if the
3468 /// successor is not found, not unique, or is the default case.
3469 ConstantInt *findCaseDest(BasicBlock *BB) {
3470 if (BB == getDefaultDest())
3471 return nullptr;
3472
3473 ConstantInt *CI = nullptr;
3474 for (auto Case : cases()) {
3475 if (Case.getCaseSuccessor() != BB)
3476 continue;
3477
3478 if (CI)
3479 return nullptr; // Multiple cases lead to BB.
3480
3481 CI = Case.getCaseValue();
3482 }
3483
3484 return CI;
3485 }
3486
3487 /// Add an entry to the switch instruction.
3488 /// Note:
3489 /// This action invalidates case_end(). Old case_end() iterator will
3490 /// point to the added case.
3491 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3492
3493 /// This method removes the specified case and its successor from the switch
3494 /// instruction. Note that this operation may reorder the remaining cases at
3495 /// index idx and above.
3496 /// Note:
3497 /// This action invalidates iterators for all cases following the one removed,
3498 /// including the case_end() iterator. It returns an iterator for the next
3499 /// case.
3500 CaseIt removeCase(CaseIt I);
3501
 // Every odd operand slot holds a successor: the default (operand 1) plus
 // one block per case, hence getNumOperands()/2.
3502 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3503 BasicBlock *getSuccessor(unsigned idx) const {
3504 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor idx out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3504, __extension__ __PRETTY_FUNCTION__))
;
3505 return cast<BasicBlock>(getOperand(idx*2+1));
3506 }
3507 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3508 assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for switch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for switch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3508, __extension__ __PRETTY_FUNCTION__))
;
3509 setOperand(idx * 2 + 1, NewSucc);
3510 }
3511
3512 // Methods for support type inquiry through isa, cast, and dyn_cast:
3513 static bool classof(const Instruction *I) {
3514 return I->getOpcode() == Instruction::Switch;
3515 }
3516 static bool classof(const Value *V) {
3517 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3518 }
3519};
3520
3521/// A wrapper class to simplify modification of SwitchInst cases along with
3522/// their prof branch_weights metadata.
3523class SwitchInstProfUpdateWrapper {
 // The wrapped switch; this class holds a reference, not ownership.
3524 SwitchInst &SI;
 // Cached per-successor branch weights; presumably populated from SI's
 // !prof metadata by init() — confirm against the .cpp implementation.
3525 Optional<SmallVector<uint32_t, 8> > Weights = None;
 // Set once Weights diverge from the metadata; the destructor then writes
 // the cache back as MD_prof.
3526 bool Changed = false;
3527
3528protected:
3529 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3530
3531 MDNode *buildProfBranchWeightsMD();
3532
3533 void init();
3534
3535public:
3536 using CaseWeightOpt = Optional<uint32_t>;
 // Smart-pointer-style forwarding so the wrapper can be used wherever a
 // SwitchInst* / SwitchInst& is expected.
3537 SwitchInst *operator->() { return &SI; }
3538 SwitchInst &operator*() { return SI; }
3539 operator SwitchInst *() { return &SI; }
3540
3541 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3542
 // Flush modified weights back to the instruction's prof metadata.
3543 ~SwitchInstProfUpdateWrapper() {
3544 if (Changed)
3545 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3546 }
3547
3548 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3549 /// correspondent branch weight.
3550 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3551
3552 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3553 /// specified branch weight for the added case.
3554 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3555
3556 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3557 /// this object to not touch the underlying SwitchInst in destructor.
3558 SymbolTableList<Instruction>::iterator eraseFromParent();
3559
3560 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3561 CaseWeightOpt getSuccessorWeight(unsigned idx);
3562
3563 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3564};
3565
3566template <>
// SwitchInst keeps its operands in separately-allocated ("hung off")
// storage with a minimum of 2 operands — the condition and the default
// destination — so cases can be added and removed after construction.
3567struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3568};
3569
// Macro-generated out-of-line bodies for the accessors declared inside
// SwitchInst via DECLARE_TRANSPARENT_OPERAND_ACCESSORS: op_begin/op_end
// (const and non-const), range-checked getOperand/setOperand,
// getNumOperands, and the templated Op<Idx>() helpers. All of them
// delegate to OperandTraits<SwitchInst> (the hung-off traits above), so
// operand storage layout is defined in exactly one place.
3570DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits
<SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator
SwitchInst::op_begin() const { return OperandTraits<SwitchInst
>::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst
::op_iterator SwitchInst::op_end() { return OperandTraits<
SwitchInst>::op_end(this); } SwitchInst::const_op_iterator
SwitchInst::op_end() const { return OperandTraits<SwitchInst
>::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<SwitchInst>::operands
(this) && "getOperand() out of range!") ? void (0) : __assert_fail
("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3570, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<SwitchInst>::op_begin(const_cast
<SwitchInst*>(this))[i_nocapture].get()); } void SwitchInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<SwitchInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3570, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
SwitchInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned SwitchInst::getNumOperands() const { return OperandTraits
<SwitchInst>::operands(this); } template <int Idx_nocapture
> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
SwitchInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
3571
3572//===----------------------------------------------------------------------===//
3573// IndirectBrInst Class
3574//===----------------------------------------------------------------------===//
3575
3576//===---------------------------------------------------------------------------
3577/// Indirect Branch Instruction.
3578///
3579class IndirectBrInst : public Instruction {
3580 unsigned ReservedSpace;
3581
3582 // Operand[0] = Address to jump to
3583 // Operand[n+1] = n-th destination
3584 IndirectBrInst(const IndirectBrInst &IBI);
3585
3586 /// Create a new indirectbr instruction, specifying an
3587 /// Address to jump to. The number of expected destinations can be specified
3588 /// here to make memory allocation more efficient. This constructor can also
3589 /// autoinsert before another instruction.
3590 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3591
3592 /// Create a new indirectbr instruction, specifying an
3593 /// Address to jump to. The number of expected destinations can be specified
3594 /// here to make memory allocation more efficient. This constructor also
3595 /// autoinserts at the end of the specified BasicBlock.
3596 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3597
3598 // allocate space for exactly zero operands
3599 void *operator new(size_t s) {
3600 return User::operator new(s);
3601 }
3602
3603 void init(Value *Address, unsigned NumDests);
3604 void growOperands();
3605
3606protected:
3607 // Note: Instruction needs to be a friend here to call cloneImpl.
3608 friend class Instruction;
3609
3610 IndirectBrInst *cloneImpl() const;
3611
3612public:
3613 /// Iterator type that casts an operand to a basic block.
3614 ///
3615 /// This only makes sense because the successors are stored as adjacent
3616 /// operands for indirectbr instructions.
3617 struct succ_op_iterator
3618 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3619 std::random_access_iterator_tag, BasicBlock *,
3620 ptrdiff_t, BasicBlock *, BasicBlock *> {
3621 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3622
3623 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3624 BasicBlock *operator->() const { return operator*(); }
3625 };
3626
3627 /// The const version of `succ_op_iterator`.
3628 struct const_succ_op_iterator
3629 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3630 std::random_access_iterator_tag,
3631 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3632 const BasicBlock *> {
3633 explicit const_succ_op_iterator(const_value_op_iterator I)
3634 : iterator_adaptor_base(I) {}
3635
3636 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3637 const BasicBlock *operator->() const { return operator*(); }
3638 };
3639
3640 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3641 Instruction *InsertBefore = nullptr) {
3642 return new IndirectBrInst(Address, NumDests, InsertBefore);
3643 }
3644
3645 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3646 BasicBlock *InsertAtEnd) {
3647 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3648 }
3649
3650 /// Provide fast operand accessors.
3651 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3652
3653 // Accessor Methods for IndirectBrInst instruction.
3654 Value *getAddress() { return getOperand(0); }
3655 const Value *getAddress() const { return getOperand(0); }
3656 void setAddress(Value *V) { setOperand(0, V); }
3657
3658 /// return the number of possible destinations in this
3659 /// indirectbr instruction.
3660 unsigned getNumDestinations() const { return getNumOperands()-1; }
3661
3662 /// Return the specified destination.
3663 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3664 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3665
3666 /// Add a destination.
3667 ///
3668 void addDestination(BasicBlock *Dest);
3669
3670 /// This method removes the specified successor from the
3671 /// indirectbr instruction.
3672 void removeDestination(unsigned i);
3673
3674 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3675 BasicBlock *getSuccessor(unsigned i) const {
3676 return cast<BasicBlock>(getOperand(i+1));
3677 }
3678 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3679 setOperand(i + 1, NewSucc);
3680 }
3681
3682 iterator_range<succ_op_iterator> successors() {
3683 return make_range(succ_op_iterator(std::next(value_op_begin())),
3684 succ_op_iterator(value_op_end()));
3685 }