Bug Summary

File: llvm/lib/Transforms/Scalar/JumpThreading.cpp
Warning: line 1453, column 7
Called C++ object pointer is null
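
For context, this warning class fires when a member function is invoked
through a pointer on a path where the analyzer assumes that pointer to be
null. A minimal sketch of the pattern, with hypothetical names unrelated to
the code below:

    struct Node { int value() const { return 0; } };

    // find() may return nullptr; the analyzer explores that path.
    Node *find(bool present) { return present ? new Node() : nullptr; }

    int demo(bool present) {
      Node *n = find(present); // analyzer: assuming 'n' is null
      return n->value();       // "Called C++ object pointer is null"
    }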

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name JumpThreading.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Transforms/Scalar -I include -I /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-19-134126-35450-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Transforms/Scalar/JumpThreading.cpp

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/lib/Transforms/Scalar/JumpThreading.cpp

1//===- JumpThreading.cpp - Thread control through conditional blocks ------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the Jump Threading pass.
10//
11//===----------------------------------------------------------------------===//
12
13#include "llvm/Transforms/Scalar/JumpThreading.h"
14#include "llvm/ADT/DenseMap.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/MapVector.h"
17#include "llvm/ADT/Optional.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/Analysis/AliasAnalysis.h"
23#include "llvm/Analysis/BlockFrequencyInfo.h"
24#include "llvm/Analysis/BranchProbabilityInfo.h"
25#include "llvm/Analysis/CFG.h"
26#include "llvm/Analysis/ConstantFolding.h"
27#include "llvm/Analysis/DomTreeUpdater.h"
28#include "llvm/Analysis/GlobalsModRef.h"
29#include "llvm/Analysis/GuardUtils.h"
30#include "llvm/Analysis/InstructionSimplify.h"
31#include "llvm/Analysis/LazyValueInfo.h"
32#include "llvm/Analysis/Loads.h"
33#include "llvm/Analysis/LoopInfo.h"
34#include "llvm/Analysis/MemoryLocation.h"
35#include "llvm/Analysis/TargetLibraryInfo.h"
36#include "llvm/Analysis/TargetTransformInfo.h"
37#include "llvm/Analysis/ValueTracking.h"
38#include "llvm/IR/BasicBlock.h"
39#include "llvm/IR/CFG.h"
40#include "llvm/IR/Constant.h"
41#include "llvm/IR/ConstantRange.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/Dominators.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/IntrinsicInst.h"
50#include "llvm/IR/Intrinsics.h"
51#include "llvm/IR/LLVMContext.h"
52#include "llvm/IR/MDBuilder.h"
53#include "llvm/IR/Metadata.h"
54#include "llvm/IR/Module.h"
55#include "llvm/IR/PassManager.h"
56#include "llvm/IR/PatternMatch.h"
57#include "llvm/IR/Type.h"
58#include "llvm/IR/Use.h"
59#include "llvm/IR/User.h"
60#include "llvm/IR/Value.h"
61#include "llvm/InitializePasses.h"
62#include "llvm/Pass.h"
63#include "llvm/Support/BlockFrequency.h"
64#include "llvm/Support/BranchProbability.h"
65#include "llvm/Support/Casting.h"
66#include "llvm/Support/CommandLine.h"
67#include "llvm/Support/Debug.h"
68#include "llvm/Support/raw_ostream.h"
69#include "llvm/Transforms/Scalar.h"
70#include "llvm/Transforms/Utils/BasicBlockUtils.h"
71#include "llvm/Transforms/Utils/Cloning.h"
72#include "llvm/Transforms/Utils/Local.h"
73#include "llvm/Transforms/Utils/SSAUpdater.h"
74#include "llvm/Transforms/Utils/ValueMapper.h"
75#include <algorithm>
76#include <cassert>
77#include <cstddef>
78#include <cstdint>
79#include <iterator>
80#include <memory>
81#include <utility>
82
83using namespace llvm;
84using namespace jumpthreading;
85
86#define DEBUG_TYPE "jump-threading"
87
88STATISTIC(NumThreads, "Number of jumps threaded");
89STATISTIC(NumFolds, "Number of terminators folded");
90STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");
91
92static cl::opt<unsigned>
93BBDuplicateThreshold("jump-threading-threshold",
94 cl::desc("Max block size to duplicate for jump threading"),
95 cl::init(6), cl::Hidden);
96
97static cl::opt<unsigned>
98ImplicationSearchThreshold(
99 "jump-threading-implication-search-threshold",
100 cl::desc("The number of predecessors to search for a stronger "
101 "condition to use to thread over a weaker condition"),
102 cl::init(3), cl::Hidden);
103
104static cl::opt<bool> PrintLVIAfterJumpThreading(
105 "print-lvi-after-jump-threading",
106 cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false),
107 cl::Hidden);
108
109static cl::opt<bool> JumpThreadingFreezeSelectCond(
110 "jump-threading-freeze-select-cond",
111 cl::desc("Freeze the condition when unfolding select"), cl::init(false),
112 cl::Hidden);
113
114static cl::opt<bool> ThreadAcrossLoopHeaders(
115 "jump-threading-across-loop-headers",
116 cl::desc("Allow JumpThreading to thread across loop headers, for testing"),
117 cl::init(false), cl::Hidden);
118
119
120namespace {
121
122 /// This pass performs 'jump threading', which looks at blocks that have
123 /// multiple predecessors and multiple successors. If one or more of the
124 /// predecessors of the block can be proven to always jump to one of the
125 /// successors, we forward the edge from the predecessor to the successor by
126 /// duplicating the contents of this block.
127 ///
128 /// An example of when this can occur is code like this:
129 ///
130 /// if () { ...
131 /// X = 4;
132 /// }
133 /// if (X < 3) {
134 ///
135 /// In this case, the unconditional branch at the end of the first if can be
136 /// revectored to the false side of the second if.
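
A concrete before/after sketch of that transformation, written as plain C++
rather than IR (editorial illustration only):

    // Before: the path through the first 'if' still re-tests x.
    int before(bool p, int x) {
      if (p)
        x = 4;      // on this path "x < 3" is statically false
      if (x < 3)
        return 1;
      return 0;
    }

    // After threading: that path branches straight to the false side of the
    // second test, duplicating the (empty) code between the two ifs.
    int after(bool p, int x) {
      if (p)
        return 0;   // x == 4 here, so "x < 3" is known false
      if (x < 3)
        return 1;
      return 0;
    }
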
137 class JumpThreading : public FunctionPass {
138 JumpThreadingPass Impl;
139
140 public:
141 static char ID; // Pass identification
142
143 JumpThreading(bool InsertFreezeWhenUnfoldingSelect = false, int T = -1)
144 : FunctionPass(ID), Impl(InsertFreezeWhenUnfoldingSelect, T) {
145 initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
146 }
147
148 bool runOnFunction(Function &F) override;
149
150 void getAnalysisUsage(AnalysisUsage &AU) const override {
151 AU.addRequired<DominatorTreeWrapperPass>();
152 AU.addPreserved<DominatorTreeWrapperPass>();
153 AU.addRequired<AAResultsWrapperPass>();
154 AU.addRequired<LazyValueInfoWrapperPass>();
155 AU.addPreserved<LazyValueInfoWrapperPass>();
156 AU.addPreserved<GlobalsAAWrapperPass>();
157 AU.addRequired<TargetLibraryInfoWrapperPass>();
158 AU.addRequired<TargetTransformInfoWrapperPass>();
159 }
160
161 void releaseMemory() override { Impl.releaseMemory(); }
162 };
163
164} // end anonymous namespace
165
166char JumpThreading::ID = 0;
167
168INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
169                "Jump Threading", false, false)
170INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
171INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
172INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
173INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
174INITIALIZE_PASS_END(JumpThreading, "jump-threading",
175                "Jump Threading", false, false)
176
177// Public interface to the Jump Threading pass
178FunctionPass *llvm::createJumpThreadingPass(bool InsertFr, int Threshold) {
179 return new JumpThreading(InsertFr, Threshold);
180}
181
182JumpThreadingPass::JumpThreadingPass(bool InsertFr, int T) {
183 InsertFreezeWhenUnfoldingSelect = JumpThreadingFreezeSelectCond | InsertFr;
184 DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T);
185}
186
187// Update branch probability information according to conditional
188// branch probability. This is usually made possible for cloned branches
189// in inline instances by the context specific profile in the caller.
190// For instance,
191//
192// [Block PredBB]
193// [Branch PredBr]
194// if (t) {
195// Block A;
196// } else {
197// Block B;
198// }
199//
200// [Block BB]
201// cond = PN([true, %A], [..., %B]); // PHI node
202// [Branch CondBr]
203// if (cond) {
204// ... // P(cond == true) = 1%
205// }
206//
207// Here we know that when block A is taken, cond must be true, which means
208// P(cond == true | A) = 1
209//
210// Given that P(cond == true) = P(cond == true | A) * P(A) +
211// P(cond == true | B) * P(B)
212// we get:
213// P(cond == true ) = P(A) + P(cond == true | B) * P(B)
214//
215// which gives us:
216// P(A) is less than P(cond == true), i.e.
217// P(t == true) <= P(cond == true)
218//
219// In other words, if we know P(cond == true) is unlikely, we know
220// that P(t == true) is also unlikely.
221//
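
A worked instance of the derivation above, with made-up numbers: suppose the
profile says P(cond == true) = 1%. Since P(cond == true | A) = 1,

    0.01 = P(A) * 1 + P(cond == true | B) * P(B)  >=  P(A)

so P(A) <= 0.01, and the predecessor branch into A (that is, P(t == true))
can be annotated as at most 1% likely.
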
222static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) {
223 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
224 if (!CondBr)
225 return;
226
227 uint64_t TrueWeight, FalseWeight;
228 if (!CondBr->extractProfMetadata(TrueWeight, FalseWeight))
229 return;
230
231 if (TrueWeight + FalseWeight == 0)
232 // Zero branch_weights do not give a hint for getting branch probabilities.
233 // Technically it would result in division by zero denominator, which is
234 // TrueWeight + FalseWeight.
235 return;
236
237 // Returns the outgoing edge of the dominating predecessor block
238 // that leads to the PhiNode's incoming block:
239 auto GetPredOutEdge =
240 [](BasicBlock *IncomingBB,
241 BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> {
242 auto *PredBB = IncomingBB;
243 auto *SuccBB = PhiBB;
244 SmallPtrSet<BasicBlock *, 16> Visited;
245 while (true) {
246 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
247 if (PredBr && PredBr->isConditional())
248 return {PredBB, SuccBB};
249 Visited.insert(PredBB);
250 auto *SinglePredBB = PredBB->getSinglePredecessor();
251 if (!SinglePredBB)
252 return {nullptr, nullptr};
253
254 // Stop searching when SinglePredBB has been visited. It means we have
255 // run into an unreachable loop.
256 if (Visited.count(SinglePredBB))
257 return {nullptr, nullptr};
258
259 SuccBB = PredBB;
260 PredBB = SinglePredBB;
261 }
262 };
263
264 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
265 Value *PhiOpnd = PN->getIncomingValue(i);
266 ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd);
267
268 if (!CI || !CI->getType()->isIntegerTy(1))
269 continue;
270
271 BranchProbability BP =
272 (CI->isOne() ? BranchProbability::getBranchProbability(
273 TrueWeight, TrueWeight + FalseWeight)
274 : BranchProbability::getBranchProbability(
275 FalseWeight, TrueWeight + FalseWeight));
276
277 auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB);
278 if (!PredOutEdge.first)
279 return;
280
281 BasicBlock *PredBB = PredOutEdge.first;
282 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
283 if (!PredBr)
284 return;
285
286 uint64_t PredTrueWeight, PredFalseWeight;
287 // FIXME: We currently only set the profile data when it is missing.
288 // With PGO, this can be used to refine even existing profile data with
289 // context information. This needs to be done after more performance
290 // testing.
291 if (PredBr->extractProfMetadata(PredTrueWeight, PredFalseWeight))
292 continue;
293
294 // We cannot infer anything useful when BP >= 50%, because BP is an
295 // upper bound on the probability.
296 if (BP >= BranchProbability(50, 100))
297 continue;
298
299 SmallVector<uint32_t, 2> Weights;
300 if (PredBr->getSuccessor(0) == PredOutEdge.second) {
301 Weights.push_back(BP.getNumerator());
302 Weights.push_back(BP.getCompl().getNumerator());
303 } else {
304 Weights.push_back(BP.getCompl().getNumerator());
305 Weights.push_back(BP.getNumerator());
306 }
307 PredBr->setMetadata(LLVMContext::MD_prof,
308 MDBuilder(PredBr->getParent()->getContext())
309 .createBranchWeights(Weights));
310 }
311}
312
313/// runOnFunction - Toplevel algorithm.
314bool JumpThreading::runOnFunction(Function &F) {
315 if (skipFunction(F))
316 return false;
317 auto TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
318 // Jump threading makes no sense for targets with divergent control flow.
319 if (TTI->hasBranchDivergence())
320 return false;
321 auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
322 auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
323 auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
324 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
325 DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
326 std::unique_ptr<BlockFrequencyInfo> BFI;
327 std::unique_ptr<BranchProbabilityInfo> BPI;
328 if (F.hasProfileData()) {
329 LoopInfo LI{DominatorTree(F)};
330 BPI.reset(new BranchProbabilityInfo(F, LI, TLI));
331 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
332 }
333
334 bool Changed = Impl.runImpl(F, TLI, TTI, LVI, AA, &DTU, F.hasProfileData(),
335 std::move(BFI), std::move(BPI));
336 if (PrintLVIAfterJumpThreading) {
337 dbgs() << "LVI for function '" << F.getName() << "':\n";
338 LVI->printLVI(F, DTU.getDomTree(), dbgs());
339 }
340 return Changed;
341}
342
343PreservedAnalyses JumpThreadingPass::run(Function &F,
344 FunctionAnalysisManager &AM) {
345 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
346 // Jump threading makes no sense for targets with divergent control flow.
347 if (TTI.hasBranchDivergence())
348 return PreservedAnalyses::all();
349 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
350 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
351 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
352 auto &AA = AM.getResult<AAManager>(F);
353 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
354
355 std::unique_ptr<BlockFrequencyInfo> BFI;
356 std::unique_ptr<BranchProbabilityInfo> BPI;
357 if (F.hasProfileData()) {
358 LoopInfo LI{DominatorTree(F)};
359 BPI.reset(new BranchProbabilityInfo(F, LI, &TLI));
360 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
361 }
362
363 bool Changed = runImpl(F, &TLI, &TTI, &LVI, &AA, &DTU, F.hasProfileData(),
364 std::move(BFI), std::move(BPI));
365
366 if (PrintLVIAfterJumpThreading) {
367 dbgs() << "LVI for function '" << F.getName() << "':\n";
368 LVI.printLVI(F, DTU.getDomTree(), dbgs());
369 }
370
371 if (!Changed)
372 return PreservedAnalyses::all();
373 PreservedAnalyses PA;
374 PA.preserve<DominatorTreeAnalysis>();
375 PA.preserve<LazyValueAnalysis>();
376 return PA;
377}
378
379bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
380 TargetTransformInfo *TTI_, LazyValueInfo *LVI_,
381 AliasAnalysis *AA_, DomTreeUpdater *DTU_,
382 bool HasProfileData_,
383 std::unique_ptr<BlockFrequencyInfo> BFI_,
384 std::unique_ptr<BranchProbabilityInfo> BPI_) {
385 LLVM_DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
386 TLI = TLI_;
387 TTI = TTI_;
388 LVI = LVI_;
389 AA = AA_;
390 DTU = DTU_;
391 BFI.reset();
392 BPI.reset();
393 // When profile data is available, we need to update edge weights after
394 // successful jump threading, which requires both BPI and BFI to be available.
395 HasProfileData = HasProfileData_;
396 auto *GuardDecl = F.getParent()->getFunction(
397 Intrinsic::getName(Intrinsic::experimental_guard));
398 HasGuards = GuardDecl && !GuardDecl->use_empty();
399 if (HasProfileData) {
400 BPI = std::move(BPI_);
401 BFI = std::move(BFI_);
402 }
403
404 // Reduce the number of instructions duplicated when optimizing strictly for
405 // size.
406 if (BBDuplicateThreshold.getNumOccurrences())
407 BBDupThreshold = BBDuplicateThreshold;
408 else if (F.hasFnAttribute(Attribute::MinSize))
409 BBDupThreshold = 3;
410 else
411 BBDupThreshold = DefaultBBDupThreshold;
412
413 // JumpThreading must not process blocks unreachable from entry. It's a
414 // waste of compute time and can potentially lead to hangs.
415 SmallPtrSet<BasicBlock *, 16> Unreachable;
416 assert(DTU && "DTU isn't passed into JumpThreading before using it.");
417 assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed.");
418 DominatorTree &DT = DTU->getDomTree();
419 for (auto &BB : F)
420 if (!DT.isReachableFromEntry(&BB))
421 Unreachable.insert(&BB);
422
423 if (!ThreadAcrossLoopHeaders)
424 findLoopHeaders(F);
425
426 bool EverChanged = false;
427 bool Changed;
428 do {
429 Changed = false;
430 for (auto &BB : F) {
431 if (Unreachable.count(&BB))
432 continue;
433 while (processBlock(&BB)) // Thread all of the branches we can over BB.
434 Changed = true;
435
436 // Jump threading may have introduced redundant debug values into BB
437 // which should be removed.
438 if (Changed)
439 RemoveRedundantDbgInstrs(&BB);
440
441 // Stop processing BB if it's the entry or is now deleted. The following
442 // routines attempt to eliminate BB, and locating a suitable replacement
443 // for the entry is non-trivial.
444 if (&BB == &F.getEntryBlock() || DTU->isBBPendingDeletion(&BB))
445 continue;
446
447 if (pred_empty(&BB)) {
448 // When processBlock makes BB unreachable it doesn't bother to fix up
449 // the instructions in it. We must remove BB to prevent invalid IR.
450 LLVM_DEBUG(dbgs() << "  JT: Deleting dead block '" << BB.getName()
451                   << "' with terminator: " << *BB.getTerminator()
452                   << '\n');
453 LoopHeaders.erase(&BB);
454 LVI->eraseBlock(&BB);
455 DeleteDeadBlock(&BB, DTU);
456 Changed = true;
457 continue;
458 }
459
460 // processBlock doesn't thread BBs with unconditional TIs. However, if BB
461 // is "almost empty", we attempt to merge BB with its sole successor.
462 auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
463 if (BI && BI->isUnconditional()) {
464 BasicBlock *Succ = BI->getSuccessor(0);
465 if (
466 // The terminator must be the only non-phi instruction in BB.
467 BB.getFirstNonPHIOrDbg(true)->isTerminator() &&
468 // Don't alter Loop headers and latches to ensure another pass can
469 // detect and transform nested loops later.
470 !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) &&
471 TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU)) {
472 RemoveRedundantDbgInstrs(Succ);
473 // BB is valid for cleanup here because we passed in DTU. F remains
474 // BB's parent until a DTU->getDomTree() event.
475 LVI->eraseBlock(&BB);
476 Changed = true;
477 }
478 }
479 }
480 EverChanged |= Changed;
481 } while (Changed);
482
483 LoopHeaders.clear();
484 return EverChanged;
485}
486
487// Replace uses of Cond with ToVal when safe to do so. If all uses are
488// replaced, we can remove Cond. We cannot blindly replace all uses of Cond
489// because we may incorrectly replace uses when guards/assumes are uses
490// of `Cond` and we used the guards/assume to reason about the `Cond` value
491// at the end of block. RAUW unconditionally replaces all uses
492// including the guards/assumes themselves and the uses before the
493// guard/assume.
494static void replaceFoldableUses(Instruction *Cond, Value *ToVal) {
495 assert(Cond->getType() == ToVal->getType());
496 auto *BB = Cond->getParent();
497 // We can unconditionally replace all uses in non-local blocks (i.e. uses
498 // strictly dominated by BB), since LVI information is true from the
499 // terminator of BB.
500 replaceNonLocalUsesWith(Cond, ToVal);
501 for (Instruction &I : reverse(*BB)) {
502 // Reached the Cond whose uses we are trying to replace, so there are no
503 // more uses.
504 if (&I == Cond)
505 break;
506 // We only replace uses in instructions that are guaranteed to reach the end
507 // of BB, where we know Cond is ToVal.
508 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
509 break;
510 I.replaceUsesOfWith(Cond, ToVal);
511 }
512 if (Cond->use_empty() && !Cond->mayHaveSideEffects())
513 Cond->eraseFromParent();
514}
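
To make the RAUW hazard described above concrete, here is an editorial C++
sketch; assume() is a hypothetical stand-in for llvm.assume or a guard:

    bool cond = (x == 0);
    g(cond);        // use *before* the assume: cond is not yet known here
    assume(cond);   // from this point to the end of the block, cond is true

The fact "cond == true" holds only from the assume onward, so a blanket RAUW
that also rewrote g(cond) and the assume's own operand to 'true' would be
unsound; only the uses the fact actually covers are replaced.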
515
516/// Return the cost of duplicating a piece of this block, from the first
517/// non-phi up to (but not including) StopAt, to thread across it. Stop
518/// scanning when exceeding the threshold; returns ~0U if duplication is impossible.
519static unsigned getJumpThreadDuplicationCost(const TargetTransformInfo *TTI,
520 BasicBlock *BB,
521 Instruction *StopAt,
522 unsigned Threshold) {
523 assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");
524 /// Ignore PHI nodes; these will be flattened when duplication happens.
525 BasicBlock::const_iterator I(BB->getFirstNonPHI());
526
527 // FIXME: THREADING will delete values that are just used to compute the
528 // branch, so they shouldn't count against the duplication cost.
529
530 unsigned Bonus = 0;
531 if (BB->getTerminator() == StopAt) {
532 // Threading through a switch statement is particularly profitable. If this
533 // block ends in a switch, decrease its cost to make it more likely to
534 // happen.
535 if (isa<SwitchInst>(StopAt))
536 Bonus = 6;
537
538 // The same holds for indirect branches, but slightly more so.
539 if (isa<IndirectBrInst>(StopAt))
540 Bonus = 8;
541 }
542
543 // Bump the threshold up so the early exit from the loop doesn't skip the
544 // terminator-based Size adjustment at the end.
545 Threshold += Bonus;
546
547 // Sum up the cost of each instruction until we get to the terminator. Don't
548 // include the terminator because the copy won't include it.
549 unsigned Size = 0;
550 for (; &*I != StopAt; ++I) {
551
552 // Stop scanning the block if we've reached the threshold.
553 if (Size > Threshold)
554 return Size;
555
556 // Bail out if this instruction gives back a token type; it is not possible
557 // to duplicate it if it is used outside this BB.
558 if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
559 return ~0U;
560
561 // Blocks with NoDuplicate are modelled as having infinite cost, so they
562 // are never duplicated.
563 if (const CallInst *CI = dyn_cast<CallInst>(I))
564 if (CI->cannotDuplicate() || CI->isConvergent())
565 return ~0U;
566
567 if (TTI->getUserCost(&*I, TargetTransformInfo::TCK_SizeAndLatency)
568 == TargetTransformInfo::TCC_Free)
569 continue;
570
571 // All other instructions count for at least one unit.
572 ++Size;
573
574 // Calls are more expensive. If they are non-intrinsic calls, we model them
575 // as having cost of 4. If they are a non-vector intrinsic, we model them
576 // as having cost of 2 total, and if they are a vector intrinsic, we model
577 // them as having cost 1.
578 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
579 if (!isa<IntrinsicInst>(CI))
580 Size += 3;
581 else if (!CI->getType()->isVectorTy())
582 Size += 1;
583 }
584 }
585
586 return Size > Bonus ? Size - Bonus : 0;
587}
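
A worked example of this computation (editorial; assuming TTI does not mark
the instructions free): for a block with two adds and one non-intrinsic call,
ending in a switch:

    two adds             -> 1 + 1 = 2
    non-intrinsic call   -> 1 + 3 = 4
    Size = 6, Bonus = 6 (switch terminator)
    returned cost = (Size > Bonus) ? Size - Bonus : 0 = 0

so the block costs nothing against the default threshold of 6 and is highly
attractive to thread, despite containing a call.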
588
589/// findLoopHeaders - We do not want jump threading to turn proper loop
590/// structures into irreducible loops. Doing this breaks up the loop nesting
591/// hierarchy and pessimizes later transformations. To prevent this from
592/// happening, we first have to find the loop headers. Here we approximate this
593/// by finding targets of backedges in the CFG.
594///
595/// Note that there definitely are cases when we want to allow threading of
596/// edges across a loop header. For example, threading a jump from outside the
597/// loop (the preheader) to an exit block of the loop is definitely profitable.
598/// It is also almost always profitable to thread backedges from within the loop
599/// to exit blocks, and is often profitable to thread backedges to other blocks
600/// within the loop (forming a nested loop). This simple analysis is not rich
601/// enough to track all of these properties and keep it up-to-date as the CFG
602/// mutates, so we don't allow any of these transformations.
603void JumpThreadingPass::findLoopHeaders(Function &F) {
604 SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
605 FindFunctionBackedges(F, Edges);
606
607 for (const auto &Edge : Edges)
608 LoopHeaders.insert(Edge.second);
609}
610
611/// getKnownConstant - Helper method to determine if we can thread over a
612/// terminator with the given value as its condition, and if so what value to
613/// use for that. What kind of value this is depends on whether we want an
614/// integer or a block address, but an undef is always accepted.
615/// Returns null if Val is null or not an appropriate constant.
616static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
617 if (!Val)
618 return nullptr;
619
620 // Undef is "known" enough.
621 if (UndefValue *U = dyn_cast<UndefValue>(Val))
622 return U;
623
624 if (Preference == WantBlockAddress)
625 return dyn_cast<BlockAddress>(Val->stripPointerCasts());
626
627 return dyn_cast<ConstantInt>(Val);
628}
629
630/// computeValueKnownInPredecessors - Given a basic block BB and a value V, see
631/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
632/// in any of our predecessors. If so, return the known list of value and pred
633/// BB in the result vector.
634///
635/// This returns true if there were any known values.
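
For example (editorial): if BB begins with "cond = phi [ true, pred1 ],
[ x, pred2 ]", this records the pair (true, pred1) immediately, adds
(C, pred2) if LVI proves x to be a constant C on the pred2 -> BB edge, and
returns true because at least one pair was found.
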
636bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
637 Value *V, BasicBlock *BB, PredValueInfo &Result,
638 ConstantPreference Preference, DenseSet<Value *> &RecursionSet,
639 Instruction *CxtI) {
640 // This method walks up use-def chains recursively. Because of this, we could
641 // get into an infinite loop going around loops in the use-def chain. To
642 // prevent this, keep track of what (value, block) pairs we've already visited
643 // and terminate the search if we loop back to them.
644 if (!RecursionSet.insert(V).second)
645 return false;
646
647 // If V is a constant, then it is known in all predecessors.
648 if (Constant *KC = getKnownConstant(V, Preference)) {
649 for (BasicBlock *Pred : predecessors(BB))
650 Result.emplace_back(KC, Pred);
651
652 return !Result.empty();
653 }
654
655 // If V is a non-instruction value, or an instruction in a different block,
656 // then it can't be derived from a PHI.
657 Instruction *I = dyn_cast<Instruction>(V);
658 if (!I || I->getParent() != BB) {
659
660 // Okay, if this is a live-in value, see if it has a known value at the end
661 // of any of our predecessors.
662 //
663 // FIXME: This should be an edge property, not a block end property.
664 /// TODO: Per PR2563, we could infer value range information about a
665 /// predecessor based on its terminator.
666 //
667 // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
668 // "I" is a non-local compare-with-a-constant instruction. This would be
669 // able to handle value inequalities better, for example if the compare is
670 // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
671 // Perhaps getConstantOnEdge should be smart enough to do this?
672 for (BasicBlock *P : predecessors(BB)) {
673 // If the value is known by LazyValueInfo to be a constant in a
674 // predecessor, use that information to try to thread this block.
675 Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
676 if (Constant *KC = getKnownConstant(PredCst, Preference))
677 Result.emplace_back(KC, P);
678 }
679
680 return !Result.empty();
681 }
682
683 /// If I is a PHI node, then we know the incoming values for any constants.
684 if (PHINode *PN = dyn_cast<PHINode>(I)) {
685 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
686 Value *InVal = PN->getIncomingValue(i);
687 if (Constant *KC = getKnownConstant(InVal, Preference)) {
688 Result.emplace_back(KC, PN->getIncomingBlock(i));
689 } else {
690 Constant *CI = LVI->getConstantOnEdge(InVal,
691 PN->getIncomingBlock(i),
692 BB, CxtI);
693 if (Constant *KC = getKnownConstant(CI, Preference))
694 Result.emplace_back(KC, PN->getIncomingBlock(i));
695 }
696 }
697
698 return !Result.empty();
699 }
700
701 // Handle Cast instructions.
702 if (CastInst *CI = dyn_cast<CastInst>(I)) {
703 Value *Source = CI->getOperand(0);
704 computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
705 RecursionSet, CxtI);
706 if (Result.empty())
707 return false;
708
709 // Convert the known values.
710 for (auto &R : Result)
711 R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());
712
713 return true;
714 }
715
716 if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
717 Value *Source = FI->getOperand(0);
718 computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
719 RecursionSet, CxtI);
720
721 erase_if(Result, [](auto &Pair) {
722 return !isGuaranteedNotToBeUndefOrPoison(Pair.first);
723 });
724
725 return !Result.empty();
726 }
727
728 // Handle some boolean conditions.
729 if (I->getType()->getPrimitiveSizeInBits() == 1) {
730 using namespace PatternMatch;
731 if (Preference != WantInteger)
732 return false;
733 // X | true -> true
734 // X & false -> false
735 Value *Op0, *Op1;
736 if (match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) ||
737 match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
738 PredValueInfoTy LHSVals, RHSVals;
739
740 computeValueKnownInPredecessorsImpl(Op0, BB, LHSVals, WantInteger,
741 RecursionSet, CxtI);
742 computeValueKnownInPredecessorsImpl(Op1, BB, RHSVals, WantInteger,
743 RecursionSet, CxtI);
744
745 if (LHSVals.empty() && RHSVals.empty())
746 return false;
747
748 ConstantInt *InterestingVal;
749 if (match(I, m_LogicalOr()))
750 InterestingVal = ConstantInt::getTrue(I->getContext());
751 else
752 InterestingVal = ConstantInt::getFalse(I->getContext());
753
754 SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;
755
756 // Scan for the sentinel. If we find an undef, force it to the
757 // interesting value: x|undef -> true and x&undef -> false.
758 for (const auto &LHSVal : LHSVals)
759 if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
760 Result.emplace_back(InterestingVal, LHSVal.second);
761 LHSKnownBBs.insert(LHSVal.second);
762 }
763 for (const auto &RHSVal : RHSVals)
764 if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
765 // If we already inferred a value for this block on the LHS, don't
766 // re-add it.
767 if (!LHSKnownBBs.count(RHSVal.second))
768 Result.emplace_back(InterestingVal, RHSVal.second);
769 }
770
771 return !Result.empty();
772 }
773
774 // Handle the NOT form of XOR.
775 if (I->getOpcode() == Instruction::Xor &&
776 isa<ConstantInt>(I->getOperand(1)) &&
777 cast<ConstantInt>(I->getOperand(1))->isOne()) {
778 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result,
779 WantInteger, RecursionSet, CxtI);
780 if (Result.empty())
781 return false;
782
783 // Invert the known values.
784 for (auto &R : Result)
785 R.first = ConstantExpr::getNot(R.first);
786
787 return true;
788 }
789
790 // Try to simplify some other binary operator values.
791 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
792 if (Preference != WantInteger)
793 return false;
794 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
795 PredValueInfoTy LHSVals;
796 computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals,
797 WantInteger, RecursionSet, CxtI);
798
799 // Try to use constant folding to simplify the binary operator.
800 for (const auto &LHSVal : LHSVals) {
801 Constant *V = LHSVal.first;
802 Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);
803
804 if (Constant *KC = getKnownConstant(Folded, WantInteger))
805 Result.emplace_back(KC, LHSVal.second);
806 }
807 }
808
809 return !Result.empty();
810 }
811
812 // Handle compare with phi operand, where the PHI is defined in this block.
813 if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
814 if (Preference != WantInteger)
815 return false;
816 Type *CmpType = Cmp->getType();
817 Value *CmpLHS = Cmp->getOperand(0);
818 Value *CmpRHS = Cmp->getOperand(1);
819 CmpInst::Predicate Pred = Cmp->getPredicate();
820
821 PHINode *PN = dyn_cast<PHINode>(CmpLHS);
822 if (!PN)
823 PN = dyn_cast<PHINode>(CmpRHS);
824 if (PN && PN->getParent() == BB) {
825 const DataLayout &DL = PN->getModule()->getDataLayout();
826 // We can do this simplification if any comparisons fold to true or false.
827 // See if any do.
828 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
829 BasicBlock *PredBB = PN->getIncomingBlock(i);
830 Value *LHS, *RHS;
831 if (PN == CmpLHS) {
832 LHS = PN->getIncomingValue(i);
833 RHS = CmpRHS->DoPHITranslation(BB, PredBB);
834 } else {
835 LHS = CmpLHS->DoPHITranslation(BB, PredBB);
836 RHS = PN->getIncomingValue(i);
837 }
838 Value *Res = SimplifyCmpInst(Pred, LHS, RHS, {DL});
839 if (!Res) {
840 if (!isa<Constant>(RHS))
841 continue;
842
843 // getPredicateOnEdge call will make no sense if LHS is defined in BB.
844 auto LHSInst = dyn_cast<Instruction>(LHS);
845 if (LHSInst && LHSInst->getParent() == BB)
846 continue;
847
848 LazyValueInfo::Tristate
849 ResT = LVI->getPredicateOnEdge(Pred, LHS,
850 cast<Constant>(RHS), PredBB, BB,
851 CxtI ? CxtI : Cmp);
852 if (ResT == LazyValueInfo::Unknown)
853 continue;
854 Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
855 }
856
857 if (Constant *KC = getKnownConstant(Res, WantInteger))
858 Result.emplace_back(KC, PredBB);
859 }
860
861 return !Result.empty();
862 }
863
864 // If comparing a live-in value against a constant, see if we know the
865 // live-in value on any predecessors.
866 if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) {
867 Constant *CmpConst = cast<Constant>(CmpRHS);
868
869 if (!isa<Instruction>(CmpLHS) ||
870 cast<Instruction>(CmpLHS)->getParent() != BB) {
871 for (BasicBlock *P : predecessors(BB)) {
872 // If the value is known by LazyValueInfo to be a constant in a
873 // predecessor, use that information to try to thread this block.
874 LazyValueInfo::Tristate Res =
875 LVI->getPredicateOnEdge(Pred, CmpLHS,
876 CmpConst, P, BB, CxtI ? CxtI : Cmp);
877 if (Res == LazyValueInfo::Unknown)
878 continue;
879
880 Constant *ResC = ConstantInt::get(CmpType, Res);
881 Result.emplace_back(ResC, P);
882 }
883
884 return !Result.empty();
885 }
886
887 // InstCombine can fold some forms of constant range checks into
888 // (icmp (add (x, C1)), C2). See if we have such a thing with
889 // x as a live-in.
890 {
891 using namespace PatternMatch;
892
893 Value *AddLHS;
894 ConstantInt *AddConst;
895 if (isa<ConstantInt>(CmpConst) &&
896 match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) {
897 if (!isa<Instruction>(AddLHS) ||
898 cast<Instruction>(AddLHS)->getParent() != BB) {
899 for (BasicBlock *P : predecessors(BB)) {
900 // If the value is known by LazyValueInfo to be a ConstantRange in
901 // a predecessor, use that information to try to thread this
902 // block.
903 ConstantRange CR = LVI->getConstantRangeOnEdge(
904 AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS));
905 // Propagate the range through the addition.
906 CR = CR.add(AddConst->getValue());
907
908 // Get the range where the compare returns true.
909 ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(
910 Pred, cast<ConstantInt>(CmpConst)->getValue());
911
912 Constant *ResC;
913 if (CmpRange.contains(CR))
914 ResC = ConstantInt::getTrue(CmpType);
915 else if (CmpRange.inverse().contains(CR))
916 ResC = ConstantInt::getFalse(CmpType);
917 else
918 continue;
919
920 Result.emplace_back(ResC, P);
921 }
922
923 return !Result.empty();
924 }
925 }
926 }
927
928 // Try to find a constant value for the LHS of a comparison,
929 // and evaluate it statically if we can.
930 PredValueInfoTy LHSVals;
931 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals,
932 WantInteger, RecursionSet, CxtI);
933
934 for (const auto &LHSVal : LHSVals) {
935 Constant *V = LHSVal.first;
936 Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst);
937 if (Constant *KC = getKnownConstant(Folded, WantInteger))
938 Result.emplace_back(KC, LHSVal.second);
939 }
940
941 return !Result.empty();
942 }
943 }
944
945 if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
946 // Handle select instructions where at least one operand is a known constant
947 // and we can figure out the condition value for any predecessor block.
948 Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
949 Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
950 PredValueInfoTy Conds;
951 if ((TrueVal || FalseVal) &&
952 computeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds,
953 WantInteger, RecursionSet, CxtI)) {
954 for (auto &C : Conds) {
955 Constant *Cond = C.first;
956
957 // Figure out what value to use for the condition.
958 bool KnownCond;
959 if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
960 // A known boolean.
961 KnownCond = CI->isOne();
962 } else {
963 assert(isa<UndefValue>(Cond) && "Unexpected condition value");
964 // Either operand will do, so be sure to pick the one that's a known
965 // constant.
966 // FIXME: Do this more cleverly if both values are known constants?
967 KnownCond = (TrueVal != nullptr);
968 }
969
970 // See if the select has a known constant value for this predecessor.
971 if (Constant *Val = KnownCond ? TrueVal : FalseVal)
972 Result.emplace_back(Val, C.second);
973 }
974
975 return !Result.empty();
976 }
977 }
978
979 // If all else fails, see if LVI can figure out a constant value for us.
980 assert(CxtI->getParent() == BB && "CxtI should be in BB");
981 Constant *CI = LVI->getConstant(V, CxtI);
982 if (Constant *KC = getKnownConstant(CI, Preference)) {
983 for (BasicBlock *Pred : predecessors(BB))
984 Result.emplace_back(KC, Pred);
985 }
986
987 return !Result.empty();
988}
989
990/// getBestDestForJumpOnUndef - If we determine that the specified block ends
991/// in an undefined jump, decide which block is best to revector to.
992///
993/// Since we can pick an arbitrary destination, we pick the successor with the
994/// fewest predecessors. This should reduce the in-degree of the others.
995static unsigned getBestDestForJumpOnUndef(BasicBlock *BB) {
996 Instruction *BBTerm = BB->getTerminator();
997 unsigned MinSucc = 0;
998 BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
999 // Compute the successor with the minimum number of predecessors.
1000 unsigned MinNumPreds = pred_size(TestBB);
1001 for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
1002 TestBB = BBTerm->getSuccessor(i);
1003 unsigned NumPreds = pred_size(TestBB);
1004 if (NumPreds < MinNumPreds) {
1005 MinSucc = i;
1006 MinNumPreds = NumPreds;
1007 }
1008 }
1009
1010 return MinSucc;
1011}
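
For instance (editorial): if the terminator's three successors currently have
4, 1, and 2 predecessors, this returns index 1, so folding the undef branch
sends the edge to the least-crowded successor.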
1012
1013static bool hasAddressTakenAndUsed(BasicBlock *BB) {
1014 if (!BB->hasAddressTaken()) return false;
1015
1016 // If the block has its address taken, it may be a tree of dead constants
1017 // hanging off of it. These shouldn't keep the block alive.
1018 BlockAddress *BA = BlockAddress::get(BB);
1019 BA->removeDeadConstantUsers();
1020 return !BA->use_empty();
1021}
1022
1023/// processBlock - If there are any predecessors whose control can be threaded
1024/// through to a successor, transform them now.
1025bool JumpThreadingPass::processBlock(BasicBlock *BB) {
1026 // If the block is trivially dead, just return and let the caller nuke it.
1027 // This simplifies other transformations.
1028 if (DTU->isBBPendingDeletion(BB) ||
1029 (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()))
1030 return false;
1031
1032 // If this block has a single predecessor, and if that pred has a single
1033 // successor, merge the blocks. This encourages recursive jump threading
1034 // because now the condition in this block can be threaded through
1035 // predecessors of our predecessor block.
1036 if (maybeMergeBasicBlockIntoOnlyPred(BB))
1037 return true;
1038
1039 if (tryToUnfoldSelectInCurrBB(BB))
1040 return true;
1041
1042 // See if we can propagate guards to predecessors.
1043 if (HasGuards && processGuards(BB))
1044 return true;
1045
1046 // What kind of constant we're looking for.
1047 ConstantPreference Preference = WantInteger;
1048
1049 // Look to see if the terminator is a conditional branch, switch or indirect
1050 // branch; if not, we can't thread it.
1051 Value *Condition;
1052 Instruction *Terminator = BB->getTerminator();
1053 if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
1054 // Can't thread an unconditional jump.
1055 if (BI->isUnconditional()) return false;
1056 Condition = BI->getCondition();
1057 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
1058 Condition = SI->getCondition();
1059 } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
1060 // Can't thread indirect branch with no successors.
1061 if (IB->getNumSuccessors() == 0) return false;
1062 Condition = IB->getAddress()->stripPointerCasts();
1063 Preference = WantBlockAddress;
1064 } else {
1065 return false; // Must be an invoke or callbr.
1066 }
1067
1068 // Keep track of whether we constant-folded the condition in this invocation.
1069 bool ConstantFolded = false;
1070
1071 // Run constant folding to see if we can reduce the condition to a simple
1072 // constant.
1073 if (Instruction *I = dyn_cast<Instruction>(Condition)) {
1074 Value *SimpleVal =
1075 ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
1076 if (SimpleVal) {
1077 I->replaceAllUsesWith(SimpleVal);
1078 if (isInstructionTriviallyDead(I, TLI))
1079 I->eraseFromParent();
1080 Condition = SimpleVal;
1081 ConstantFolded = true;
1082 }
1083 }
1084
1085 // If the terminator is branching on an undef or a freeze of undef, we can
1086 // pick any of the successors to branch to. Let getBestDestForJumpOnUndef decide.
1087 auto *FI = dyn_cast<FreezeInst>(Condition);
1088 if (isa<UndefValue>(Condition) ||
1089 (FI && isa<UndefValue>(FI->getOperand(0)) && FI->hasOneUse())) {
1090 unsigned BestSucc = getBestDestForJumpOnUndef(BB);
1091 std::vector<DominatorTree::UpdateType> Updates;
1092
1093 // Fold the branch/switch.
1094 Instruction *BBTerm = BB->getTerminator();
1095 Updates.reserve(BBTerm->getNumSuccessors());
1096 for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
1097 if (i == BestSucc) continue;
1098 BasicBlock *Succ = BBTerm->getSuccessor(i);
1099 Succ->removePredecessor(BB, true);
1100 Updates.push_back({DominatorTree::Delete, BB, Succ});
1101 }
1102
1103 LLVM_DEBUG(dbgs() << "  In block '" << BB->getName()
1104                   << "' folding undef terminator: " << *BBTerm << '\n');
1105 BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
1106 ++NumFolds;
1107 BBTerm->eraseFromParent();
1108 DTU->applyUpdatesPermissive(Updates);
1109 if (FI)
1110 FI->eraseFromParent();
1111 return true;
1112 }
1113
1114 // If the terminator of this block is branching on a constant, simplify the
1115 // terminator to an unconditional branch. This can occur due to threading in
1116 // other blocks.
1117 if (getKnownConstant(Condition, Preference)) {
1118 LLVM_DEBUG(dbgs() << "  In block '" << BB->getName()
1119                   << "' folding terminator: " << *BB->getTerminator()
1120                   << '\n');
1121 ++NumFolds;
1122 ConstantFoldTerminator(BB, true, nullptr, DTU);
1123 if (HasProfileData)
1124 BPI->eraseBlock(BB);
1125 return true;
1126 }
1127
1128 Instruction *CondInst = dyn_cast<Instruction>(Condition);
1129
1130 // All the rest of our checks depend on the condition being an instruction.
1131 if (!CondInst) {
1132 // FIXME: Unify this with code below.
1133 if (processThreadableEdges(Condition, BB, Preference, Terminator))
1134 return true;
1135 return ConstantFolded;
1136 }
1137
1138 if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
1139 // If we're branching on a conditional, LVI might be able to determine
1140 // its value at the branch instruction. We only handle comparisons
1141 // against a constant at this time.
1142 // TODO: This should be extended to handle switches as well.
1143 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
1144 Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
1145 if (CondBr && CondConst) {
1146 // We should have returned as soon as we turn a conditional branch to
1147 // unconditional, because it's no longer interesting as far as jump
1148 // threading is concerned.
1149 assert(CondBr->isConditional() && "Threading on unconditional terminator");
1150
1151 LazyValueInfo::Tristate Ret =
1152 LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
1153 CondConst, CondBr, /*UseBlockValue=*/false);
1154 if (Ret != LazyValueInfo::Unknown) {
1155 unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
1156 unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
1157 BasicBlock *ToRemoveSucc = CondBr->getSuccessor(ToRemove);
1158 ToRemoveSucc->removePredecessor(BB, true);
1159 BranchInst *UncondBr =
1160 BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
1161 UncondBr->setDebugLoc(CondBr->getDebugLoc());
1162 ++NumFolds;
1163 CondBr->eraseFromParent();
1164 if (CondCmp->use_empty())
1165 CondCmp->eraseFromParent();
1166 // We can safely replace *some* uses of the CondInst if it has
1167 // exactly one value as returned by LVI. RAUW is incorrect in the
1168 // presence of guards and assumes that have `Cond` as a use. This
1169 // is because we use the guards/assume to reason about the `Cond` value
1170 // at the end of block, but RAUW unconditionally replaces all uses
1171 // including the guards/assumes themselves and the uses before the
1172 // guard/assume.
1173 else if (CondCmp->getParent() == BB) {
1174 auto *CI = Ret == LazyValueInfo::True ?
1175 ConstantInt::getTrue(CondCmp->getType()) :
1176 ConstantInt::getFalse(CondCmp->getType());
1177 replaceFoldableUses(CondCmp, CI);
1178 }
1179 DTU->applyUpdatesPermissive(
1180 {{DominatorTree::Delete, BB, ToRemoveSucc}});
1181 if (HasProfileData)
1182 BPI->eraseBlock(BB);
1183 return true;
1184 }
1185
1186 // We did not manage to simplify this branch, try to see whether
1187 // CondCmp depends on a known phi-select pattern.
1188 if (tryToUnfoldSelect(CondCmp, BB))
1189 return true;
1190 }
1191 }
1192
1193 if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
1194 if (tryToUnfoldSelect(SI, BB))
1195 return true;
1196
1197 // Check for some cases that are worth simplifying. Right now we want to look
1198 // for loads that are used by a switch or by the condition for the branch. If
1199 // we see one, check to see if it's partially redundant. If so, insert a PHI
1200 // which can then be used to thread the values.
1201 Value *SimplifyValue = CondInst;
1202
1203 if (auto *FI = dyn_cast<FreezeInst>(SimplifyValue))
1204 // Look into freeze's operand
1205 SimplifyValue = FI->getOperand(0);
1206
1207 if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
1208 if (isa<Constant>(CondCmp->getOperand(1)))
1209 SimplifyValue = CondCmp->getOperand(0);
1210
1211 // TODO: There are other places where load PRE would be profitable, such as
1212 // more complex comparisons.
1213 if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue))
1214 if (simplifyPartiallyRedundantLoad(LoadI))
1215 return true;
1216
1217 // Before threading, try to propagate profile data backwards:
1218 if (PHINode *PN = dyn_cast<PHINode>(CondInst))
1219 if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1220 updatePredecessorProfileMetadata(PN, BB);
1221
1222 // Handle a variety of cases where we are branching on something derived from
1223 // a PHI node in the current block. If we can prove that any predecessors
1224 // compute a predictable value based on a PHI node, thread those predecessors.
1225 if (processThreadableEdges(CondInst, BB, Preference, Terminator))
1226 return true;
1227
1228 // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in
1229 // the current block, see if we can simplify.
1230 PHINode *PN = dyn_cast<PHINode>(
1231 isa<FreezeInst>(CondInst) ? cast<FreezeInst>(CondInst)->getOperand(0)
1232 : CondInst);
1233
1234 if (PN && PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1235 return processBranchOnPHI(PN);
1236
1237 // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify.
1238 if (CondInst->getOpcode() == Instruction::Xor &&
1239 CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
1240 return processBranchOnXOR(cast<BinaryOperator>(CondInst));
1241
1242 // Search for a stronger dominating condition that can be used to simplify a
1243 // conditional branch leaving BB.
1244 if (processImpliedCondition(BB))
1245 return true;
1246
1247 return false;
1248}
1249
1250bool JumpThreadingPass::processImpliedCondition(BasicBlock *BB) {
1251 auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
1252 if (!BI || !BI->isConditional())
1253 return false;
1254
1255 Value *Cond = BI->getCondition();
1256 BasicBlock *CurrentBB = BB;
1257 BasicBlock *CurrentPred = BB->getSinglePredecessor();
1258 unsigned Iter = 0;
1259
1260 auto &DL = BB->getModule()->getDataLayout();
1261
1262 while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
1263 auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
1264 if (!PBI || !PBI->isConditional())
1265 return false;
1266 if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
1267 return false;
1268
1269 bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB;
1270 Optional<bool> Implication =
1271 isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue);
1272 if (Implication) {
1273 BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1);
1274 BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0);
1275 RemoveSucc->removePredecessor(BB);
1276 BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI);
1277 UncondBI->setDebugLoc(BI->getDebugLoc());
1278 ++NumFolds;
1279 BI->eraseFromParent();
1280 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}});
1281 if (HasProfileData)
1282 BPI->eraseBlock(BB);
1283 return true;
1284 }
1285 CurrentBB = CurrentPred;
1286 CurrentPred = CurrentBB->getSinglePredecessor();
1287 }
1288
1289 return false;
1290}
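
A hedged illustration of the fold above (hypothetical IR, written in the same comment style the file itself uses): if the sole predecessor branches on a strictly stronger condition, entering BB on the true edge makes BB's own condition provably true, and the conditional branch collapses.

  // pred:
  //   %c1 = icmp sgt i32 %a, 10
  //   br i1 %c1, label %bb, label %exit
  // bb:                                    ; sole predecessor is %pred
  //   %c2 = icmp sgt i32 %a, 5             ; %a > 10 implies %a > 5
  //   br i1 %c2, label %then, label %else
  //
  // isImpliedCondition(%c1, %c2, DL, /*CondIsTrue=*/true) returns true, so
  // the branch in %bb is rewritten to 'br label %then' and the %bb -> %else
  // edge is deleted from the dominator tree.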
1291
1292/// Return true if Op is an instruction defined in the given block.
1293static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) {
1294 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
13. Assuming 'OpInst' is null
14. Taking false branch
1295 if (OpInst->getParent() == BB)
1296 return true;
1297 return false;
15. Returning zero, which participates in a condition later
1298}
1299
1300/// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially
1301/// redundant load instruction, eliminate it by replacing it with a PHI node.
1302/// This is an important optimization that encourages jump threading, and needs
1303/// to be run interlaced with other jump threading tasks.
1304bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {
1305 // Don't hack volatile and ordered loads.
1306 if (!LoadI->isUnordered()) return false;
1. Calling 'LoadInst::isUnordered'
6. Returning from 'LoadInst::isUnordered'
7. Taking false branch
1307
1308 // If the load is defined in a block with exactly one predecessor, it can't be
1309 // partially redundant.
1310 BasicBlock *LoadBB = LoadI->getParent();
1311 if (LoadBB->getSinglePredecessor())
8. Assuming the condition is false
9. Taking false branch
1312 return false;
1313
1314 // If the load is defined in an EH pad, it can't be partially redundant,
1315 // because the edges between the invoke and the EH pad cannot have other
1316 // instructions between them.
1317 if (LoadBB->isEHPad())
10. Assuming the condition is false
11. Taking false branch
1318 return false;
1319
1320 Value *LoadedPtr = LoadI->getOperand(0);
1321
1322  // If the loaded operand is defined in the LoadBB and it's not a phi,
1323 // it can't be available in predecessors.
1324 if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr))
12. Calling 'isOpDefinedInBlock'
16. Returning from 'isOpDefinedInBlock'
1325 return false;
1326
1327 // Scan a few instructions up from the load, to see if it is obviously live at
1328 // the entry to its block.
1329 BasicBlock::iterator BBIt(LoadI);
1330 bool IsLoadCSE;
1331 if (Value *AvailableVal = FindAvailableLoadedValue(
17. Assuming 'AvailableVal' is null
18. Taking false branch
1332 LoadI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) {
1333 // If the value of the load is locally available within the block, just use
1334 // it. This frequently occurs for reg2mem'd allocas.
1335
1336 if (IsLoadCSE) {
1337 LoadInst *NLoadI = cast<LoadInst>(AvailableVal);
1338 combineMetadataForCSE(NLoadI, LoadI, false);
1339    }
1340
1341 // If the returned value is the load itself, replace with an undef. This can
1342 // only happen in dead loops.
1343 if (AvailableVal == LoadI)
1344 AvailableVal = UndefValue::get(LoadI->getType());
1345 if (AvailableVal->getType() != LoadI->getType())
1346 AvailableVal = CastInst::CreateBitOrPointerCast(
1347 AvailableVal, LoadI->getType(), "", LoadI);
1348 LoadI->replaceAllUsesWith(AvailableVal);
1349 LoadI->eraseFromParent();
1350 return true;
1351 }
1352
1353 // Otherwise, if we scanned the whole block and got to the top of the block,
1354 // we know the block is locally transparent to the load. If not, something
1355 // might clobber its value.
1356 if (BBIt != LoadBB->begin())
19. Calling 'operator!='
22. Returning from 'operator!='
23. Taking false branch
1357 return false;
1358
1359 // If all of the loads and stores that feed the value have the same AA tags,
1360 // then we can propagate them onto any newly inserted loads.
1361 AAMDNodes AATags = LoadI->getAAMetadata();
1362
1363 SmallPtrSet<BasicBlock*, 8> PredsScanned;
1364
1365 using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>;
1366
1367 AvailablePredsTy AvailablePreds;
1368 BasicBlock *OneUnavailablePred = nullptr;
24. 'OneUnavailablePred' initialized to a null pointer value
1369 SmallVector<LoadInst*, 8> CSELoads;
1370
1371 // If we got here, the loaded value is transparent through to the start of the
1372 // block. Check to see if it is available in any of the predecessor blocks.
1373 for (BasicBlock *PredBB : predecessors(LoadBB)) {
1374 // If we already scanned this predecessor, skip it.
1375 if (!PredsScanned.insert(PredBB).second)
1376 continue;
1377
1378 BBIt = PredBB->end();
1379 unsigned NumScanedInst = 0;
1380 Value *PredAvailable = nullptr;
1381    // NOTE: We don't CSE a load that is volatile or anything stronger than
1382    // unordered; that should have been checked when we entered the function.
1383    assert(LoadI->isUnordered() &&
1384           "Attempting to CSE volatile or atomic loads");
1385 // If this is a load on a phi pointer, phi-translate it and search
1386 // for available load/store to the pointer in predecessors.
1387 Type *AccessTy = LoadI->getType();
1388 const auto &DL = LoadI->getModule()->getDataLayout();
1389 MemoryLocation Loc(LoadedPtr->DoPHITranslation(LoadBB, PredBB),
1390 LocationSize::precise(DL.getTypeStoreSize(AccessTy)),
1391 AATags);
1392 PredAvailable = findAvailablePtrLoadStore(Loc, AccessTy, LoadI->isAtomic(),
1393 PredBB, BBIt, DefMaxInstsToScan,
1394 AA, &IsLoadCSE, &NumScanedInst);
1395
1396 // If PredBB has a single predecessor, continue scanning through the
1397 // single predecessor.
1398 BasicBlock *SinglePredBB = PredBB;
1399 while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
1400 NumScanedInst < DefMaxInstsToScan) {
1401 SinglePredBB = SinglePredBB->getSinglePredecessor();
1402 if (SinglePredBB) {
1403 BBIt = SinglePredBB->end();
1404 PredAvailable = findAvailablePtrLoadStore(
1405 Loc, AccessTy, LoadI->isAtomic(), SinglePredBB, BBIt,
1406 (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE,
1407 &NumScanedInst);
1408 }
1409 }
1410
1411 if (!PredAvailable) {
1412 OneUnavailablePred = PredBB;
1413 continue;
1414 }
1415
1416 if (IsLoadCSE)
1417 CSELoads.push_back(cast<LoadInst>(PredAvailable));
1418
1419 // If so, this load is partially redundant. Remember this info so that we
1420 // can create a PHI node.
1421 AvailablePreds.emplace_back(PredBB, PredAvailable);
1422 }
1423
1424 // If the loaded value isn't available in any predecessor, it isn't partially
1425 // redundant.
1426 if (AvailablePreds.empty()) return false;
25. Calling 'SmallVectorBase::empty'
28. Returning from 'SmallVectorBase::empty'
29. Taking false branch
1427
1428 // Okay, the loaded value is available in at least one (and maybe all!)
1429 // predecessors. If the value is unavailable in more than one unique
1430 // predecessor, we want to insert a merge block for those common predecessors.
1431 // This ensures that we only have to insert one reload, thus not increasing
1432 // code size.
1433 BasicBlock *UnavailablePred = nullptr;
1434
1435  // If the value is unavailable in one of the predecessors, we will end up
1436  // inserting a new instruction into that block. It is only valid if all the
1437 // instructions before LoadI are guaranteed to pass execution to its
1438 // successor, or if LoadI is safe to speculate.
1439 // TODO: If this logic becomes more complex, and we will perform PRE insertion
1440 // farther than to a predecessor, we need to reuse the code from GVN's PRE.
1441  // It requires dominator tree analysis, which for this simple case is
1442  // overkill.
1443 if (PredsScanned.size() != AvailablePreds.size() &&
30. Assuming the condition is false
1444 !isSafeToSpeculativelyExecute(LoadI))
1445 for (auto I = LoadBB->begin(); &*I != LoadI; ++I)
1446 if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
1447 return false;
1448
1449 // If there is exactly one predecessor where the value is unavailable, the
1450 // already computed 'OneUnavailablePred' block is it. If it ends in an
1451 // unconditional branch, we know that it isn't a critical edge.
1452 if (PredsScanned.size() == AvailablePreds.size()+1 &&
31. Assuming the condition is true
1453 OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
32. Called C++ object pointer is null
1454 UnavailablePred = OneUnavailablePred;
1455 } else if (PredsScanned.size() != AvailablePreds.size()) {
1456 // Otherwise, we had multiple unavailable predecessors or we had a critical
1457 // edge from the one.
1458 SmallVector<BasicBlock*, 8> PredsToSplit;
1459 SmallPtrSet<BasicBlock*, 8> AvailablePredSet;
1460
1461 for (const auto &AvailablePred : AvailablePreds)
1462 AvailablePredSet.insert(AvailablePred.first);
1463
1464 // Add all the unavailable predecessors to the PredsToSplit list.
1465 for (BasicBlock *P : predecessors(LoadBB)) {
1466 // If the predecessor is an indirect goto, we can't split the edge.
1467 // Same for CallBr.
1468 if (isa<IndirectBrInst>(P->getTerminator()) ||
1469 isa<CallBrInst>(P->getTerminator()))
1470 return false;
1471
1472 if (!AvailablePredSet.count(P))
1473 PredsToSplit.push_back(P);
1474 }
1475
1476 // Split them out to their own block.
1477 UnavailablePred = splitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
1478 }
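
The statement flagged above (steps 24, 31, 32) dereferences 'OneUnavailablePred', which on the reported path still holds the null pointer it was initialized with at line 1368: the analyzer assumes the size comparison at line 1452 is true without modeling that the scan loop assigns 'OneUnavailablePred' exactly when a scanned predecessor has no available value. A minimal defensive sketch that breaks the reported path (an illustration, not the upstream fix):

  // Hypothetical guard: take the single-unavailable-pred shortcut only when
  // the scan loop above actually recorded that predecessor.
  if (PredsScanned.size() == AvailablePreds.size() + 1 && OneUnavailablePred &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // ... unchanged ...
  }

In practice the two conditions correlate (one more scanned predecessor than available values means exactly one iteration took the '!PredAvailable' path and set 'OneUnavailablePred'), so the added check documents an invariant the analyzer cannot see rather than changing behavior.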
1479
1480 // If the value isn't available in all predecessors, then there will be
1481 // exactly one where it isn't available. Insert a load on that edge and add
1482 // it to the AvailablePreds list.
1483 if (UnavailablePred) {
1484    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
1485           "Can't handle critical edge here!");
1486 LoadInst *NewVal = new LoadInst(
1487 LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
1488 LoadI->getName() + ".pr", false, LoadI->getAlign(),
1489 LoadI->getOrdering(), LoadI->getSyncScopeID(),
1490 UnavailablePred->getTerminator());
1491 NewVal->setDebugLoc(LoadI->getDebugLoc());
1492 if (AATags)
1493 NewVal->setAAMetadata(AATags);
1494
1495 AvailablePreds.emplace_back(UnavailablePred, NewVal);
1496 }
1497
1498 // Now we know that each predecessor of this block has a value in
1499 // AvailablePreds, sort them for efficient access as we're walking the preds.
1500 array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());
1501
1502 // Create a PHI node at the start of the block for the PRE'd load value.
1503 pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
1504 PHINode *PN = PHINode::Create(LoadI->getType(), std::distance(PB, PE), "",
1505 &LoadBB->front());
1506 PN->takeName(LoadI);
1507 PN->setDebugLoc(LoadI->getDebugLoc());
1508
1509 // Insert new entries into the PHI for each predecessor. A single block may
1510 // have multiple entries here.
1511 for (pred_iterator PI = PB; PI != PE; ++PI) {
1512 BasicBlock *P = *PI;
1513 AvailablePredsTy::iterator I =
1514 llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr));
1515
1516    assert(I != AvailablePreds.end() && I->first == P &&
1517           "Didn't find entry for predecessor!");
1518
1519 // If we have an available predecessor but it requires casting, insert the
1520 // cast in the predecessor and use the cast. Note that we have to update the
1521 // AvailablePreds vector as we go so that all of the PHI entries for this
1522 // predecessor use the same bitcast.
1523 Value *&PredV = I->second;
1524 if (PredV->getType() != LoadI->getType())
1525 PredV = CastInst::CreateBitOrPointerCast(PredV, LoadI->getType(), "",
1526 P->getTerminator());
1527
1528 PN->addIncoming(PredV, I->first);
1529 }
1530
1531 for (LoadInst *PredLoadI : CSELoads) {
1532 combineMetadataForCSE(PredLoadI, LoadI, true);
1533 }
1534
1535 LoadI->replaceAllUsesWith(PN);
1536 LoadI->eraseFromParent();
1537
1538 return true;
1539}
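
A hedged sketch of the transformation this function performs (hypothetical IR): the loaded value is available from %left, where a store forwards it, but not from %right, so a reload is inserted on the unavailable edge and the load in the block becomes a PHI.

  // left:
  //   store i32 0, i32* %p
  //   br label %merge
  // right:
  //   br label %merge                  ; *%p not available here
  // merge:
  //   %v = load i32, i32* %p           ; partially redundant
  //
  // becomes, after inserting the '.pr' reload on the %right edge:
  //
  // right:
  //   %v.pr = load i32, i32* %p
  //   br label %merge
  // merge:
  //   %v = phi i32 [ 0, %left ], [ %v.pr, %right ]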
1540
1541/// findMostPopularDest - The specified list contains multiple possible
1542/// threadable destinations. Pick the one that occurs the most frequently in
1543/// the list.
1544static BasicBlock *
1545findMostPopularDest(BasicBlock *BB,
1546 const SmallVectorImpl<std::pair<BasicBlock *,
1547 BasicBlock *>> &PredToDestList) {
1548  assert(!PredToDestList.empty());
1549
1550 // Determine popularity. If there are multiple possible destinations, we
1551 // explicitly choose to ignore 'undef' destinations. We prefer to thread
1552 // blocks with known and real destinations to threading undef. We'll handle
1553 // them later if interesting.
1554 MapVector<BasicBlock *, unsigned> DestPopularity;
1555
1556 // Populate DestPopularity with the successors in the order they appear in the
1557 // successor list. This way, we ensure determinism by iterating it in the
1558 // same order in std::max_element below. We map nullptr to 0 so that we can
1559 // return nullptr when PredToDestList contains nullptr only.
1560 DestPopularity[nullptr] = 0;
1561 for (auto *SuccBB : successors(BB))
1562 DestPopularity[SuccBB] = 0;
1563
1564 for (const auto &PredToDest : PredToDestList)
1565 if (PredToDest.second)
1566 DestPopularity[PredToDest.second]++;
1567
1568 // Find the most popular dest.
1569 using VT = decltype(DestPopularity)::value_type;
1570 auto MostPopular = std::max_element(
1571 DestPopularity.begin(), DestPopularity.end(),
1572 [](const VT &L, const VT &R) { return L.second < R.second; });
1573
1574 // Okay, we have finally picked the most popular destination.
1575 return MostPopular->first;
1576}
1577
1578// Try to evaluate the value of V when the control flows from PredPredBB to
1579// BB->getSinglePredecessor() and then on to BB.
1580Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB,
1581 BasicBlock *PredPredBB,
1582 Value *V) {
1583 BasicBlock *PredBB = BB->getSinglePredecessor();
1584 assert(PredBB && "Expected a single predecessor")(static_cast <bool> (PredBB && "Expected a single predecessor"
) ? void (0) : __assert_fail ("PredBB && \"Expected a single predecessor\""
, "llvm/lib/Transforms/Scalar/JumpThreading.cpp", 1584, __extension__
__PRETTY_FUNCTION__))
;
1585
1586 if (Constant *Cst = dyn_cast<Constant>(V)) {
1587 return Cst;
1588 }
1589
1590 // Consult LVI if V is not an instruction in BB or PredBB.
1591 Instruction *I = dyn_cast<Instruction>(V);
1592 if (!I || (I->getParent() != BB && I->getParent() != PredBB)) {
1593 return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr);
1594 }
1595
1596 // Look into a PHI argument.
1597 if (PHINode *PHI = dyn_cast<PHINode>(V)) {
1598 if (PHI->getParent() == PredBB)
1599 return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB));
1600 return nullptr;
1601 }
1602
1603 // If we have a CmpInst, try to fold it for each incoming edge into PredBB.
1604 if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) {
1605 if (CondCmp->getParent() == BB) {
1606 Constant *Op0 =
1607 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0));
1608 Constant *Op1 =
1609 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1));
1610 if (Op0 && Op1) {
1611 return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1);
1612 }
1613 }
1614 return nullptr;
1615 }
1616
1617 return nullptr;
1618}
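
A hedged illustration of the PHI-translation case (hypothetical IR):

  // predpred:
  //   br label %pred
  // pred:                               ; BB's single predecessor
  //   %phi = phi i32 [ 7, %predpred ], [ %other, %altpred ]
  //   br i1 %c, label %bb, label %exit
  // bb:
  //   %cmp = icmp eq i32 %phi, 7
  //
  // evaluateOnPredecessorEdge(%bb, %predpred, %cmp) resolves %phi to 7 along
  // the %predpred -> %pred edge and folds the compare to 'i1 true'.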
1619
1620bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB,
1621 ConstantPreference Preference,
1622 Instruction *CxtI) {
1623 // If threading this would thread across a loop header, don't even try to
1624 // thread the edge.
1625 if (LoopHeaders.count(BB))
1626 return false;
1627
1628 PredValueInfoTy PredValues;
1629 if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference,
1630 CxtI)) {
1631 // We don't have known values in predecessors. See if we can thread through
1632 // BB and its sole predecessor.
1633 return maybethreadThroughTwoBasicBlocks(BB, Cond);
1634 }
1635
1636  assert(!PredValues.empty() &&
1637         "computeValueKnownInPredecessors returned true with no values");
1638
1639  LLVM_DEBUG(dbgs() << "IN BB: " << *BB;
1640             for (const auto &PredValue : PredValues) {
1641               dbgs() << "  BB '" << BB->getName()
1642                      << "': FOUND condition = " << *PredValue.first
1643                      << " for pred '" << PredValue.second->getName() << "'.\n";
1644             });
1645
1646 // Decide what we want to thread through. Convert our list of known values to
1647 // a list of known destinations for each pred. This also discards duplicate
1648 // predecessors and keeps track of the undefined inputs (which are represented
1649 // as a null dest in the PredToDestList).
1650 SmallPtrSet<BasicBlock*, 16> SeenPreds;
1651 SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
1652
1653 BasicBlock *OnlyDest = nullptr;
1654 BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
1655 Constant *OnlyVal = nullptr;
1656 Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;
1657
1658 for (const auto &PredValue : PredValues) {
1659 BasicBlock *Pred = PredValue.second;
1660 if (!SeenPreds.insert(Pred).second)
1661 continue; // Duplicate predecessor entry.
1662
1663 Constant *Val = PredValue.first;
1664
1665 BasicBlock *DestBB;
1666 if (isa<UndefValue>(Val))
1667 DestBB = nullptr;
1668 else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
1669      assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1670 DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
1671 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
1672      assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1673 DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
1674 } else {
1675      assert(isa<IndirectBrInst>(BB->getTerminator())
1676             && "Unexpected terminator");
1677      assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
1678 DestBB = cast<BlockAddress>(Val)->getBasicBlock();
1679 }
1680
1681 // If we have exactly one destination, remember it for efficiency below.
1682 if (PredToDestList.empty()) {
1683 OnlyDest = DestBB;
1684 OnlyVal = Val;
1685 } else {
1686 if (OnlyDest != DestBB)
1687 OnlyDest = MultipleDestSentinel;
1688      // It's possible we have the same destination but a different value,
1689      // e.g. the default case in a switchinst.
1690 if (Val != OnlyVal)
1691 OnlyVal = MultipleVal;
1692 }
1693
1694 // If the predecessor ends with an indirect goto, we can't change its
1695 // destination. Same for CallBr.
1696 if (isa<IndirectBrInst>(Pred->getTerminator()) ||
1697 isa<CallBrInst>(Pred->getTerminator()))
1698 continue;
1699
1700 PredToDestList.emplace_back(Pred, DestBB);
1701 }
1702
1703 // If all edges were unthreadable, we fail.
1704 if (PredToDestList.empty())
1705 return false;
1706
1707 // If all the predecessors go to a single known successor, we want to fold,
1708  // not thread. By doing so, we do not need to duplicate the current block,
1709  // nor do we miss potential opportunities in case we don't/can't duplicate.
1710 if (OnlyDest && OnlyDest != MultipleDestSentinel) {
1711 if (BB->hasNPredecessors(PredToDestList.size())) {
1712 bool SeenFirstBranchToOnlyDest = false;
1713 std::vector <DominatorTree::UpdateType> Updates;
1714 Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1);
1715 for (BasicBlock *SuccBB : successors(BB)) {
1716 if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) {
1717 SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
1718 } else {
1719 SuccBB->removePredecessor(BB, true); // This is unreachable successor.
1720 Updates.push_back({DominatorTree::Delete, BB, SuccBB});
1721 }
1722 }
1723
1724 // Finally update the terminator.
1725 Instruction *Term = BB->getTerminator();
1726 BranchInst::Create(OnlyDest, Term);
1727 ++NumFolds;
1728 Term->eraseFromParent();
1729 DTU->applyUpdatesPermissive(Updates);
1730 if (HasProfileData)
1731 BPI->eraseBlock(BB);
1732
1733 // If the condition is now dead due to the removal of the old terminator,
1734 // erase it.
1735 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
1736 if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
1737 CondInst->eraseFromParent();
1738 // We can safely replace *some* uses of the CondInst if it has
1739 // exactly one value as returned by LVI. RAUW is incorrect in the
1740 // presence of guards and assumes, that have the `Cond` as the use. This
1741 // is because we use the guards/assume to reason about the `Cond` value
1742 // at the end of block, but RAUW unconditionally replaces all uses
1743 // including the guards/assumes themselves and the uses before the
1744 // guard/assume.
1745 else if (OnlyVal && OnlyVal != MultipleVal &&
1746 CondInst->getParent() == BB)
1747 replaceFoldableUses(CondInst, OnlyVal);
1748 }
1749 return true;
1750 }
1751 }
1752
1753 // Determine which is the most common successor. If we have many inputs and
1754 // this block is a switch, we want to start by threading the batch that goes
1755 // to the most popular destination first. If we only know about one
1756 // threadable destination (the common case) we can avoid this.
1757 BasicBlock *MostPopularDest = OnlyDest;
1758
1759 if (MostPopularDest == MultipleDestSentinel) {
1760    // Remove any loop headers from the Dest list; threadEdge conservatively
1761    // won't process them, but we might have other destinations that are
1762    // eligible and that we still want to process.
1763 erase_if(PredToDestList,
1764 [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) {
1765 return LoopHeaders.contains(PredToDest.second);
1766 });
1767
1768 if (PredToDestList.empty())
1769 return false;
1770
1771 MostPopularDest = findMostPopularDest(BB, PredToDestList);
1772 }
1773
1774 // Now that we know what the most popular destination is, factor all
1775 // predecessors that will jump to it into a single predecessor.
1776 SmallVector<BasicBlock*, 16> PredsToFactor;
1777 for (const auto &PredToDest : PredToDestList)
1778 if (PredToDest.second == MostPopularDest) {
1779 BasicBlock *Pred = PredToDest.first;
1780
1781 // This predecessor may be a switch or something else that has multiple
1782 // edges to the block. Factor each of these edges by listing them
1783 // according to # occurrences in PredsToFactor.
1784 for (BasicBlock *Succ : successors(Pred))
1785 if (Succ == BB)
1786 PredsToFactor.push_back(Pred);
1787 }
1788
1789 // If the threadable edges are branching on an undefined value, we get to pick
1790 // the destination that these predecessors should get to.
1791 if (!MostPopularDest)
1792 MostPopularDest = BB->getTerminator()->
1793 getSuccessor(getBestDestForJumpOnUndef(BB));
1794
1795 // Ok, try to thread it!
1796 return tryThreadEdge(BB, PredsToFactor, MostPopularDest);
1797}
1798
1799/// processBranchOnPHI - We have an otherwise unthreadable conditional branch on
1800/// a PHI node (or freeze PHI) in the current block. See if there are any
1801/// simplifications we can do based on inputs to the phi node.
1802bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) {
1803 BasicBlock *BB = PN->getParent();
1804
1805 // TODO: We could make use of this to do it once for blocks with common PHI
1806 // values.
1807 SmallVector<BasicBlock*, 1> PredBBs;
1808 PredBBs.resize(1);
1809
1810 // If any of the predecessor blocks end in an unconditional branch, we can
1811 // *duplicate* the conditional branch into that block in order to further
1812 // encourage jump threading and to eliminate cases where we have branch on a
1813 // phi of an icmp (branch on icmp is much better).
1814 // This is still beneficial when a frozen phi is used as the branch condition
1815 // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp))
1816 // to br(icmp(freeze ...)).
1817 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1818 BasicBlock *PredBB = PN->getIncomingBlock(i);
1819 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
1820 if (PredBr->isUnconditional()) {
1821 PredBBs[0] = PredBB;
1822 // Try to duplicate BB into PredBB.
1823 if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs))
1824 return true;
1825 }
1826 }
1827
1828 return false;
1829}
1830
1831/// processBranchOnXOR - We have an otherwise unthreadable conditional branch on
1832/// a xor instruction in the current block. See if there are any
1833/// simplifications we can do based on inputs to the xor.
1834bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) {
1835 BasicBlock *BB = BO->getParent();
1836
1837 // If either the LHS or RHS of the xor is a constant, don't do this
1838 // optimization.
1839 if (isa<ConstantInt>(BO->getOperand(0)) ||
1840 isa<ConstantInt>(BO->getOperand(1)))
1841 return false;
1842
1843 // If the first instruction in BB isn't a phi, we won't be able to infer
1844 // anything special about any particular predecessor.
1845 if (!isa<PHINode>(BB->front()))
1846 return false;
1847
1848 // If this BB is a landing pad, we won't be able to split the edge into it.
1849 if (BB->isEHPad())
1850 return false;
1851
1852 // If we have a xor as the branch input to this block, and we know that the
1853 // LHS or RHS of the xor in any predecessor is true/false, then we can clone
1854 // the condition into the predecessor and fix that value to true, saving some
1855 // logical ops on that path and encouraging other paths to simplify.
1856 //
1857 // This copies something like this:
1858 //
1859 // BB:
1860 // %X = phi i1 [1], [%X']
1861 // %Y = icmp eq i32 %A, %B
1862 // %Z = xor i1 %X, %Y
1863 // br i1 %Z, ...
1864 //
1865 // Into:
1866 // BB':
1867 // %Y = icmp ne i32 %A, %B
1868 // br i1 %Y, ...
1869
1870 PredValueInfoTy XorOpValues;
1871 bool isLHS = true;
1872 if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
1873 WantInteger, BO)) {
1874    assert(XorOpValues.empty());
1875 if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
1876 WantInteger, BO))
1877 return false;
1878 isLHS = false;
1879 }
1880
1881  assert(!XorOpValues.empty() &&
1882         "computeValueKnownInPredecessors returned true with no values");
1883
1884 // Scan the information to see which is most popular: true or false. The
1885 // predecessors can be of the set true, false, or undef.
1886 unsigned NumTrue = 0, NumFalse = 0;
1887 for (const auto &XorOpValue : XorOpValues) {
1888 if (isa<UndefValue>(XorOpValue.first))
1889 // Ignore undefs for the count.
1890 continue;
1891 if (cast<ConstantInt>(XorOpValue.first)->isZero())
1892 ++NumFalse;
1893 else
1894 ++NumTrue;
1895 }
1896
1897 // Determine which value to split on, true, false, or undef if neither.
1898 ConstantInt *SplitVal = nullptr;
1899 if (NumTrue > NumFalse)
1900 SplitVal = ConstantInt::getTrue(BB->getContext());
1901 else if (NumTrue != 0 || NumFalse != 0)
1902 SplitVal = ConstantInt::getFalse(BB->getContext());
1903
1904 // Collect all of the blocks that this can be folded into so that we can
1905 // factor this once and clone it once.
1906 SmallVector<BasicBlock*, 8> BlocksToFoldInto;
1907 for (const auto &XorOpValue : XorOpValues) {
1908 if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
1909 continue;
1910
1911 BlocksToFoldInto.push_back(XorOpValue.second);
1912 }
1913
1914 // If we inferred a value for all of the predecessors, then duplication won't
1915 // help us. However, we can just replace the LHS or RHS with the constant.
1916 if (BlocksToFoldInto.size() ==
1917 cast<PHINode>(BB->front()).getNumIncomingValues()) {
1918 if (!SplitVal) {
1919 // If all preds provide undef, just nuke the xor, because it is undef too.
1920 BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
1921 BO->eraseFromParent();
1922 } else if (SplitVal->isZero()) {
1923 // If all preds provide 0, replace the xor with the other input.
1924 BO->replaceAllUsesWith(BO->getOperand(isLHS));
1925 BO->eraseFromParent();
1926 } else {
1927 // If all preds provide 1, set the computed value to 1.
1928 BO->setOperand(!isLHS, SplitVal);
1929 }
1930
1931 return true;
1932 }
1933
1934  // If any of the predecessors ends with an indirect goto, we can't change
1935  // its destination. Same for CallBr.
1936 if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) {
1937 return isa<IndirectBrInst>(Pred->getTerminator()) ||
1938 isa<CallBrInst>(Pred->getTerminator());
1939 }))
1940 return false;
1941
1942 // Try to duplicate BB into PredBB.
1943 return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
1944}
1945
1946/// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
1947/// predecessor to the PHIBB block. If it has PHI nodes, add entries for
1948/// NewPred using the entries from OldPred (suitably mapped).
1949static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
1950 BasicBlock *OldPred,
1951 BasicBlock *NewPred,
1952 DenseMap<Instruction*, Value*> &ValueMap) {
1953 for (PHINode &PN : PHIBB->phis()) {
1954 // Ok, we have a PHI node. Figure out what the incoming value was for the
1955 // DestBlock.
1956 Value *IV = PN.getIncomingValueForBlock(OldPred);
1957
1958 // Remap the value if necessary.
1959 if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
1960 DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
1961 if (I != ValueMap.end())
1962 IV = I->second;
1963 }
1964
1965 PN.addIncoming(IV, NewPred);
1966 }
1967}
1968
1969/// Merge basic block BB into its sole predecessor if possible.
1970bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) {
1971 BasicBlock *SinglePred = BB->getSinglePredecessor();
1972 if (!SinglePred)
1973 return false;
1974
1975 const Instruction *TI = SinglePred->getTerminator();
1976 if (TI->isExceptionalTerminator() || TI->getNumSuccessors() != 1 ||
1977 SinglePred == BB || hasAddressTakenAndUsed(BB))
1978 return false;
1979
1980 // If SinglePred was a loop header, BB becomes one.
1981 if (LoopHeaders.erase(SinglePred))
1982 LoopHeaders.insert(BB);
1983
1984 LVI->eraseBlock(SinglePred);
1985 MergeBasicBlockIntoOnlyPred(BB, DTU);
1986
1987 // Now that BB is merged into SinglePred (i.e. SinglePred code followed by
1988 // BB code within one basic block `BB`), we need to invalidate the LVI
1989 // information associated with BB, because the LVI information need not be
1990 // true for all of BB after the merge. For example,
1991 // Before the merge, LVI info and code is as follows:
1992 // SinglePred: <LVI info1 for %p val>
1993 // %y = use of %p
1994 // call @exit() // need not transfer execution to successor.
1995 // assume(%p) // from this point on %p is true
1996 // br label %BB
1997 // BB: <LVI info2 for %p val, i.e. %p is true>
1998 // %x = use of %p
1999 // br label exit
2000 //
2001  // Note that this LVI info for blocks BB and SinglePred is correct for %p
2002  // (info2 and info1 respectively). After the merge and the deletion of the
2003  // LVI info1 for SinglePred, we have the following code:
2004 // BB: <LVI info2 for %p val>
2005 // %y = use of %p
2006 // call @exit()
2007 // assume(%p)
2008 // %x = use of %p <-- LVI info2 is correct from here onwards.
2009 // br label exit
2010 // LVI info2 for BB is incorrect at the beginning of BB.
2011
2012 // Invalidate LVI information for BB if the LVI is not provably true for
2013 // all of BB.
2014 if (!isGuaranteedToTransferExecutionToSuccessor(BB))
2015 LVI->eraseBlock(BB);
2016 return true;
2017}
2018
2019/// Update the SSA form. NewBB contains instructions that are copied from BB.
2020/// ValueMapping maps old values in BB to new ones in NewBB.
2021void JumpThreadingPass::updateSSA(
2022 BasicBlock *BB, BasicBlock *NewBB,
2023 DenseMap<Instruction *, Value *> &ValueMapping) {
2024 // If there were values defined in BB that are used outside the block, then we
2025 // now have to update all uses of the value to use either the original value,
2026  // the cloned value, or some PHI-derived value. This can require arbitrary
2027  // PHI insertion, which we are prepared to do; clean these up now.
2028 SSAUpdater SSAUpdate;
2029 SmallVector<Use *, 16> UsesToRename;
2030
2031 for (Instruction &I : *BB) {
2032 // Scan all uses of this instruction to see if it is used outside of its
2033 // block, and if so, record them in UsesToRename.
2034 for (Use &U : I.uses()) {
2035 Instruction *User = cast<Instruction>(U.getUser());
2036 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
2037 if (UserPN->getIncomingBlock(U) == BB)
2038 continue;
2039 } else if (User->getParent() == BB)
2040 continue;
2041
2042 UsesToRename.push_back(&U);
2043 }
2044
2045 // If there are no uses outside the block, we're done with this instruction.
2046 if (UsesToRename.empty())
2047 continue;
2048    LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
2049
2050 // We found a use of I outside of BB. Rename all uses of I that are outside
2051 // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
2052 // with the two values we know.
2053 SSAUpdate.Initialize(I.getType(), I.getName());
2054 SSAUpdate.AddAvailableValue(BB, &I);
2055 SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);
2056
2057 while (!UsesToRename.empty())
2058 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
2059    LLVM_DEBUG(dbgs() << "\n");
2060 }
2061}
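
A hedged sketch of why this rewrite is needed (hypothetical IR): once BB has been cloned, a value defined in BB no longer dominates uses in later blocks, so SSAUpdater merges the original and the clone with a PHI.

  // bb:                          ; original block
  //   %x = add i32 %a, 1
  // bb.thread:                   ; clone; ValueMapping[%x] == %x.thread
  //   %x.thread = add i32 %a, 1
  // join:                        ; block with a use of %x outside bb
  //   %x.merge = phi i32 [ %x, %bb ], [ %x.thread, %bb.thread ]
  //   %y = mul i32 %x.merge, 2   ; rewritten from 'mul i32 %x, 2'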
2062
2063/// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone
2064/// arguments that come from PredBB. Return the map from the variables in the
2065/// source basic block to the variables in the newly created basic block.
2066DenseMap<Instruction *, Value *>
2067JumpThreadingPass::cloneInstructions(BasicBlock::iterator BI,
2068 BasicBlock::iterator BE, BasicBlock *NewBB,
2069 BasicBlock *PredBB) {
2070 // We are going to have to map operands from the source basic block to the new
2071 // copy of the block 'NewBB'. If there are PHI nodes in the source basic
2072 // block, evaluate them to account for entry from PredBB.
2073 DenseMap<Instruction *, Value *> ValueMapping;
2074
2075 // Clone the phi nodes of the source basic block into NewBB. The resulting
2076 // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater
2077 // might need to rewrite the operand of the cloned phi.
2078 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2079 PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB);
2080 NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB);
2081 ValueMapping[PN] = NewPN;
2082 }
2083
2084 // Clone noalias scope declarations in the threaded block. When threading a
2085  // loop exit, we would otherwise end up with two identical scope declarations
2086 // visible at the same time.
2087 SmallVector<MDNode *> NoAliasScopes;
2088 DenseMap<MDNode *, MDNode *> ClonedScopes;
2089 LLVMContext &Context = PredBB->getContext();
2090 identifyNoAliasScopesToClone(BI, BE, NoAliasScopes);
2091 cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context);
2092
2093 // Clone the non-phi instructions of the source basic block into NewBB,
2094 // keeping track of the mapping and using it to remap operands in the cloned
2095 // instructions.
2096 for (; BI != BE; ++BI) {
2097 Instruction *New = BI->clone();
2098 New->setName(BI->getName());
2099 NewBB->getInstList().push_back(New);
2100 ValueMapping[&*BI] = New;
2101 adaptNoAliasScopes(New, ClonedScopes, Context);
2102
2103 // Remap operands to patch up intra-block references.
2104 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2105 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
2106 DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst);
2107 if (I != ValueMapping.end())
2108 New->setOperand(i, I->second);
2109 }
2110 }
2111
2112 return ValueMapping;
2113}
2114
2115/// Attempt to thread through two successive basic blocks.
2116bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB,
2117 Value *Cond) {
2118 // Consider:
2119 //
2120 // PredBB:
2121 // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ]
2122 // %tobool = icmp eq i32 %cond, 0
2123 // br i1 %tobool, label %BB, label ...
2124 //
2125 // BB:
2126 // %cmp = icmp eq i32* %var, null
2127 // br i1 %cmp, label ..., label ...
2128 //
2129 // We don't know the value of %var at BB even if we know which incoming edge
2130 // we take to BB. However, once we duplicate PredBB for each of its incoming
2131 // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of
2132 // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB.
2133
2134 // Require that BB end with a Branch for simplicity.
2135 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
2136 if (!CondBr)
2137 return false;
2138
2139 // BB must have exactly one predecessor.
2140 BasicBlock *PredBB = BB->getSinglePredecessor();
2141 if (!PredBB)
2142 return false;
2143
2144 // Require that PredBB end with a conditional Branch. If PredBB ends with an
2145 // unconditional branch, we should be merging PredBB and BB instead. For
2146 // simplicity, we don't deal with a switch.
2147 BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
2148 if (!PredBBBranch || PredBBBranch->isUnconditional())
2149 return false;
2150
2151 // If PredBB has exactly one incoming edge, we don't gain anything by copying
2152 // PredBB.
2153 if (PredBB->getSinglePredecessor())
2154 return false;
2155
2156 // Don't thread through PredBB if it contains a successor edge to itself, in
2157 // which case we would infinite loop. Suppose we are threading an edge from
2158 // PredPredBB through PredBB and BB to SuccBB with PredBB containing a
2159 // successor edge to itself. If we allowed jump threading in this case, we
2160 // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since
2161 // PredBB.thread has a successor edge to PredBB, we would immediately come up
2162 // with another jump threading opportunity from PredBB.thread through PredBB
2163 // and BB to SuccBB. This jump threading would repeatedly occur. That is, we
2164 // would keep peeling one iteration from PredBB.
2165 if (llvm::is_contained(successors(PredBB), PredBB))
2166 return false;
2167
2168 // Don't thread across a loop header.
2169 if (LoopHeaders.count(PredBB))
2170 return false;
2171
2172 // Avoid complication with duplicating EH pads.
2173 if (PredBB->isEHPad())
2174 return false;
2175
2176 // Find a predecessor that we can thread. For simplicity, we only consider a
2177 // successor edge out of BB to which we thread exactly one incoming edge into
2178 // PredBB.
2179 unsigned ZeroCount = 0;
2180 unsigned OneCount = 0;
2181 BasicBlock *ZeroPred = nullptr;
2182 BasicBlock *OnePred = nullptr;
2183 for (BasicBlock *P : predecessors(PredBB)) {
2184 if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(
2185 evaluateOnPredecessorEdge(BB, P, Cond))) {
2186 if (CI->isZero()) {
2187 ZeroCount++;
2188 ZeroPred = P;
2189 } else if (CI->isOne()) {
2190 OneCount++;
2191 OnePred = P;
2192 }
2193 }
2194 }
2195
2196 // Disregard complicated cases where we have to thread multiple edges.
2197 BasicBlock *PredPredBB;
2198 if (ZeroCount == 1) {
2199 PredPredBB = ZeroPred;
2200 } else if (OneCount == 1) {
2201 PredPredBB = OnePred;
2202 } else {
2203 return false;
2204 }
2205
2206 BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred);
2207
2208 // If threading to the same block as we come from, we would infinite loop.
2209 if (SuccBB == BB) {
2210    LLVM_DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
2211                      << "' - would thread to self!\n");
2212 return false;
2213 }
2214
2215 // If threading this would thread across a loop header, don't thread the edge.
2216 // See the comments above findLoopHeaders for justifications and caveats.
2217 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2218    LLVM_DEBUG({
2219      bool BBIsHeader = LoopHeaders.count(BB);
2220      bool SuccIsHeader = LoopHeaders.count(SuccBB);
2221      dbgs() << "  Not threading across "
2222             << (BBIsHeader ? "loop header BB '" : "block BB '")
2223             << BB->getName() << "' to dest "
2224             << (SuccIsHeader ? "loop header BB '" : "block BB '")
2225             << SuccBB->getName()
2226             << "' - it might create an irreducible loop!\n";
2227    });
2228 return false;
2229 }
2230
2231 // Compute the cost of duplicating BB and PredBB.
2232 unsigned BBCost = getJumpThreadDuplicationCost(
2233 TTI, BB, BB->getTerminator(), BBDupThreshold);
2234 unsigned PredBBCost = getJumpThreadDuplicationCost(
2235 TTI, PredBB, PredBB->getTerminator(), BBDupThreshold);
2236
2237 // Give up if costs are too high. We need to check BBCost and PredBBCost
2238 // individually before checking their sum because getJumpThreadDuplicationCost
2239  // returns (unsigned)~0 for those basic blocks that cannot be duplicated.
2240 if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold ||
2241 BBCost + PredBBCost > BBDupThreshold) {
2242    LLVM_DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
2243                      << "' - Cost is too high: " << PredBBCost
2244                      << " for PredBB, " << BBCost << " for BB\n");
2245 return false;
2246 }
2247
2248 // Now we are ready to duplicate PredBB.
2249 threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB);
2250 return true;
2251}
2252
2253void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB,
2254 BasicBlock *PredBB,
2255 BasicBlock *BB,
2256 BasicBlock *SuccBB) {
2257  LLVM_DEBUG(dbgs() << "  Threading through '" << PredBB->getName() << "' and '"
2258                    << BB->getName() << "'\n");
2259
2260 BranchInst *CondBr = cast<BranchInst>(BB->getTerminator());
2261 BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator());
2262
2263 BasicBlock *NewBB =
2264 BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread",
2265 PredBB->getParent(), PredBB);
2266 NewBB->moveAfter(PredBB);
2267
2268 // Set the block frequency of NewBB.
2269 if (HasProfileData) {
2270 auto NewBBFreq = BFI->getBlockFreq(PredPredBB) *
2271 BPI->getEdgeProbability(PredPredBB, PredBB);
2272 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2273 }
2274
2275 // We are going to have to map operands from the original BB block to the new
2276 // copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate them
2277 // to account for entry from PredPredBB.
2278 DenseMap<Instruction *, Value *> ValueMapping =
2279 cloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB);
2280
2281 // Copy the edge probabilities from PredBB to NewBB.
2282 if (HasProfileData)
2283 BPI->copyEdgeProbabilities(PredBB, NewBB);
2284
2285 // Update the terminator of PredPredBB to jump to NewBB instead of PredBB.
2286 // This eliminates predecessors from PredPredBB, which requires us to simplify
2287 // any PHI nodes in PredBB.
2288 Instruction *PredPredTerm = PredPredBB->getTerminator();
2289 for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i)
2290 if (PredPredTerm->getSuccessor(i) == PredBB) {
2291 PredBB->removePredecessor(PredPredBB, true);
2292 PredPredTerm->setSuccessor(i, NewBB);
2293 }
2294
2295 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB,
2296 ValueMapping);
2297 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB,
2298 ValueMapping);
2299
2300 DTU->applyUpdatesPermissive(
2301 {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)},
2302 {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)},
2303 {DominatorTree::Insert, PredPredBB, NewBB},
2304 {DominatorTree::Delete, PredPredBB, PredBB}});
2305
2306 updateSSA(PredBB, NewBB, ValueMapping);
2307
2308 // Clean up things like PHI nodes with single operands, dead instructions,
2309 // etc.
2310 SimplifyInstructionsInBlock(NewBB, TLI);
2311 SimplifyInstructionsInBlock(PredBB, TLI);
2312
2313 SmallVector<BasicBlock *, 1> PredsToFactor;
2314 PredsToFactor.push_back(NewBB);
2315 threadEdge(BB, PredsToFactor, SuccBB);
2316}
2317
2318/// tryThreadEdge - Thread an edge if it's safe and profitable to do so.
2319bool JumpThreadingPass::tryThreadEdge(
2320 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs,
2321 BasicBlock *SuccBB) {
2322 // If threading to the same block as we come from, we would infinite loop.
2323 if (SuccBB == BB) {
2324    LLVM_DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
2325                      << "' - would thread to self!\n");
2326 return false;
2327 }
2328
2329 // If threading this would thread across a loop header, don't thread the edge.
2330 // See the comments above findLoopHeaders for justifications and caveats.
2331 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2332    LLVM_DEBUG({
2333      bool BBIsHeader = LoopHeaders.count(BB);
2334      bool SuccIsHeader = LoopHeaders.count(SuccBB);
2335      dbgs() << "  Not threading across "
2336             << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName()
2337             << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '")
2338             << SuccBB->getName() << "' - it might create an irreducible loop!\n";
2339    });
2340 return false;
2341 }
2342
2343 unsigned JumpThreadCost = getJumpThreadDuplicationCost(
2344 TTI, BB, BB->getTerminator(), BBDupThreshold);
2345 if (JumpThreadCost > BBDupThreshold) {
2346    LLVM_DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
2347                      << "' - Cost is too high: " << JumpThreadCost << "\n");
2348 return false;
2349 }
2350
2351 threadEdge(BB, PredBBs, SuccBB);
2352 return true;
2353}
2354
2355/// threadEdge - We have decided that it is safe and profitable to factor the
2356/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
2357/// across BB. Transform the IR to reflect this change.
2358void JumpThreadingPass::threadEdge(BasicBlock *BB,
2359 const SmallVectorImpl<BasicBlock *> &PredBBs,
2360 BasicBlock *SuccBB) {
2361  assert(SuccBB != BB && "Don't create an infinite loop");
2362
2363  assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) &&
2364         "Don't thread across loop headers");
2365
2366 // And finally, do it! Start by factoring the predecessors if needed.
2367 BasicBlock *PredBB;
2368 if (PredBBs.size() == 1)
2369 PredBB = PredBBs[0];
2370 else {
2371    LLVM_DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
2372                      << " common predecessors.\n");
2373 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm");
2374 }
2375
2376 // And finally, do it!
2377  LLVM_DEBUG(dbgs() << "  Threading edge from '" << PredBB->getName()
2378                    << "' to '" << SuccBB->getName()
2379                    << "', across block:\n    " << *BB << "\n");
2380
2381 LVI->threadEdge(PredBB, BB, SuccBB);
2382
2383 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
2384 BB->getName()+".thread",
2385 BB->getParent(), BB);
2386 NewBB->moveAfter(PredBB);
2387
2388 // Set the block frequency of NewBB.
2389 if (HasProfileData) {
2390 auto NewBBFreq =
2391 BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
2392 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2393 }
2394
2395 // Copy all the instructions from BB to NewBB except the terminator.
2396 DenseMap<Instruction *, Value *> ValueMapping =
2397 cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB);
2398
2399 // We didn't copy the terminator from BB over to NewBB, because there is now
2400 // an unconditional jump to SuccBB. Insert the unconditional jump.
2401 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
2402 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());
2403
2404 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the
2405 // PHI nodes for NewBB now.
2406 addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);
2407
2408 // Update the terminator of PredBB to jump to NewBB instead of BB. This
2409 // eliminates predecessors from BB, which requires us to simplify any PHI
2410 // nodes in BB.
2411 Instruction *PredTerm = PredBB->getTerminator();
2412 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
2413 if (PredTerm->getSuccessor(i) == BB) {
2414 BB->removePredecessor(PredBB, true);
2415 PredTerm->setSuccessor(i, NewBB);
2416 }
2417
2418 // Enqueue required DT updates.
2419 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB},
2420 {DominatorTree::Insert, PredBB, NewBB},
2421 {DominatorTree::Delete, PredBB, BB}});
2422
2423 updateSSA(BB, NewBB, ValueMapping);
2424
2425 // At this point, the IR is fully up to date and consistent. Do a quick scan
2426 // over the new instructions and zap any that are constants or dead. This
2427 // frequently happens because of phi translation.
2428 SimplifyInstructionsInBlock(NewBB, TLI);
2429
2430 // Update the edge weight from BB to SuccBB, which should be less than before.
2431 updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);
2432
2433 // Threaded an edge!
2434 ++NumThreads;
2435}
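// A minimal before/after sketch of a single threaded edge (names invented for
// illustration only):
//
//   before:  pred -> bb -> { succ, other }   ; bb branches on a value that is
//                                            ; a known constant coming from pred
//   after:   pred -> bb.thread -> succ       ; bb.thread clones bb's body and
//            other -> bb -> { succ, other }  ; ends in an unconditional branch
//
// The clone needs no conditional terminator because, along the threaded edge,
// the branch condition has already been decided.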
2436
2437/// Create a new basic block that will be the predecessor of BB and successor of
2438/// all blocks in Preds. When profile data is available, update the frequency of
2439/// this new block.
2440BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB,
2441 ArrayRef<BasicBlock *> Preds,
2442 const char *Suffix) {
2443 SmallVector<BasicBlock *, 2> NewBBs;
2444
2445 // Collect the frequencies of all predecessors of BB, which will be used to
2446 // update the edge weight of the result of splitting predecessors.
2447 DenseMap<BasicBlock *, BlockFrequency> FreqMap;
2448 if (HasProfileData)
2449 for (auto Pred : Preds)
2450 FreqMap.insert(std::make_pair(
2451 Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB)));
2452
2453 // In the case when BB is a LandingPad block we create 2 new predecessors
2454 // instead of just one.
2455 if (BB->isLandingPad()) {
2456 std::string NewName = std::string(Suffix) + ".split-lp";
2457 SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs);
2458 } else {
2459 NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix));
2460 }
2461
2462 std::vector<DominatorTree::UpdateType> Updates;
2463 Updates.reserve((2 * Preds.size()) + NewBBs.size());
2464 for (auto NewBB : NewBBs) {
2465 BlockFrequency NewBBFreq(0);
2466 Updates.push_back({DominatorTree::Insert, NewBB, BB});
2467 for (auto Pred : predecessors(NewBB)) {
2468 Updates.push_back({DominatorTree::Delete, Pred, BB});
2469 Updates.push_back({DominatorTree::Insert, Pred, NewBB});
2470 if (HasProfileData) // Update frequencies between Pred -> NewBB.
2471 NewBBFreq += FreqMap.lookup(Pred);
2472 }
2473 if (HasProfileData) // Apply the summed frequency to NewBB.
2474 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2475 }
2476
2477 DTU->applyUpdatesPermissive(Updates);
2478 return NewBBs[0];
2479}
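// An illustrative example (invented names): splitBlockPreds(BB, {P1, P2},
// ".thr_comm") rewires
//
//   P1 -> BB                P1 -> BB.thr_comm
//   P2 -> BB      into      P2 -> BB.thr_comm -> BB
//
// and, when profile data is present, sets the new block's frequency to the sum
// of the incoming edge frequencies, Freq(P1->BB) + Freq(P2->BB), as collected
// in FreqMap above.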
2480
2481bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
2482 const Instruction *TI = BB->getTerminator();
2483  assert(TI->getNumSuccessors() > 1 && "not a split");
2484
2485 MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
2486 if (!WeightsNode)
2487 return false;
2488
2489 MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
2490 if (MDName->getString() != "branch_weights")
2491 return false;
2492
2493 // Ensure there are weights for all of the successors. Note that the first
2494 // operand to the metadata node is a name, not a weight.
2495 return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1;
2496}
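// For reference, branch-weight metadata on a two-successor terminator has the
// shape sketched below (weights are illustrative); the check above matches
// this layout: one name operand plus one weight per successor.
//
//   br i1 %cond, label %then, label %else, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 80}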
2497
2498/// Update the block frequency of BB and the branch weight metadata on the
2499/// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by
2500/// 1 - Freq(PredBB->BB) / Freq(BB->SuccBB).
2501void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
2502 BasicBlock *BB,
2503 BasicBlock *NewBB,
2504 BasicBlock *SuccBB) {
2505 if (!HasProfileData)
2506 return;
2507
2508  assert(BFI && BPI && "BFI & BPI should have been created here");
2509
2510 // As the edge from PredBB to BB is deleted, we have to update the block
2511 // frequency of BB.
2512 auto BBOrigFreq = BFI->getBlockFreq(BB);
2513 auto NewBBFreq = BFI->getBlockFreq(NewBB);
2514 auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
2515 auto BBNewFreq = BBOrigFreq - NewBBFreq;
2516 BFI->setBlockFreq(BB, BBNewFreq.getFrequency());
2517
2518 // Collect updated outgoing edges' frequencies from BB and use them to update
2519 // edge probabilities.
2520 SmallVector<uint64_t, 4> BBSuccFreq;
2521 for (BasicBlock *Succ : successors(BB)) {
2522 auto SuccFreq = (Succ == SuccBB)
2523 ? BB2SuccBBFreq - NewBBFreq
2524 : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
2525 BBSuccFreq.push_back(SuccFreq.getFrequency());
2526 }
2527
2528 uint64_t MaxBBSuccFreq =
2529 *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());
2530
2531 SmallVector<BranchProbability, 4> BBSuccProbs;
2532 if (MaxBBSuccFreq == 0)
2533 BBSuccProbs.assign(BBSuccFreq.size(),
2534 {1, static_cast<unsigned>(BBSuccFreq.size())});
2535 else {
2536 for (uint64_t Freq : BBSuccFreq)
2537 BBSuccProbs.push_back(
2538 BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
2539 // Normalize edge probabilities so that they sum up to one.
2540 BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
2541 BBSuccProbs.end());
2542 }
2543
2544 // Update edge probabilities in BPI.
2545 BPI->setEdgeProbability(BB, BBSuccProbs);
2546
2547 // Update the profile metadata as well.
2548 //
2549  // Don't do this if the profile of the transformed blocks was statically
2550  // estimated. (This could occur, despite the function having an entry
2551  // frequency, in completely cold parts of the CFG.)
2552 //
2553 // In this case we don't want to suggest to subsequent passes that the
2554 // calculated weights are fully consistent. Consider this graph:
2555 //
2556 // check_1
2557 // 50% / |
2558 // eq_1 | 50%
2559 // \ |
2560 // check_2
2561 // 50% / |
2562 // eq_2 | 50%
2563 // \ |
2564 // check_3
2565 // 50% / |
2566 // eq_3 | 50%
2567 // \ |
2568 //
2569 // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
2570 // the overall probabilities are inconsistent; the total probability that the
2571 // value is either 1, 2 or 3 is 150%.
2572 //
2573 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3
2574 // becomes 0%. This is even worse if the edge whose probability becomes 0% is
2575 // the loop exit edge. Then based solely on static estimation we would assume
2576 // the loop was extremely hot.
2577 //
2578  // FIXME: Fix this locally as well so that BPI and BFI stay consistent. We
2579  // shouldn't make edges extremely likely or unlikely based solely on static
2580  // estimation.
2581 if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
2582 SmallVector<uint32_t, 4> Weights;
2583 for (auto Prob : BBSuccProbs)
2584 Weights.push_back(Prob.getNumerator());
2585
2586 auto TI = BB->getTerminator();
2587 TI->setMetadata(
2588 LLVMContext::MD_prof,
2589 MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
2590 }
2591}
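// A worked numeric example of the update above (all numbers assumed, purely
// for illustration): suppose Freq(BB) = 100, Freq(NewBB) = 40 (the portion
// that used to arrive via PredBB), and Prob(BB->SuccBB) = 0.7, so
// Freq(BB->SuccBB) = 70. Then BB's frequency drops to 100 - 40 = 60, the
// BB->SuccBB edge keeps 70 - 40 = 30, the other edge keeps 100 * 0.3 = 30,
// and normalizing {30, 30} yields branch probabilities {0.5, 0.5}.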
2592
2593/// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
2594/// to BB which contains an i1 PHI node and a conditional branch on that PHI.
2595/// If we can duplicate the contents of BB up into PredBB do so now, this
2596/// improves the odds that the branch will be on an analyzable instruction like
2597/// a compare.
2598bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred(
2599 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
2600  assert(!PredBBs.empty() && "Can't handle an empty set");
2601
2602 // If BB is a loop header, then duplicating this block outside the loop would
2603 // cause us to transform this into an irreducible loop, don't do this.
2604 // See the comments above findLoopHeaders for justifications and caveats.
2605 if (LoopHeaders.count(BB)) {
2606    LLVM_DEBUG(dbgs() << "  Not duplicating loop header '" << BB->getName()
2607                      << "' into predecessor block '" << PredBBs[0]->getName()
2608                      << "' - it might create an irreducible loop!\n");
2609 return false;
2610 }
2611
2612 unsigned DuplicationCost = getJumpThreadDuplicationCost(
2613 TTI, BB, BB->getTerminator(), BBDupThreshold);
2614 if (DuplicationCost > BBDupThreshold) {
2615    LLVM_DEBUG(dbgs() << "  Not duplicating BB '" << BB->getName()
2616                      << "' - Cost is too high: " << DuplicationCost << "\n");
2617 return false;
2618 }
2619
2620 // And finally, do it! Start by factoring the predecessors if needed.
2621 std::vector<DominatorTree::UpdateType> Updates;
2622 BasicBlock *PredBB;
2623 if (PredBBs.size() == 1)
2624 PredBB = PredBBs[0];
2625 else {
2626    LLVM_DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
2627                      << " common predecessors.\n");
2628 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm");
2629 }
2630 Updates.push_back({DominatorTree::Delete, PredBB, BB});
2631
2632 // Okay, we decided to do this! Clone all the instructions in BB onto the end
2633 // of PredBB.
2634    LLVM_DEBUG(dbgs() << "  Duplicating block '" << BB->getName()
2635                      << "' into end of '" << PredBB->getName()
2636                      << "' to eliminate branch on phi. Cost: "
2637                      << DuplicationCost << " block is:" << *BB << "\n");
2638
2639 // Unless PredBB ends with an unconditional branch, split the edge so that we
2640 // can just clone the bits from BB into the end of the new PredBB.
2641 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
2642
2643 if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
2644 BasicBlock *OldPredBB = PredBB;
2645 PredBB = SplitEdge(OldPredBB, BB);
2646 Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB});
2647 Updates.push_back({DominatorTree::Insert, PredBB, BB});
2648 Updates.push_back({DominatorTree::Delete, OldPredBB, BB});
2649 OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
2650 }
2651
2652  // We are going to have to map operands from the original BB into the
2653  // PredBB block. Evaluate PHI nodes in BB.
2654 DenseMap<Instruction*, Value*> ValueMapping;
2655
2656 BasicBlock::iterator BI = BB->begin();
2657 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
2658 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
2659 // Clone the non-phi instructions of BB into PredBB, keeping track of the
2660 // mapping and using it to remap operands in the cloned instructions.
2661 for (; BI != BB->end(); ++BI) {
2662 Instruction *New = BI->clone();
2663
2664 // Remap operands to patch up intra-block references.
2665 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2666 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
2667 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
2668 if (I != ValueMapping.end())
2669 New->setOperand(i, I->second);
2670 }
2671
2672 // If this instruction can be simplified after the operands are updated,
2673 // just use the simplified value instead. This frequently happens due to
2674 // phi translation.
2675 if (Value *IV = SimplifyInstruction(
2676 New,
2677 {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) {
2678 ValueMapping[&*BI] = IV;
2679 if (!New->mayHaveSideEffects()) {
2680 New->deleteValue();
2681 New = nullptr;
2682 }
2683 } else {
2684 ValueMapping[&*BI] = New;
2685 }
2686    if (New) {
2687      // The instruction survived simplification; insert it into the block.
2688 New->setName(BI->getName());
2689 PredBB->getInstList().insert(OldPredBranch->getIterator(), New);
2690 // Update Dominance from simplified New instruction operands.
2691 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
2692 if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i)))
2693 Updates.push_back({DominatorTree::Insert, PredBB, SuccBB});
2694 }
2695 }
2696
2697 // Check to see if the targets of the branch had PHI nodes. If so, we need to
2698 // add entries to the PHI nodes for branch from PredBB now.
2699 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
2700 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
2701 ValueMapping);
2702 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
2703 ValueMapping);
2704
2705 updateSSA(BB, PredBB, ValueMapping);
2706
2707 // PredBB no longer jumps to BB, remove entries in the PHI node for the edge
2708 // that we nuked.
2709 BB->removePredecessor(PredBB, true);
2710
2711 // Remove the unconditional branch at the end of the PredBB block.
2712 OldPredBranch->eraseFromParent();
2713 if (HasProfileData)
2714 BPI->copyEdgeProbabilities(BB, PredBB);
2715 DTU->applyUpdatesPermissive(Updates);
2716
2717 ++NumDupes;
2718 return true;
2719}
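// An illustrative sketch of the duplication (value and block names invented):
// when cloning bb into pred, the phi is replaced by its incoming value for
// pred, so the cloned branch becomes directly analyzable:
//
//   before:  pred:  br label %bb
//            bb:    %p = phi i1 [ %v, %pred ], [ %w, %other ]
//                   br i1 %p, label %t, label %f
//   after:   pred:  br i1 %v, label %t, label %f
//            bb:    unchanged, still reached from %other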
2720
2721// Pred is a predecessor of BB with an unconditional branch to BB. SI is
2722// a select instruction in Pred. BB has other predecessors and SI is used in
2723// a PHI node in BB. SI has no other use.
2724// A new basic block, NewBB, is created, and SI is converted into a
2725// conditional branch on its condition. SI is then erased from its parent.
2726void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB,
2727 SelectInst *SI, PHINode *SIUse,
2728 unsigned Idx) {
2729 // Expand the select.
2730 //
2731 // Pred --
2732 // | v
2733 // | NewBB
2734 // | |
2735 // |-----
2736 // v
2737 // BB
2738 BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator());
2739 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
2740 BB->getParent(), BB);
2741 // Move the unconditional branch to NewBB.
2742 PredTerm->removeFromParent();
2743 NewBB->getInstList().insert(NewBB->end(), PredTerm);
2744 // Create a conditional branch and update PHI nodes.
2745 auto *BI = BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
2746 BI->applyMergedLocation(PredTerm->getDebugLoc(), SI->getDebugLoc());
2747 SIUse->setIncomingValue(Idx, SI->getFalseValue());
2748 SIUse->addIncoming(SI->getTrueValue(), NewBB);
2749
2750 // The select is now dead.
2751 SI->eraseFromParent();
2752 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB},
2753 {DominatorTree::Insert, Pred, NewBB}});
2754
2755 // Update any other PHI nodes in BB.
2756 for (BasicBlock::iterator BI = BB->begin();
2757 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
2758 if (Phi != SIUse)
2759 Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
2760}
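// An IR-level sketch of the unfolding (names invented except "select.unfold",
// which is the block name used above):
//
//   before:  pred:           %s = select i1 %c, %a, %b
//                            br label %bb
//            bb:             %p = phi [ %s, %pred ], ...
//   after:   pred:           br i1 %c, label %select.unfold, label %bb
//            select.unfold:  br label %bb
//            bb:             %p = phi [ %b, %pred ], [ %a, %select.unfold ], ...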
2761
2762bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) {
2763 PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition());
2764
2765 if (!CondPHI || CondPHI->getParent() != BB)
2766 return false;
2767
2768 for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) {
2769 BasicBlock *Pred = CondPHI->getIncomingBlock(I);
2770 SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I));
2771
2772    // The second and third conditions could potentially be relaxed. Currently
2773    // they simplify the code and allow us to reuse the existing code developed
2774    // for tryToUnfoldSelect(CmpInst *, BasicBlock *).
2775 if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse())
2776 continue;
2777
2778 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
2779 if (!PredTerm || !PredTerm->isUnconditional())
2780 continue;
2781
2782 unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I);
2783 return true;
2784 }
2785 return false;
2786}
2787
2788/// tryToUnfoldSelect - Look for blocks of the form
2789/// bb1:
2790/// %a = select
2791/// br bb2
2792///
2793/// bb2:
2794/// %p = phi [%a, %bb1] ...
2795/// %c = icmp %p
2796/// br i1 %c
2797///
2798/// And expand the select into a branch structure if one of its arms allows %c
2799/// to be folded. This later enables threading from bb1 over bb2.
2800bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
2801 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
2802 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
2803 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));
2804
2805 if (!CondBr || !CondBr->isConditional() || !CondLHS ||
2806 CondLHS->getParent() != BB)
2807 return false;
2808
2809 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
2810 BasicBlock *Pred = CondLHS->getIncomingBlock(I);
2811 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));
2812
2813    // Check whether one of the incoming values is a select in the
2814    // corresponding predecessor.
2815 if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
2816 continue;
2817
2818 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
2819 if (!PredTerm || !PredTerm->isUnconditional())
2820 continue;
2821
2822      // Now check if one of the select values would allow us to constant fold
2823      // the terminator in BB. We don't do the transform if both sides fold;
2824      // those cases will be threaded in any case.
2825 LazyValueInfo::Tristate LHSFolds =
2826 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
2827 CondRHS, Pred, BB, CondCmp);
2828 LazyValueInfo::Tristate RHSFolds =
2829 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
2830 CondRHS, Pred, BB, CondCmp);
2831 if ((LHSFolds != LazyValueInfo::Unknown ||
2832 RHSFolds != LazyValueInfo::Unknown) &&
2833 LHSFolds != RHSFolds) {
2834 unfoldSelectInstr(Pred, BB, SI, CondLHS, I);
2835 return true;
2836 }
2837 }
2838 return false;
2839}
2840
2841/// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the
2842/// same BB in the form
2843/// bb:
2844/// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
2845/// %s = select %p, trueval, falseval
2846///
2847/// or
2848///
2849/// bb:
2850/// %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ...
2851/// %c = cmp %p, 0
2852/// %s = select %c, trueval, falseval
2853///
2854/// And expand the select into a branch structure. This later enables
2855/// jump-threading over bb in this pass.
2856///
2857/// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold
2858/// the select if the associated PHI has at least one constant. If the
2859/// unfolded select is not jump-threaded, it will be folded again in later
2860/// optimizations.
2861bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) {
2862 // This transform would reduce the quality of msan diagnostics.
2863 // Disable this transform under MemorySanitizer.
2864 if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory))
2865 return false;
2866
2867 // If threading this would thread across a loop header, don't thread the edge.
2868 // See the comments above findLoopHeaders for justifications and caveats.
2869 if (LoopHeaders.count(BB))
2870 return false;
2871
2872 for (BasicBlock::iterator BI = BB->begin();
2873 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2874 // Look for a Phi having at least one constant incoming value.
2875 if (llvm::all_of(PN->incoming_values(),
2876 [](Value *V) { return !isa<ConstantInt>(V); }))
2877 continue;
2878
2879 auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) {
2880 using namespace PatternMatch;
2881
2882      // Check if SI is in BB and uses V as its condition.
2883 if (SI->getParent() != BB)
2884 return false;
2885 Value *Cond = SI->getCondition();
2886 bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr()));
2887 return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr;
2888 };
2889
2890 SelectInst *SI = nullptr;
2891 for (Use &U : PN->uses()) {
2892 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2893        // Look for an ICmp in BB that compares PN with a constant and is the
2894        // condition of a Select.
2895 if (Cmp->getParent() == BB && Cmp->hasOneUse() &&
2896 isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo())))
2897 if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back()))
2898 if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) {
2899 SI = SelectI;
2900 break;
2901 }
2902 } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
2903 // Look for a Select in BB that uses PN as condition.
2904 if (isUnfoldCandidate(SelectI, U.get())) {
2905 SI = SelectI;
2906 break;
2907 }
2908 }
2909 }
2910
2911 if (!SI)
2912 continue;
2913 // Expand the select.
2914 Value *Cond = SI->getCondition();
2915 if (InsertFreezeWhenUnfoldingSelect &&
2916 !isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI,
2917 &DTU->getDomTree()))
2918 Cond = new FreezeInst(Cond, "cond.fr", SI);
2919 Instruction *Term = SplitBlockAndInsertIfThen(Cond, SI, false);
2920 BasicBlock *SplitBB = SI->getParent();
2921 BasicBlock *NewBB = Term->getParent();
2922 PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
2923 NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
2924 NewPN->addIncoming(SI->getFalseValue(), BB);
2925 SI->replaceAllUsesWith(NewPN);
2926 SI->eraseFromParent();
2927 // NewBB and SplitBB are newly created blocks which require insertion.
2928 std::vector<DominatorTree::UpdateType> Updates;
2929 Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3);
2930 Updates.push_back({DominatorTree::Insert, BB, SplitBB});
2931 Updates.push_back({DominatorTree::Insert, BB, NewBB});
2932 Updates.push_back({DominatorTree::Insert, NewBB, SplitBB});
2933 // BB's successors were moved to SplitBB, update DTU accordingly.
2934 for (auto *Succ : successors(SplitBB)) {
2935 Updates.push_back({DominatorTree::Delete, BB, Succ});
2936 Updates.push_back({DominatorTree::Insert, SplitBB, Succ});
2937 }
2938 DTU->applyUpdatesPermissive(Updates);
2939 return true;
2940 }
2941 return false;
2942}
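// An illustrative sketch of the expansion performed above (block names are
// assumed; the actual names come from SplitBlockAndInsertIfThen):
//
//   before:  bb:        %s = select i1 %p, %tv, %fv
//   after:   bb:        br i1 %p, label %bb.then, label %bb.split
//            bb.then:   br label %bb.split
//            bb.split:  %s = phi [ %tv, %bb.then ], [ %fv, %bb ]
//
// The select becomes an explicit branch on %p, which jump threading can then
// exploit on a later iteration of this pass.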
2943
2944/// Try to propagate a guard from the current BB into one of its predecessors
2945/// when another branch of execution implies that the condition of this
2946/// guard is always true. Currently we only process the simplest case that
2947/// looks like:
2948///
2949/// Start:
2950/// %cond = ...
2951/// br i1 %cond, label %T1, label %F1
2952/// T1:
2953/// br label %Merge
2954/// F1:
2955/// br label %Merge
2956/// Merge:
2957/// %condGuard = ...
2958/// call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
2959///
2960/// And cond either implies condGuard or !condGuard. In this case all the
2961/// instructions before the guard can be duplicated in both branches, and the
2962/// guard is then threaded to one of them.
2963bool JumpThreadingPass::processGuards(BasicBlock *BB) {
2964 using namespace PatternMatch;
2965
2966 // We only want to deal with two predecessors.
2967 BasicBlock *Pred1, *Pred2;
2968 auto PI = pred_begin(BB), PE = pred_end(BB);
2969 if (PI == PE)
2970 return false;
2971 Pred1 = *PI++;
2972 if (PI == PE)
2973 return false;
2974 Pred2 = *PI++;
2975 if (PI != PE)
2976 return false;
2977 if (Pred1 == Pred2)
2978 return false;
2979
2980 // Try to thread one of the guards of the block.
2981 // TODO: Look up deeper than to immediate predecessor?
2982 auto *Parent = Pred1->getSinglePredecessor();
2983 if (!Parent || Parent != Pred2->getSinglePredecessor())
2984 return false;
2985
2986 if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
2987 for (auto &I : *BB)
2988 if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI))
2989 return true;
2990
2991 return false;
2992}
2993
2994/// Try to propagate the guard from BB, which is the lower block of a diamond,
2995/// to one of its branches, when the diamond's condition implies the guard's
2996/// condition.
2997bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard,
2998 BranchInst *BI) {
2999  assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
3000  assert(BI->isConditional() && "Unconditional branch has 2 successors?");
3001 Value *GuardCond = Guard->getArgOperand(0);
3002 Value *BranchCond = BI->getCondition();
3003 BasicBlock *TrueDest = BI->getSuccessor(0);
3004 BasicBlock *FalseDest = BI->getSuccessor(1);
3005
3006 auto &DL = BB->getModule()->getDataLayout();
3007 bool TrueDestIsSafe = false;
3008 bool FalseDestIsSafe = false;
3009
3010 // True dest is safe if BranchCond => GuardCond.
3011 auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
3012 if (Impl && *Impl)
3013 TrueDestIsSafe = true;
3014 else {
3015 // False dest is safe if !BranchCond => GuardCond.
3016 Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false);
3017 if (Impl && *Impl)
3018 FalseDestIsSafe = true;
3019 }
3020
3021 if (!TrueDestIsSafe && !FalseDestIsSafe)
3022 return false;
3023
3024 BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
3025 BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;
3026
3027 ValueToValueMapTy UnguardedMapping, GuardedMapping;
3028 Instruction *AfterGuard = Guard->getNextNode();
3029 unsigned Cost =
3030 getJumpThreadDuplicationCost(TTI, BB, AfterGuard, BBDupThreshold);
3031 if (Cost > BBDupThreshold)
3032 return false;
3033  // Duplicate all instructions before the guard, and the guard itself, to the
3034  // branch where the implication is not proved.
3035 BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween(
3036 BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU);
3037  assert(GuardedBlock && "Could not create the guarded block?");
3038 // Duplicate all instructions before the guard in the unguarded branch.
3039 // Since we have successfully duplicated the guarded block and this block
3040 // has fewer instructions, we expect it to succeed.
3041 BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween(
3042 BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU);
3043  assert(UnguardedBlock && "Could not create the unguarded block?");
3044  LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
3045                    << GuardedBlock->getName() << "\n");
3046 // Some instructions before the guard may still have uses. For them, we need
3047 // to create Phi nodes merging their copies in both guarded and unguarded
3048 // branches. Those instructions that have no uses can be just removed.
3049 SmallVector<Instruction *, 4> ToRemove;
3050 for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
3051 if (!isa<PHINode>(&*BI))
3052 ToRemove.push_back(&*BI);
3053
3054 Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
3055  assert(InsertionPoint && "Empty block?");
3056 // Substitute with Phis & remove.
3057 for (auto *Inst : reverse(ToRemove)) {
3058 if (!Inst->use_empty()) {
3059 PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
3060 NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
3061 NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
3062 NewPN->insertBefore(InsertionPoint);
3063 Inst->replaceAllUsesWith(NewPN);
3064 }
3065 Inst->eraseFromParent();
3066 }
3067 return true;
3068}
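// To summarize the effect with the diamond from the processGuards comment
// above (an informal sketch, not verbatim pass output): if %cond implies
// %condGuard, then T1 is the safe destination. The instructions of Merge up to
// and including the guard are duplicated into the F1 side, where the
// implication is unproved; only the instructions before the guard are
// duplicated into the T1 side; and phi nodes in Merge stitch the two copies
// back together.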

/build/llvm-toolchain-snapshot-14~++20220119111520+da61cb019eb2/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
61/// An instruction to allocate memory on the stack.
62class AllocaInst : public UnaryInstruction {
63 Type *AllocatedType;
64
65 using AlignmentField = AlignmentBitfieldElementT<0>;
66 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
68 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69 SwiftErrorField>(),
70 "Bitfields must be contiguous");
71
72protected:
73 // Note: Instruction needs to be a friend here to call cloneImpl.
74 friend class Instruction;
75
76 AllocaInst *cloneImpl() const;
77
78public:
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 Instruction *InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace,
87 const Twine &Name, BasicBlock *InsertAtEnd);
88
89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90 const Twine &Name = "", Instruction *InsertBefore = nullptr);
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92 const Twine &Name, BasicBlock *InsertAtEnd);
93
94 /// Return true if there is an allocation size parameter to the allocation
95 /// instruction that is not 1.
96 bool isArrayAllocation() const;
97
98 /// Get the number of elements allocated. For a simple allocation of a single
99 /// element, this will return a constant 1 value.
100 const Value *getArraySize() const { return getOperand(0); }
101 Value *getArraySize() { return getOperand(0); }
102
103 /// Overload to return most specific pointer type.
104 PointerType *getType() const {
105 return cast<PointerType>(Instruction::getType());
106 }
107
108 /// Return the address space for the allocation.
109 unsigned getAddressSpace() const {
110 return getType()->getAddressSpace();
111 }
112
113 /// Get allocation size in bits. Returns None if size can't be determined,
114 /// e.g. in case of a VLA.
115 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116
117 /// Return the type that is being allocated by the instruction.
118 Type *getAllocatedType() const { return AllocatedType; }
119  /// For use only in special circumstances that need to generically
120  /// transform a whole instruction (e.g., IR linking and vectorization).
121 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122
123 /// Return the alignment of the memory that is being allocated by the
124 /// instruction.
125 Align getAlign() const {
126 return Align(1ULL << getSubclassData<AlignmentField>());
127 }
128
129 void setAlignment(Align Align) {
130 setSubclassData<AlignmentField>(Log2(Align));
131 }
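  // A worked example of the encoding above (values illustrative):
  // setAlignment(Align(16)) stores Log2(16) == 4 in AlignmentField, and
  // getAlign() reconstructs Align(1ULL << 4) == Align(16). Only power-of-two
  // alignments are representable, which is what the Align type guarantees.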
132
133  // FIXME: Remove this once the transition to Align is over.
134 uint64_t getAlignment() const { return getAlign().value(); }
135
136 /// Return true if this alloca is in the entry block of the function and is a
137 /// constant size. If so, the code generator will fold it into the
138 /// prolog/epilog code, so it is basically free.
139 bool isStaticAlloca() const;
140
141 /// Return true if this alloca is used as an inalloca argument to a call. Such
142 /// allocas are never considered static even if they are in the entry block.
143 bool isUsedWithInAlloca() const {
144 return getSubclassData<UsedWithInAllocaField>();
145 }
146
147 /// Specify whether this alloca is used to represent the arguments to a call.
148 void setUsedWithInAlloca(bool V) {
149 setSubclassData<UsedWithInAllocaField>(V);
150 }
151
152 /// Return true if this alloca is used as a swifterror argument to a call.
153 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
154 /// Specify whether this alloca is used to represent a swifterror.
155 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
156
157 // Methods for support type inquiry through isa, cast, and dyn_cast:
158 static bool classof(const Instruction *I) {
159 return (I->getOpcode() == Instruction::Alloca);
160 }
161 static bool classof(const Value *V) {
162 return isa<Instruction>(V) && classof(cast<Instruction>(V));
163 }
164
165private:
166 // Shadow Instruction::setInstructionSubclassData with a private forwarding
167 // method so that subclasses cannot accidentally use it.
168 template <typename Bitfield>
169 void setSubclassData(typename Bitfield::Type Value) {
170 Instruction::setSubclassData<Bitfield>(Value);
171 }
172};
173
174//===----------------------------------------------------------------------===//
175// LoadInst Class
176//===----------------------------------------------------------------------===//
177
178/// An instruction for reading from memory. This uses the SubclassData field in
179/// Value to store whether or not the load is volatile.
180class LoadInst : public UnaryInstruction {
181 using VolatileField = BoolBitfieldElementT<0>;
182 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
183 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
184 static_assert(
185 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
186 "Bitfields must be contiguous");
187
188 void AssertOK();
189
190protected:
191 // Note: Instruction needs to be a friend here to call cloneImpl.
192 friend class Instruction;
193
194 LoadInst *cloneImpl() const;
195
196public:
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
198 Instruction *InsertBefore);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Instruction *InsertBefore);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 BasicBlock *InsertAtEnd);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205 Align Align, Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, BasicBlock *InsertAtEnd);
208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209 Align Align, AtomicOrdering Order,
210 SyncScope::ID SSID = SyncScope::System,
211 Instruction *InsertBefore = nullptr);
212 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
213 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
214 BasicBlock *InsertAtEnd);
215
216 /// Return true if this is a load from a volatile memory location.
217 bool isVolatile() const { return getSubclassData<VolatileField>(); }
218
219 /// Specify whether this is a volatile load or not.
220 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
221
222 /// Return the alignment of the access that is being performed.
223 /// FIXME: Remove this function once transition to Align is over.
224 /// Use getAlign() instead.
225 uint64_t getAlignment() const { return getAlign().value(); }
226
227 /// Return the alignment of the access that is being performed.
228 Align getAlign() const {
229 return Align(1ULL << (getSubclassData<AlignmentField>()));
230 }
231
232 void setAlignment(Align Align) {
233 setSubclassData<AlignmentField>(Log2(Align));
234 }
235
236 /// Returns the ordering constraint of this load instruction.
237 AtomicOrdering getOrdering() const {
238 return getSubclassData<OrderingField>();
239 }
240 /// Sets the ordering constraint of this load instruction. May not be Release
241 /// or AcquireRelease.
242 void setOrdering(AtomicOrdering Ordering) {
243 setSubclassData<OrderingField>(Ordering);
244 }
245
246 /// Returns the synchronization scope ID of this load instruction.
247 SyncScope::ID getSyncScopeID() const {
248 return SSID;
249 }
250
251 /// Sets the synchronization scope ID of this load instruction.
252 void setSyncScopeID(SyncScope::ID SSID) {
253 this->SSID = SSID;
254 }
255
256 /// Sets the ordering constraint and the synchronization scope ID of this load
257 /// instruction.
258 void setAtomic(AtomicOrdering Ordering,
259 SyncScope::ID SSID = SyncScope::System) {
260 setOrdering(Ordering);
261 setSyncScopeID(SSID);
262 }
263
264 bool isSimple() const { return !isAtomic() && !isVolatile(); }
265
266 bool isUnordered() const {
267    return (getOrdering() == AtomicOrdering::NotAtomic ||
           Step 2: Assuming the condition is false
           Step 5: Returning the value 1, which participates in a condition later
268            getOrdering() == AtomicOrdering::Unordered) &&
           Step 3: Assuming the condition is true
269           !isVolatile();
           Step 4: Assuming the condition is true
270 }
271
272 Value *getPointerOperand() { return getOperand(0); }
273 const Value *getPointerOperand() const { return getOperand(0); }
274 static unsigned getPointerOperandIndex() { return 0U; }
275 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
276
277 /// Returns the address space of the pointer operand.
278 unsigned getPointerAddressSpace() const {
279 return getPointerOperandType()->getPointerAddressSpace();
280 }
281
282 // Methods for support type inquiry through isa, cast, and dyn_cast:
283 static bool classof(const Instruction *I) {
284 return I->getOpcode() == Instruction::Load;
285 }
286 static bool classof(const Value *V) {
287 return isa<Instruction>(V) && classof(cast<Instruction>(V));
288 }
289
290private:
291 // Shadow Instruction::setInstructionSubclassData with a private forwarding
292 // method so that subclasses cannot accidentally use it.
293 template <typename Bitfield>
294 void setSubclassData(typename Bitfield::Type Value) {
295 Instruction::setSubclassData<Bitfield>(Value);
296 }
297
298 /// The synchronization scope ID of this load instruction. Not quite enough
299 /// room in SubClassData for everything, so synchronization scope ID gets its
300 /// own field.
301 SyncScope::ID SSID;
302};
303
304//===----------------------------------------------------------------------===//
305// StoreInst Class
306//===----------------------------------------------------------------------===//
307
308/// An instruction for storing to memory.
309class StoreInst : public Instruction {
310 using VolatileField = BoolBitfieldElementT<0>;
311 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
312 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
313 static_assert(
314 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
315 "Bitfields must be contiguous");
316
317 void AssertOK();
318
319protected:
320 // Note: Instruction needs to be a friend here to call cloneImpl.
321 friend class Instruction;
322
323 StoreInst *cloneImpl() const;
324
325public:
326 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
327 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331 Instruction *InsertBefore = nullptr);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 BasicBlock *InsertAtEnd);
334 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
335 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
336 Instruction *InsertBefore = nullptr);
337 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
338 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
339
340 // allocate space for exactly two operands
341 void *operator new(size_t S) { return User::operator new(S, 2); }
342 void operator delete(void *Ptr) { User::operator delete(Ptr); }
343
344 /// Return true if this is a store to a volatile memory location.
345 bool isVolatile() const { return getSubclassData<VolatileField>(); }
346
347 /// Specify whether this is a volatile store or not.
348 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
349
350 /// Transparently provide more efficient getOperand methods.
351  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
352
353 /// Return the alignment of the access that is being performed
354 /// FIXME: Remove this function once transition to Align is over.
355 /// Use getAlign() instead.
356 uint64_t getAlignment() const { return getAlign().value(); }
357
358 Align getAlign() const {
359 return Align(1ULL << (getSubclassData<AlignmentField>()));
360 }
361
362 void setAlignment(Align Align) {
363 setSubclassData<AlignmentField>(Log2(Align));
364 }
365
366 /// Returns the ordering constraint of this store instruction.
367 AtomicOrdering getOrdering() const {
368 return getSubclassData<OrderingField>();
369 }
370
371 /// Sets the ordering constraint of this store instruction. May not be
372 /// Acquire or AcquireRelease.
373 void setOrdering(AtomicOrdering Ordering) {
374 setSubclassData<OrderingField>(Ordering);
375 }
376
377 /// Returns the synchronization scope ID of this store instruction.
378 SyncScope::ID getSyncScopeID() const {
379 return SSID;
380 }
381
382 /// Sets the synchronization scope ID of this store instruction.
383 void setSyncScopeID(SyncScope::ID SSID) {
384 this->SSID = SSID;
385 }
386
387 /// Sets the ordering constraint and the synchronization scope ID of this
388 /// store instruction.
389 void setAtomic(AtomicOrdering Ordering,
390 SyncScope::ID SSID = SyncScope::System) {
391 setOrdering(Ordering);
392 setSyncScopeID(SSID);
393 }
394
395 bool isSimple() const { return !isAtomic() && !isVolatile(); }
396
397 bool isUnordered() const {
398 return (getOrdering() == AtomicOrdering::NotAtomic ||
399 getOrdering() == AtomicOrdering::Unordered) &&
400 !isVolatile();
401 }
402
403 Value *getValueOperand() { return getOperand(0); }
404 const Value *getValueOperand() const { return getOperand(0); }
405
406 Value *getPointerOperand() { return getOperand(1); }
407 const Value *getPointerOperand() const { return getOperand(1); }
408 static unsigned getPointerOperandIndex() { return 1U; }
409 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
410
411 /// Returns the address space of the pointer operand.
412 unsigned getPointerAddressSpace() const {
413 return getPointerOperandType()->getPointerAddressSpace();
414 }
415
416 // Methods for support type inquiry through isa, cast, and dyn_cast:
417 static bool classof(const Instruction *I) {
418 return I->getOpcode() == Instruction::Store;
419 }
420 static bool classof(const Value *V) {
421 return isa<Instruction>(V) && classof(cast<Instruction>(V));
422 }
423
424private:
425 // Shadow Instruction::setInstructionSubclassData with a private forwarding
426 // method so that subclasses cannot accidentally use it.
427 template <typename Bitfield>
428 void setSubclassData(typename Bitfield::Type Value) {
429 Instruction::setSubclassData<Bitfield>(Value);
430 }
431
432 /// The synchronization scope ID of this store instruction. Not quite enough
433 /// room in SubClassData for everything, so synchronization scope ID gets its
434 /// own field.
435 SyncScope::ID SSID;
436};
437
438template <>
439struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
440};
441
442DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
443
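A minimal usage sketch for the StoreInst constructors and predicates declared above. It is not part of this header; the function name emitStores and the caller-supplied Val, Ptr, and BB are illustrative assumptions, as is the 8-byte alignment.

#include <cassert>
#include "llvm/IR/Instructions.h"
using namespace llvm;

void emitStores(Value *Val, Value *Ptr, BasicBlock *BB) {
  // Plain store appended to the end of BB (non-atomic, non-volatile).
  StoreInst *Simple = new StoreInst(Val, Ptr, BB);
  // Volatile, release-ordered atomic store; Align(8) assumes an
  // 8-byte-sized stored type, not a requirement of the API.
  StoreInst *Atomic =
      new StoreInst(Val, Ptr, /*isVolatile=*/true, Align(8),
                    AtomicOrdering::Release, SyncScope::System, BB);
  assert(Simple->isSimple());     // neither atomic nor volatile
  assert(!Atomic->isUnordered()); // Release ordering (and volatile)
  (void)Simple;
  (void)Atomic;
}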
444//===----------------------------------------------------------------------===//
445// FenceInst Class
446//===----------------------------------------------------------------------===//
447
448/// An instruction for ordering other memory operations.
449class FenceInst : public Instruction {
450 using OrderingField = AtomicOrderingBitfieldElementT<0>;
451
452 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
453
454protected:
455 // Note: Instruction needs to be a friend here to call cloneImpl.
456 friend class Instruction;
457
458 FenceInst *cloneImpl() const;
459
460public:
461 // Ordering may only be Acquire, Release, AcquireRelease, or
462 // SequentiallyConsistent.
463 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
464 SyncScope::ID SSID = SyncScope::System,
465 Instruction *InsertBefore = nullptr);
466 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
467 BasicBlock *InsertAtEnd);
468
469 // allocate space for exactly zero operands
470 void *operator new(size_t S) { return User::operator new(S, 0); }
471 void operator delete(void *Ptr) { User::operator delete(Ptr); }
472
473 /// Returns the ordering constraint of this fence instruction.
474 AtomicOrdering getOrdering() const {
475 return getSubclassData<OrderingField>();
476 }
477
478 /// Sets the ordering constraint of this fence instruction. May only be
479 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
480 void setOrdering(AtomicOrdering Ordering) {
481 setSubclassData<OrderingField>(Ordering);
482 }
483
484 /// Returns the synchronization scope ID of this fence instruction.
485 SyncScope::ID getSyncScopeID() const {
486 return SSID;
487 }
488
489 /// Sets the synchronization scope ID of this fence instruction.
490 void setSyncScopeID(SyncScope::ID SSID) {
491 this->SSID = SSID;
492 }
493
494 // Methods for support type inquiry through isa, cast, and dyn_cast:
495 static bool classof(const Instruction *I) {
496 return I->getOpcode() == Instruction::Fence;
497 }
498 static bool classof(const Value *V) {
499 return isa<Instruction>(V) && classof(cast<Instruction>(V));
500 }
501
502private:
503 // Shadow Instruction::setInstructionSubclassData with a private forwarding
504 // method so that subclasses cannot accidentally use it.
505 template <typename Bitfield>
506 void setSubclassData(typename Bitfield::Type Value) {
507 Instruction::setSubclassData<Bitfield>(Value);
508 }
509
510 /// The synchronization scope ID of this fence instruction. Not quite enough
511 /// room in SubClassData for everything, so synchronization scope ID gets its
512 /// own field.
513 SyncScope::ID SSID;
514};
515
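A hedged sketch of the FenceInst constructor declared above; emitAcquireFence is an illustrative name, and Ctx and BB are assumed to come from the caller.

#include "llvm/IR/Instructions.h"
using namespace llvm;

FenceInst *emitAcquireFence(LLVMContext &Ctx, BasicBlock *BB) {
  // Per the constructor comment above, the ordering must be Acquire,
  // Release, AcquireRelease, or SequentiallyConsistent.
  return new FenceInst(Ctx, AtomicOrdering::Acquire, SyncScope::System, BB);
}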
516//===----------------------------------------------------------------------===//
517// AtomicCmpXchgInst Class
518//===----------------------------------------------------------------------===//
519
520/// An instruction that atomically checks whether a
521/// specified value is in a memory location, and, if it is, stores a new value
522/// there. The value returned by this instruction is a pair containing the
523/// original value as first element, and an i1 indicating success (true) or
524/// failure (false) as second element.
525///
526class AtomicCmpXchgInst : public Instruction {
527 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
528 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID);
530
531 template <unsigned Offset>
532 using AtomicOrderingBitfieldElement =
533 typename Bitfield::Element<AtomicOrdering, Offset, 3,
534 AtomicOrdering::LAST>;
535
536protected:
537 // Note: Instruction needs to be a friend here to call cloneImpl.
538 friend class Instruction;
539
540 AtomicCmpXchgInst *cloneImpl() const;
541
542public:
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 Instruction *InsertBefore = nullptr);
547 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
548 AtomicOrdering SuccessOrdering,
549 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
550 BasicBlock *InsertAtEnd);
551
552 // allocate space for exactly three operands
553 void *operator new(size_t S) { return User::operator new(S, 3); }
554 void operator delete(void *Ptr) { User::operator delete(Ptr); }
555
556 using VolatileField = BoolBitfieldElementT<0>;
557 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
558 using SuccessOrderingField =
559 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
560 using FailureOrderingField =
561 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
562 using AlignmentField =
563 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
564 static_assert(
565 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
566 FailureOrderingField, AlignmentField>(),
567 "Bitfields must be contiguous");
568
569 /// Return the alignment of the memory location that this cmpxchg
570 /// instruction operates on.
571 Align getAlign() const {
572 return Align(1ULL << getSubclassData<AlignmentField>());
573 }
574
575 void setAlignment(Align Align) {
576 setSubclassData<AlignmentField>(Log2(Align));
577 }
578
579 /// Return true if this is a cmpxchg from a volatile memory
580 /// location.
581 ///
582 bool isVolatile() const { return getSubclassData<VolatileField>(); }
583
584 /// Specify whether this is a volatile cmpxchg.
585 ///
586 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
587
588 /// Return true if this cmpxchg may spuriously fail.
589 bool isWeak() const { return getSubclassData<WeakField>(); }
590
591 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
592
593 /// Transparently provide more efficient getOperand methods.
594 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
595
596 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered;
599 }
600
601 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
602 return Ordering != AtomicOrdering::NotAtomic &&
603 Ordering != AtomicOrdering::Unordered &&
604 Ordering != AtomicOrdering::AcquireRelease &&
605 Ordering != AtomicOrdering::Release;
606 }
607
608 /// Returns the success ordering constraint of this cmpxchg instruction.
609 AtomicOrdering getSuccessOrdering() const {
610 return getSubclassData<SuccessOrderingField>();
611 }
612
613 /// Sets the success ordering constraint of this cmpxchg instruction.
614 void setSuccessOrdering(AtomicOrdering Ordering) {
615 assert(isValidSuccessOrdering(Ordering) &&
616 "invalid CmpXchg success ordering");
617 setSubclassData<SuccessOrderingField>(Ordering);
618 }
619
620 /// Returns the failure ordering constraint of this cmpxchg instruction.
621 AtomicOrdering getFailureOrdering() const {
622 return getSubclassData<FailureOrderingField>();
623 }
624
625 /// Sets the failure ordering constraint of this cmpxchg instruction.
626 void setFailureOrdering(AtomicOrdering Ordering) {
627 assert(isValidFailureOrdering(Ordering) &&
628 "invalid CmpXchg failure ordering");
629 setSubclassData<FailureOrderingField>(Ordering);
630 }
631
632 /// Returns a single ordering which is at least as strong as both the
633 /// success and failure orderings for this cmpxchg.
634 AtomicOrdering getMergedOrdering() const {
635 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
636 return AtomicOrdering::SequentiallyConsistent;
637 if (getFailureOrdering() == AtomicOrdering::Acquire) {
638 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
639 return AtomicOrdering::Acquire;
640 if (getSuccessOrdering() == AtomicOrdering::Release)
641 return AtomicOrdering::AcquireRelease;
642 }
643 return getSuccessOrdering();
644 }
645
646 /// Returns the synchronization scope ID of this cmpxchg instruction.
647 SyncScope::ID getSyncScopeID() const {
648 return SSID;
649 }
650
651 /// Sets the synchronization scope ID of this cmpxchg instruction.
652 void setSyncScopeID(SyncScope::ID SSID) {
653 this->SSID = SSID;
654 }
655
656 Value *getPointerOperand() { return getOperand(0); }
657 const Value *getPointerOperand() const { return getOperand(0); }
658 static unsigned getPointerOperandIndex() { return 0U; }
659
660 Value *getCompareOperand() { return getOperand(1); }
661 const Value *getCompareOperand() const { return getOperand(1); }
662
663 Value *getNewValOperand() { return getOperand(2); }
664 const Value *getNewValOperand() const { return getOperand(2); }
665
666 /// Returns the address space of the pointer operand.
667 unsigned getPointerAddressSpace() const {
668 return getPointerOperand()->getType()->getPointerAddressSpace();
669 }
670
671 /// Returns the strongest permitted ordering on failure, given the
672 /// desired ordering on success.
673 ///
674 /// If the comparison in a cmpxchg operation fails, there is no atomic store
675 /// so release semantics cannot be provided. So this function drops explicit
676 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
677 /// operation would remain SequentiallyConsistent.
678 static AtomicOrdering
679 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
680 switch (SuccessOrdering) {
681 default:
682 llvm_unreachable("invalid cmpxchg success ordering");
683 case AtomicOrdering::Release:
684 case AtomicOrdering::Monotonic:
685 return AtomicOrdering::Monotonic;
686 case AtomicOrdering::AcquireRelease:
687 case AtomicOrdering::Acquire:
688 return AtomicOrdering::Acquire;
689 case AtomicOrdering::SequentiallyConsistent:
690 return AtomicOrdering::SequentiallyConsistent;
691 }
692 }
693
694 // Methods for support type inquiry through isa, cast, and dyn_cast:
695 static bool classof(const Instruction *I) {
696 return I->getOpcode() == Instruction::AtomicCmpXchg;
697 }
698 static bool classof(const Value *V) {
699 return isa<Instruction>(V) && classof(cast<Instruction>(V));
700 }
701
702private:
703 // Shadow Instruction::setInstructionSubclassData with a private forwarding
704 // method so that subclasses cannot accidentally use it.
705 template <typename Bitfield>
706 void setSubclassData(typename Bitfield::Type Value) {
707 Instruction::setSubclassData<Bitfield>(Value);
708 }
709
710 /// The synchronization scope ID of this cmpxchg instruction. Not quite
711 /// enough room in SubClassData for everything, so synchronization scope ID
712 /// gets its own field.
713 SyncScope::ID SSID;
714};
715
716template <>
717struct OperandTraits<AtomicCmpXchgInst> :
718 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
719};
720
721DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
722
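A minimal sketch of constructing a cmpxchg with the constructor and the getStrongestFailureOrdering helper declared above; emitCmpXchg is an illustrative name, Ptr, Cmp, New, and BB are assumed caller inputs, and Align(8) is an assumption about the value type.

#include "llvm/IR/Instructions.h"
using namespace llvm;

AtomicCmpXchgInst *emitCmpXchg(Value *Ptr, Value *Cmp, Value *New,
                               BasicBlock *BB) {
  AtomicOrdering Success = AtomicOrdering::SequentiallyConsistent;
  // Derive the failure ordering as documented above: Release is dropped,
  // while SequentiallyConsistent stays SequentiallyConsistent.
  AtomicOrdering Failure =
      AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
  return new AtomicCmpXchgInst(Ptr, Cmp, New, Align(8), Success, Failure,
                               SyncScope::System, BB);
}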
723//===----------------------------------------------------------------------===//
724// AtomicRMWInst Class
725//===----------------------------------------------------------------------===//
726
727/// An instruction that atomically reads a memory location,
728/// combines it with another value, and then stores the result back. Returns
729/// the old value.
730///
731class AtomicRMWInst : public Instruction {
732protected:
733 // Note: Instruction needs to be a friend here to call cloneImpl.
734 friend class Instruction;
735
736 AtomicRMWInst *cloneImpl() const;
737
738public:
739 /// This enumeration lists the possible modifications atomicrmw can make. In
740 /// the descriptions, 'p' is the pointer to the instruction's memory location,
741 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
742 /// instruction. These instructions always return 'old'.
743 enum BinOp : unsigned {
744 /// *p = v
745 Xchg,
746 /// *p = old + v
747 Add,
748 /// *p = old - v
749 Sub,
750 /// *p = old & v
751 And,
752 /// *p = ~(old & v)
753 Nand,
754 /// *p = old | v
755 Or,
756 /// *p = old ^ v
757 Xor,
758 /// *p = old >signed v ? old : v
759 Max,
760 /// *p = old <signed v ? old : v
761 Min,
762 /// *p = old >unsigned v ? old : v
763 UMax,
764 /// *p = old <unsigned v ? old : v
765 UMin,
766
767 /// *p = old + v
768 FAdd,
769
770 /// *p = old - v
771 FSub,
772
773 FIRST_BINOP = Xchg,
774 LAST_BINOP = FSub,
775 BAD_BINOP
776 };
777
778private:
779 template <unsigned Offset>
780 using AtomicOrderingBitfieldElement =
781 typename Bitfield::Element<AtomicOrdering, Offset, 3,
782 AtomicOrdering::LAST>;
783
784 template <unsigned Offset>
785 using BinOpBitfieldElement =
786 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
787
788public:
789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790 AtomicOrdering Ordering, SyncScope::ID SSID,
791 Instruction *InsertBefore = nullptr);
792 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793 AtomicOrdering Ordering, SyncScope::ID SSID,
794 BasicBlock *InsertAtEnd);
795
796 // allocate space for exactly two operands
797 void *operator new(size_t S) { return User::operator new(S, 2); }
798 void operator delete(void *Ptr) { User::operator delete(Ptr); }
799
800 using VolatileField = BoolBitfieldElementT<0>;
801 using AtomicOrderingField =
802 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
803 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
804 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
805 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
806 OperationField, AlignmentField>(),
807 "Bitfields must be contiguous");
808
809 BinOp getOperation() const { return getSubclassData<OperationField>(); }
810
811 static StringRef getOperationName(BinOp Op);
812
813 static bool isFPOperation(BinOp Op) {
814 switch (Op) {
815 case AtomicRMWInst::FAdd:
816 case AtomicRMWInst::FSub:
817 return true;
818 default:
819 return false;
820 }
821 }
822
823 void setOperation(BinOp Operation) {
824 setSubclassData<OperationField>(Operation);
825 }
826
827 /// Return the alignment of the memory location that this rmw
828 /// instruction operates on.
829 Align getAlign() const {
830 return Align(1ULL << getSubclassData<AlignmentField>());
831 }
832
833 void setAlignment(Align Align) {
834 setSubclassData<AlignmentField>(Log2(Align));
835 }
836
837 /// Return true if this is a RMW on a volatile memory location.
838 ///
839 bool isVolatile() const { return getSubclassData<VolatileField>(); }
840
841 /// Specify whether this is a volatile RMW or not.
842 ///
843 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
844
845 /// Transparently provide more efficient getOperand methods.
846 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
847
848 /// Returns the ordering constraint of this rmw instruction.
849 AtomicOrdering getOrdering() const {
850 return getSubclassData<AtomicOrderingField>();
851 }
852
853 /// Sets the ordering constraint of this rmw instruction.
854 void setOrdering(AtomicOrdering Ordering) {
855 assert(Ordering != AtomicOrdering::NotAtomic &&
856 "atomicrmw instructions can only be atomic.");
857 setSubclassData<AtomicOrderingField>(Ordering);
858 }
859
860 /// Returns the synchronization scope ID of this rmw instruction.
861 SyncScope::ID getSyncScopeID() const {
862 return SSID;
863 }
864
865 /// Sets the synchronization scope ID of this rmw instruction.
866 void setSyncScopeID(SyncScope::ID SSID) {
867 this->SSID = SSID;
868 }
869
870 Value *getPointerOperand() { return getOperand(0); }
871 const Value *getPointerOperand() const { return getOperand(0); }
872 static unsigned getPointerOperandIndex() { return 0U; }
873
874 Value *getValOperand() { return getOperand(1); }
875 const Value *getValOperand() const { return getOperand(1); }
876
877 /// Returns the address space of the pointer operand.
878 unsigned getPointerAddressSpace() const {
879 return getPointerOperand()->getType()->getPointerAddressSpace();
880 }
881
882 bool isFloatingPointOperation() const {
883 return isFPOperation(getOperation());
884 }
885
886 // Methods for support type inquiry through isa, cast, and dyn_cast:
887 static bool classof(const Instruction *I) {
888 return I->getOpcode() == Instruction::AtomicRMW;
889 }
890 static bool classof(const Value *V) {
891 return isa<Instruction>(V) && classof(cast<Instruction>(V));
892 }
893
894private:
895 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896 AtomicOrdering Ordering, SyncScope::ID SSID);
897
898 // Shadow Instruction::setInstructionSubclassData with a private forwarding
899 // method so that subclasses cannot accidentally use it.
900 template <typename Bitfield>
901 void setSubclassData(typename Bitfield::Type Value) {
902 Instruction::setSubclassData<Bitfield>(Value);
903 }
904
905 /// The synchronization scope ID of this rmw instruction. Not quite enough
906 /// room in SubClassData for everything, so synchronization scope ID gets its
907 /// own field.
908 SyncScope::ID SSID;
909};
910
911template <>
912struct OperandTraits<AtomicRMWInst>
913 : public FixedNumOperandTraits<AtomicRMWInst,2> {
914};
915
916DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
917
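A hedged sketch of an atomic fetch-add using the AtomicRMWInst constructor and BinOp enum above; emitFetchAdd is an illustrative name, Ptr, Val, and BB are assumed inputs, and Align(4) assumes an i32 operand.

#include <cassert>
#include "llvm/IR/Instructions.h"
using namespace llvm;

AtomicRMWInst *emitFetchAdd(Value *Ptr, Value *Val, BasicBlock *BB) {
  // BinOp::Add implements *p = old + v and yields the old value.
  auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
                                AtomicOrdering::SequentiallyConsistent,
                                SyncScope::System, BB);
  assert(!RMW->isFloatingPointOperation()); // Add is an integer BinOp
  return RMW;
}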
918//===----------------------------------------------------------------------===//
919// GetElementPtrInst Class
920//===----------------------------------------------------------------------===//
921
922// checkGEPType - Simple wrapper function to give a better assertion failure
923// message on bad indexes for a gep instruction.
924//
925inline Type *checkGEPType(Type *Ty) {
926 assert(Ty && "Invalid GetElementPtrInst indices for type!");
927 return Ty;
928}
929
930/// An instruction for type-safe pointer arithmetic to
931/// access elements of arrays and structs.
932///
933class GetElementPtrInst : public Instruction {
934 Type *SourceElementType;
935 Type *ResultElementType;
936
937 GetElementPtrInst(const GetElementPtrInst &GEPI);
938
939 /// Constructors - Create a getelementptr instruction with a base pointer and a
940 /// list of indices. The first ctor can optionally insert before an existing
941 /// instruction, the second appends the new instruction to the specified
942 /// BasicBlock.
943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
944 ArrayRef<Value *> IdxList, unsigned Values,
945 const Twine &NameStr, Instruction *InsertBefore);
946 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
947 ArrayRef<Value *> IdxList, unsigned Values,
948 const Twine &NameStr, BasicBlock *InsertAtEnd);
949
950 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
951
952protected:
953 // Note: Instruction needs to be a friend here to call cloneImpl.
954 friend class Instruction;
955
956 GetElementPtrInst *cloneImpl() const;
957
958public:
959 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
960 ArrayRef<Value *> IdxList,
961 const Twine &NameStr = "",
962 Instruction *InsertBefore = nullptr) {
963 unsigned Values = 1 + unsigned(IdxList.size());
964 assert(PointeeType && "Must specify element type");
965 assert(cast<PointerType>(Ptr->getType()->getScalarType())
966 ->isOpaqueOrPointeeTypeMatches(PointeeType));
967 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
968 NameStr, InsertBefore);
969 }
970
971 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
972 ArrayRef<Value *> IdxList,
973 const Twine &NameStr,
974 BasicBlock *InsertAtEnd) {
975 unsigned Values = 1 + unsigned(IdxList.size());
976 assert(PointeeType && "Must specify element type");
977 assert(cast<PointerType>(Ptr->getType()->getScalarType())
978 ->isOpaqueOrPointeeTypeMatches(PointeeType));
979 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
980 NameStr, InsertAtEnd);
981 }
982
983 /// Create an "inbounds" getelementptr. See the documentation for the
984 /// "inbounds" flag in LangRef.html for details.
985 static GetElementPtrInst *
986 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
987 const Twine &NameStr = "",
988 Instruction *InsertBefore = nullptr) {
989 GetElementPtrInst *GEP =
990 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
991 GEP->setIsInBounds(true);
992 return GEP;
993 }
994
995 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
996 ArrayRef<Value *> IdxList,
997 const Twine &NameStr,
998 BasicBlock *InsertAtEnd) {
999 GetElementPtrInst *GEP =
1000 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1001 GEP->setIsInBounds(true);
1002 return GEP;
1003 }
1004
1005 /// Transparently provide more efficient getOperand methods.
1006 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1007
1008 Type *getSourceElementType() const { return SourceElementType; }
1009
1010 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1011 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1012
1013 Type *getResultElementType() const {
1014 assert(cast<PointerType>(getType()->getScalarType())
1015 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1016 return ResultElementType;
1017 }
1018
1019 /// Returns the address space of this instruction's pointer type.
1020 unsigned getAddressSpace() const {
1021 // Note that this is always the same as the pointer operand's address space
1022 // and that is cheaper to compute, so cheat here.
1023 return getPointerAddressSpace();
1024 }
1025
1026 /// Returns the result type of a getelementptr with the given source
1027 /// element type and indexes.
1028 ///
1029 /// Null is returned if the indices are invalid for the specified
1030 /// source element type.
1031 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1032 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1033 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1034
1035 /// Return the type of the element at the given index of an indexable
1036 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1037 ///
1038 /// Returns null if the type can't be indexed, or the given index is not
1039 /// legal for the given type.
1040 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1041 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1042
1043 inline op_iterator idx_begin() { return op_begin()+1; }
1044 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1045 inline op_iterator idx_end() { return op_end(); }
1046 inline const_op_iterator idx_end() const { return op_end(); }
1047
1048 inline iterator_range<op_iterator> indices() {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 inline iterator_range<const_op_iterator> indices() const {
1053 return make_range(idx_begin(), idx_end());
1054 }
1055
1056 Value *getPointerOperand() {
1057 return getOperand(0);
1058 }
1059 const Value *getPointerOperand() const {
1060 return getOperand(0);
1061 }
1062 static unsigned getPointerOperandIndex() {
1063 return 0U; // get index for modifying correct operand.
1064 }
1065
1066 /// Method to return the pointer operand as a
1067 /// PointerType.
1068 Type *getPointerOperandType() const {
1069 return getPointerOperand()->getType();
1070 }
1071
1072 /// Returns the address space of the pointer operand.
1073 unsigned getPointerAddressSpace() const {
1074 return getPointerOperandType()->getPointerAddressSpace();
1075 }
1076
1077 /// Returns the pointer type returned by the GEP
1078 /// instruction, which may be a vector of pointers.
1079 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1080 ArrayRef<Value *> IdxList) {
1081 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1082 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1083 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1084 Type *PtrTy = OrigPtrTy->isOpaque()
1085 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1086 : PointerType::get(ResultElemTy, AddrSpace);
1087 // Vector GEP
1088 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1089 ElementCount EltCount = PtrVTy->getElementCount();
1090 return VectorType::get(PtrTy, EltCount);
1091 }
1092 for (Value *Index : IdxList)
1093 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1094 ElementCount EltCount = IndexVTy->getElementCount();
1095 return VectorType::get(PtrTy, EltCount);
1096 }
1097 // Scalar GEP
1098 return PtrTy;
1099 }
1100
1101 unsigned getNumIndices() const { // Note: always non-negative
1102 return getNumOperands() - 1;
1103 }
1104
1105 bool hasIndices() const {
1106 return getNumOperands() > 1;
1107 }
1108
1109 /// Return true if all of the indices of this GEP are
1110 /// zeros. If so, the result pointer and the first operand have the same
1111 /// value, just potentially different types.
1112 bool hasAllZeroIndices() const;
1113
1114 /// Return true if all of the indices of this GEP are
1115 /// constant integers. If so, the result pointer and the first operand have
1116 /// a constant offset between them.
1117 bool hasAllConstantIndices() const;
1118
1119 /// Set or clear the inbounds flag on this GEP instruction.
1120 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1121 void setIsInBounds(bool b = true);
1122
1123 /// Determine whether the GEP has the inbounds flag.
1124 bool isInBounds() const;
1125
1126 /// Accumulate the constant address offset of this GEP if possible.
1127 ///
1128 /// This routine accepts an APInt into which it will accumulate the constant
1129 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1130 /// all-constant, it returns false and the value of the offset APInt is
1131 /// undefined (it is *not* preserved!). The APInt passed into this routine
1132 /// must be at least as wide as the IntPtr type for the address space of
1133 /// the base GEP pointer.
1134 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1135 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1136 MapVector<Value *, APInt> &VariableOffsets,
1137 APInt &ConstantOffset) const;
1138 // Methods for support type inquiry through isa, cast, and dyn_cast:
1139 static bool classof(const Instruction *I) {
1140 return (I->getOpcode() == Instruction::GetElementPtr);
1141 }
1142 static bool classof(const Value *V) {
1143 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1144 }
1145};
1146
1147template <>
1148struct OperandTraits<GetElementPtrInst> :
1149 public VariadicOperandTraits<GetElementPtrInst, 1> {
1150};
1151
1152GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1153 ArrayRef<Value *> IdxList, unsigned Values,
1154 const Twine &NameStr,
1155 Instruction *InsertBefore)
1156 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1157 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1158 Values, InsertBefore),
1159 SourceElementType(PointeeType),
1160 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1161 assert(cast<PointerType>(getType()->getScalarType())
1162 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1163 init(Ptr, IdxList, NameStr);
1164}
1165
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 BasicBlock *InsertAtEnd)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertAtEnd),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1177 init(Ptr, IdxList, NameStr);
1178}
1179
1180DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1181
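A minimal sketch of the CreateInBounds factory declared above; emitElementGEP is an illustrative name, and it assumes Ptr points at a [4 x i32] array (or is an opaque pointer), with Ctx and BB supplied by the caller.

#include <cassert>
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

Value *emitElementGEP(LLVMContext &Ctx, Value *Ptr, BasicBlock *BB) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *ArrTy = ArrayType::get(I32, 4);
  // &Ptr[0][2]: the leading zero steps through the array pointer itself.
  Value *Idx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, 2)};
  GetElementPtrInst *GEP =
      GetElementPtrInst::CreateInBounds(ArrTy, Ptr, Idx, "elt", BB);
  assert(GEP->isInBounds() && GEP->getNumIndices() == 2);
  return GEP;
}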
1182//===----------------------------------------------------------------------===//
1183// ICmpInst Class
1184//===----------------------------------------------------------------------===//
1185
1186/// This instruction compares its operands according to the predicate given
1187/// to the constructor. It only operates on integers or pointers. The operands
1188/// must be identical types.
1189/// Represent an integer comparison operator.
1190class ICmpInst: public CmpInst {
1191 void AssertOK() {
1192 assert(isIntPredicate() &&
1193 "Invalid ICmp predicate value");
1194 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1195 "Both operands to ICmp instruction are not of the same type!");
1196 // Check that the operands are the right type
1197 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1198 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1199 "Invalid operand types for ICmp instruction");
1200 }
1201
1202protected:
1203 // Note: Instruction needs to be a friend here to call cloneImpl.
1204 friend class Instruction;
1205
1206 /// Clone an identical ICmpInst
1207 ICmpInst *cloneImpl() const;
1208
1209public:
1210 /// Constructor with insert-before-instruction semantics.
1211 ICmpInst(
1212 Instruction *InsertBefore, ///< Where to insert
1213 Predicate pred, ///< The predicate to use for the comparison
1214 Value *LHS, ///< The left-hand-side of the expression
1215 Value *RHS, ///< The right-hand-side of the expression
1216 const Twine &NameStr = "" ///< Name of the instruction
1217 ) : CmpInst(makeCmpResultType(LHS->getType()),
1218 Instruction::ICmp, pred, LHS, RHS, NameStr,
1219 InsertBefore) {
1220#ifndef NDEBUG
1221 AssertOK();
1222#endif
1223 }
1224
1225 /// Constructor with insert-at-end semantics.
1226 ICmpInst(
1227 BasicBlock &InsertAtEnd, ///< Block to insert into.
1228 Predicate pred, ///< The predicate to use for the comparison
1229 Value *LHS, ///< The left-hand-side of the expression
1230 Value *RHS, ///< The right-hand-side of the expression
1231 const Twine &NameStr = "" ///< Name of the instruction
1232 ) : CmpInst(makeCmpResultType(LHS->getType()),
1233 Instruction::ICmp, pred, LHS, RHS, NameStr,
1234 &InsertAtEnd) {
1235#ifndef NDEBUG
1236 AssertOK();
1237#endif
1238 }
1239
1240 /// Constructor with no-insertion semantics.
1241 ICmpInst(
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1248#ifndef NDEBUG
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1254 /// @returns the predicate that would be the result if the operand were
1255 /// regarded as signed.
1256 /// Return the signed version of the predicate
1257 Predicate getSignedPredicate() const {
1258 return getSignedPredicate(getPredicate());
1259 }
1260
1261 /// This is a static version that you can use without an instruction.
1262 /// Return the signed version of the predicate.
1263 static Predicate getSignedPredicate(Predicate pred);
1264
1265 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1266 /// @returns the predicate that would be the result if the operand were
1267 /// regarded as unsigned.
1268 /// Return the unsigned version of the predicate
1269 Predicate getUnsignedPredicate() const {
1270 return getUnsignedPredicate(getPredicate());
1271 }
1272
1273 /// This is a static version that you can use without an instruction.
1274 /// Return the unsigned version of the predicate.
1275 static Predicate getUnsignedPredicate(Predicate pred);
1276
1277 /// Return true if this predicate is either EQ or NE. This also
1278 /// tests for commutativity.
1279 static bool isEquality(Predicate P) {
1280 return P == ICMP_EQ || P == ICMP_NE;
1281 }
1282
1283 /// Return true if this predicate is either EQ or NE. This also
1284 /// tests for commutativity.
1285 bool isEquality() const {
1286 return isEquality(getPredicate());
1287 }
1288
1289 /// @returns true if the predicate of this ICmpInst is commutative
1290 /// Determine if this relation is commutative.
1291 bool isCommutative() const { return isEquality(); }
1292
1293 /// Return true if the predicate is relational (not EQ or NE).
1294 ///
1295 bool isRelational() const {
1296 return !isEquality();
1297 }
1298
1299 /// Return true if the predicate is relational (not EQ or NE).
1300 ///
1301 static bool isRelational(Predicate P) {
1302 return !isEquality(P);
1303 }
1304
1305 /// Return true if the predicate is SGT or UGT.
1306 ///
1307 static bool isGT(Predicate P) {
1308 return P == ICMP_SGT || P == ICMP_UGT;
1309 }
1310
1311 /// Return true if the predicate is SLT or ULT.
1312 ///
1313 static bool isLT(Predicate P) {
1314 return P == ICMP_SLT || P == ICMP_ULT;
1315 }
1316
1317 /// Return true if the predicate is SGE or UGE.
1318 ///
1319 static bool isGE(Predicate P) {
1320 return P == ICMP_SGE || P == ICMP_UGE;
1321 }
1322
1323 /// Return true if the predicate is SLE or ULE.
1324 ///
1325 static bool isLE(Predicate P) {
1326 return P == ICMP_SLE || P == ICMP_ULE;
1327 }
1328
1329 /// Returns the sequence of all ICmp predicates.
1330 ///
1331 static auto predicates() { return ICmpPredicates(); }
1332
1333 /// Exchange the two operands to this instruction in such a way that it does
1334 /// not modify the semantics of the instruction. The predicate value may be
1335 /// changed to retain the same result if the predicate is order dependent
1336 /// (e.g. ult).
1337 /// Swap operands and adjust predicate.
1338 void swapOperands() {
1339 setPredicate(getSwappedPredicate());
1340 Op<0>().swap(Op<1>());
1341 }
1342
1343 /// Return result of `LHS Pred RHS` comparison.
1344 static bool compare(const APInt &LHS, const APInt &RHS,
1345 ICmpInst::Predicate Pred);
1346
1347 // Methods for support type inquiry through isa, cast, and dyn_cast:
1348 static bool classof(const Instruction *I) {
1349 return I->getOpcode() == Instruction::ICmp;
1350 }
1351 static bool classof(const Value *V) {
1352 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1353 }
1354};
1355
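A hedged sketch of the static APInt comparison helper declared above; signedVsUnsigned is an illustrative name and the values are chosen only to show how the signed and unsigned predicates diverge.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

bool signedVsUnsigned() {
  APInt A(32, -1, /*isSigned=*/true); // bit pattern 0xFFFFFFFF
  APInt B(32, 1);
  // Signed: -1 < 1 holds; unsigned: 0xFFFFFFFF < 1 does not.
  return ICmpInst::compare(A, B, ICmpInst::ICMP_SLT) &&
         !ICmpInst::compare(A, B, ICmpInst::ICMP_ULT);
}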
1356//===----------------------------------------------------------------------===//
1357// FCmpInst Class
1358//===----------------------------------------------------------------------===//
1359
1360/// This instruction compares its operands according to the predicate given
1361/// to the constructor. It only operates on floating point values or packed
1362/// vectors of floating point values. The operands must be identical types.
1363/// Represents a floating point comparison operator.
1364class FCmpInst: public CmpInst {
1365 void AssertOK() {
1366 assert(isFPPredicate() && "Invalid FCmp predicate value");
1367 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1368 "Both operands to FCmp instruction are not of the same type!");
1369 // Check that the operands are the right type
1370 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1371 "Invalid operand types for FCmp instruction");
1372 }
1373
1374protected:
1375 // Note: Instruction needs to be a friend here to call cloneImpl.
1376 friend class Instruction;
1377
1378 /// Clone an identical FCmpInst
1379 FCmpInst *cloneImpl() const;
1380
1381public:
1382 /// Constructor with insert-before-instruction semantics.
1383 FCmpInst(
1384 Instruction *InsertBefore, ///< Where to insert
1385 Predicate pred, ///< The predicate to use for the comparison
1386 Value *LHS, ///< The left-hand-side of the expression
1387 Value *RHS, ///< The right-hand-side of the expression
1388 const Twine &NameStr = "" ///< Name of the instruction
1389 ) : CmpInst(makeCmpResultType(LHS->getType()),
1390 Instruction::FCmp, pred, LHS, RHS, NameStr,
1391 InsertBefore) {
1392 AssertOK();
1393 }
1394
1395 /// Constructor with insert-at-end semantics.
1396 FCmpInst(
1397 BasicBlock &InsertAtEnd, ///< Block to insert into.
1398 Predicate pred, ///< The predicate to use for the comparison
1399 Value *LHS, ///< The left-hand-side of the expression
1400 Value *RHS, ///< The right-hand-side of the expression
1401 const Twine &NameStr = "" ///< Name of the instruction
1402 ) : CmpInst(makeCmpResultType(LHS->getType()),
1403 Instruction::FCmp, pred, LHS, RHS, NameStr,
1404 &InsertAtEnd) {
1405 AssertOK();
1406 }
1407
1408 /// Constructor with no-insertion semantics.
1409 FCmpInst(
1410 Predicate Pred, ///< The predicate to use for the comparison
1411 Value *LHS, ///< The left-hand-side of the expression
1412 Value *RHS, ///< The right-hand-side of the expression
1413 const Twine &NameStr = "", ///< Name of the instruction
1414 Instruction *FlagsSource = nullptr
1415 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1416 RHS, NameStr, nullptr, FlagsSource) {
1417 AssertOK();
1418 }
1419
1420 /// @returns true if \p Pred is an equality predicate (OEQ, ONE, UEQ or UNE).
1421 /// Determine if this is an equality predicate.
1422 static bool isEquality(Predicate Pred) {
1423 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1424 Pred == FCMP_UNE;
1425 }
1426
1427 /// @returns true if the predicate of this instruction is EQ or NE.
1428 /// Determine if this is an equality predicate.
1429 bool isEquality() const { return isEquality(getPredicate()); }
1430
1431 /// @returns true if the predicate of this instruction is commutative.
1432 /// Determine if this is a commutative predicate.
1433 bool isCommutative() const {
1434 return isEquality() ||
1435 getPredicate() == FCMP_FALSE ||
1436 getPredicate() == FCMP_TRUE ||
1437 getPredicate() == FCMP_ORD ||
1438 getPredicate() == FCMP_UNO;
1439 }
1440
1441 /// @returns true if the predicate is relational (not EQ or NE).
1442 /// Determine if this is a relational predicate.
1443 bool isRelational() const { return !isEquality(); }
1444
1445 /// Exchange the two operands to this instruction in such a way that it does
1446 /// not modify the semantics of the instruction. The predicate value may be
1447 /// changed to retain the same result if the predicate is order dependent
1448 /// (e.g. ult).
1449 /// Swap operands and adjust predicate.
1450 void swapOperands() {
1451 setPredicate(getSwappedPredicate());
1452 Op<0>().swap(Op<1>());
1453 }
1454
1455 /// Returns the sequence of all FCmp predicates.
1456 ///
1457 static auto predicates() { return FCmpPredicates(); }
1458
1459 /// Return result of `LHS Pred RHS` comparison.
1460 static bool compare(const APFloat &LHS, const APFloat &RHS,
1461 FCmpInst::Predicate Pred);
1462
1463 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1464 static bool classof(const Instruction *I) {
1465 return I->getOpcode() == Instruction::FCmp;
1466 }
1467 static bool classof(const Value *V) {
1468 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1469 }
1470};
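
For illustration only, a minimal sketch of the no-insertion constructor and the predicate queries above; the function name and the constant operands are made up for the example:

#include <cassert>
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

void fcmpSketch(LLVMContext &Ctx) {
  Type *DblTy = Type::getDoubleTy(Ctx);
  Value *LHS = ConstantFP::get(DblTy, 1.0);
  Value *RHS = ConstantFP::get(DblTy, 2.0);
  // Free-standing 'fcmp oeq double 1.0, 2.0'; AssertOK() validates the
  // predicate and operand types inside the constructor.
  FCmpInst *Cmp = new FCmpInst(CmpInst::FCMP_OEQ, LHS, RHS, "cmp");
  assert(Cmp->isEquality() && Cmp->isCommutative());
  Cmp->swapOperands(); // OEQ is its own swapped predicate
  Cmp->deleteValue();  // never inserted into a block, so free it manually
}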
1471
1472//===----------------------------------------------------------------------===//
1473/// This class represents a function call, abstracting a target
1474/// machine's calling convention. This class uses the low bit of the SubClassData
1475/// field to indicate whether or not this is a tail call. The rest of the bits
1476/// hold the calling convention of the call.
1477///
1478class CallInst : public CallBase {
1479 CallInst(const CallInst &CI);
1480
1481 /// Construct a CallInst given a range of arguments.
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1485 Instruction *InsertBefore);
1486
1487 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1488 const Twine &NameStr, Instruction *InsertBefore)
1489 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1490
1491 /// Construct a CallInst given a range of arguments.
1493 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1494 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1495 BasicBlock *InsertAtEnd);
1496
1497 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1498 Instruction *InsertBefore);
1499
1500 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1501 BasicBlock *InsertAtEnd);
1502
1503 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1504 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1505 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1506
1507 /// Compute the number of operands to allocate.
1508 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1509 // We need one operand for the called function, plus the input operand
1510 // counts provided.
1511 return 1 + NumArgs + NumBundleInputs;
1512 }
1513
1514protected:
1515 // Note: Instruction needs to be a friend here to call cloneImpl.
1516 friend class Instruction;
1517
1518 CallInst *cloneImpl() const;
1519
1520public:
1521 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1522 Instruction *InsertBefore = nullptr) {
1523 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1524 }
1525
1526 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1527 const Twine &NameStr,
1528 Instruction *InsertBefore = nullptr) {
1529 return new (ComputeNumOperands(Args.size()))
1530 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1531 }
1532
1533 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1534 ArrayRef<OperandBundleDef> Bundles = None,
1535 const Twine &NameStr = "",
1536 Instruction *InsertBefore = nullptr) {
1537 const int NumOperands =
1538 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1539 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1540
1541 return new (NumOperands, DescriptorBytes)
1542 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1543 }
1544
1545 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1546 BasicBlock *InsertAtEnd) {
1547 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1548 }
1549
1550 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1551 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1552 return new (ComputeNumOperands(Args.size()))
1553 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1554 }
1555
1556 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1557 ArrayRef<OperandBundleDef> Bundles,
1558 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1559 const int NumOperands =
1560 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1561 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1562
1563 return new (NumOperands, DescriptorBytes)
1564 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1565 }
1566
1567 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1568 Instruction *InsertBefore = nullptr) {
1569 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1570 InsertBefore);
1571 }
1572
1573 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1574 ArrayRef<OperandBundleDef> Bundles = None,
1575 const Twine &NameStr = "",
1576 Instruction *InsertBefore = nullptr) {
1577 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1578 NameStr, InsertBefore);
1579 }
1580
1581 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1582 const Twine &NameStr,
1583 Instruction *InsertBefore = nullptr) {
1584 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1585 InsertBefore);
1586 }
1587
1588 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1589 BasicBlock *InsertAtEnd) {
1590 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1591 InsertAtEnd);
1592 }
1593
1594 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1595 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1596 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1597 InsertAtEnd);
1598 }
1599
1600 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1601 ArrayRef<OperandBundleDef> Bundles,
1602 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1603 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1604 NameStr, InsertAtEnd);
1605 }
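
A hedged usage sketch of the FunctionCallee overloads above; the 'puts' prototype and all names here are illustrative assumptions, not part of the header:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// FunctionCallee bundles the callee with its FunctionType, so Create()
// does not need the function type threaded through separately.
CallInst *emitPuts(Module &M, BasicBlock *BB, Value *Str) {
  LLVMContext &Ctx = M.getContext();
  FunctionCallee Puts = M.getOrInsertFunction(
      "puts", Type::getInt32Ty(Ctx), Type::getInt8PtrTy(Ctx));
  return CallInst::Create(Puts, {Str}, "call.puts", BB); // insert at end of BB
}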
1606
1607 /// Create a clone of \p CI with a different set of operand bundles and
1608 /// insert it before \p InsertPt.
1609 ///
1610 /// The returned call instruction is identical to \p CI in every way except that
1611 /// the operand bundles for the new instruction are set to the operand bundles
1612 /// in \p Bundles.
1613 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1614 Instruction *InsertPt = nullptr);
1615
1616 /// Generate the IR for a call to malloc:
1617 /// 1. Compute the malloc call's argument as the specified type's size,
1618 /// possibly multiplied by the array size if the array size is not
1619 /// constant 1.
1620 /// 2. Call malloc with that argument.
1621 /// 3. Bitcast the result of the malloc call to the specified type.
1622 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1623 Type *AllocTy, Value *AllocSize,
1624 Value *ArraySize = nullptr,
1625 Function *MallocF = nullptr,
1626 const Twine &Name = "");
1627 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1628 Type *AllocTy, Value *AllocSize,
1629 Value *ArraySize = nullptr,
1630 Function *MallocF = nullptr,
1631 const Twine &Name = "");
1632 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1633 Type *AllocTy, Value *AllocSize,
1634 Value *ArraySize = nullptr,
1635 ArrayRef<OperandBundleDef> Bundles = None,
1636 Function *MallocF = nullptr,
1637 const Twine &Name = "");
1638 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1639 Type *AllocTy, Value *AllocSize,
1640 Value *ArraySize = nullptr,
1641 ArrayRef<OperandBundleDef> Bundles = None,
1642 Function *MallocF = nullptr,
1643 const Twine &Name = "");
1644 /// Generate the IR for a call to the builtin free function.
1645 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1647 static Instruction *CreateFree(Value *Source,
1648 ArrayRef<OperandBundleDef> Bundles,
1649 Instruction *InsertBefore);
1650 static Instruction *CreateFree(Value *Source,
1651 ArrayRef<OperandBundleDef> Bundles,
1652 BasicBlock *InsertAtEnd);
1653
1654 // Note that 'musttail' implies 'tail'.
1655 enum TailCallKind : unsigned {
1656 TCK_None = 0,
1657 TCK_Tail = 1,
1658 TCK_MustTail = 2,
1659 TCK_NoTail = 3,
1660 TCK_LAST = TCK_NoTail
1661 };
1662
1663 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1664 static_assert(
1665 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1666 "Bitfields must be contiguous");
1667
1668 TailCallKind getTailCallKind() const {
1669 return getSubclassData<TailCallKindField>();
1670 }
1671
1672 bool isTailCall() const {
1673 TailCallKind Kind = getTailCallKind();
1674 return Kind == TCK_Tail || Kind == TCK_MustTail;
1675 }
1676
1677 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1678
1679 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1680
1681 void setTailCallKind(TailCallKind TCK) {
1682 setSubclassData<TailCallKindField>(TCK);
1683 }
1684
1685 void setTailCall(bool IsTc = true) {
1686 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1687 }
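
A small sketch of how the tail-call kind round-trips through these accessors; CI is assumed to be an existing call instruction:

#include <cassert>
#include "llvm/IR/Instructions.h"

void markTail(llvm::CallInst *CI) {
  // TCK_Tail marks the call as eligible for (but not requiring) tail-call
  // optimization; the kind is stored in the subclass-data bitfield.
  CI->setTailCallKind(llvm::CallInst::TCK_Tail);
  assert(CI->isTailCall() && !CI->isMustTailCall() && !CI->isNoTailCall());
}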
1688
1689 /// Return true if the call can return twice
1690 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1691 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1692
1693 // Methods for support type inquiry through isa, cast, and dyn_cast:
1694 static bool classof(const Instruction *I) {
1695 return I->getOpcode() == Instruction::Call;
1696 }
1697 static bool classof(const Value *V) {
1698 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1699 }
1700
1701 /// Updates profile metadata by scaling it by \p S / \p T.
1702 void updateProfWeight(uint64_t S, uint64_t T);
1703
1704private:
1705 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1706 // method so that subclasses cannot accidentally use it.
1707 template <typename Bitfield>
1708 void setSubclassData(typename Bitfield::Type Value) {
1709 Instruction::setSubclassData<Bitfield>(Value);
1710 }
1711};
1712
1713CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1714 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1715 BasicBlock *InsertAtEnd)
1716 : CallBase(Ty->getReturnType(), Instruction::Call,
1717 OperandTraits<CallBase>::op_end(this) -
1718 (Args.size() + CountBundleInputs(Bundles) + 1),
1719 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1720 InsertAtEnd) {
1721 init(Ty, Func, Args, Bundles, NameStr);
1722}
1723
1724CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1725 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1726 Instruction *InsertBefore)
1727 : CallBase(Ty->getReturnType(), Instruction::Call,
1728 OperandTraits<CallBase>::op_end(this) -
1729 (Args.size() + CountBundleInputs(Bundles) + 1),
1730 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1731 InsertBefore) {
1732 init(Ty, Func, Args, Bundles, NameStr);
1733}
1734
1735//===----------------------------------------------------------------------===//
1736// SelectInst Class
1737//===----------------------------------------------------------------------===//
1738
1739/// This class represents the LLVM 'select' instruction.
1740///
1741class SelectInst : public Instruction {
1742 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1743 Instruction *InsertBefore)
1744 : Instruction(S1->getType(), Instruction::Select,
1745 &Op<0>(), 3, InsertBefore) {
1746 init(C, S1, S2);
1747 setName(NameStr);
1748 }
1749
1750 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1751 BasicBlock *InsertAtEnd)
1752 : Instruction(S1->getType(), Instruction::Select,
1753 &Op<0>(), 3, InsertAtEnd) {
1754 init(C, S1, S2);
1755 setName(NameStr);
1756 }
1757
1758 void init(Value *C, Value *S1, Value *S2) {
1759 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1760 Op<0>() = C;
1761 Op<1>() = S1;
1762 Op<2>() = S2;
1763 }
1764
1765protected:
1766 // Note: Instruction needs to be a friend here to call cloneImpl.
1767 friend class Instruction;
1768
1769 SelectInst *cloneImpl() const;
1770
1771public:
1772 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1773 const Twine &NameStr = "",
1774 Instruction *InsertBefore = nullptr,
1775 Instruction *MDFrom = nullptr) {
1776 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1777 if (MDFrom)
1778 Sel->copyMetadata(*MDFrom);
1779 return Sel;
1780 }
1781
1782 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1783 const Twine &NameStr,
1784 BasicBlock *InsertAtEnd) {
1785 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1786 }
1787
1788 const Value *getCondition() const { return Op<0>(); }
1789 const Value *getTrueValue() const { return Op<1>(); }
1790 const Value *getFalseValue() const { return Op<2>(); }
1791 Value *getCondition() { return Op<0>(); }
1792 Value *getTrueValue() { return Op<1>(); }
1793 Value *getFalseValue() { return Op<2>(); }
1794
1795 void setCondition(Value *V) { Op<0>() = V; }
1796 void setTrueValue(Value *V) { Op<1>() = V; }
1797 void setFalseValue(Value *V) { Op<2>() = V; }
1798
1799 /// Swap the true and false values of the select instruction.
1800 /// This doesn't swap prof metadata.
1801 void swapValues() { Op<1>().swap(Op<2>()); }
1802
1803 /// Return a string if the specified operands are invalid
1804 /// for a select operation, otherwise return null.
1805 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1806
1807 /// Transparently provide more efficient getOperand methods.
1808 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1809
1810 OtherOps getOpcode() const {
1811 return static_cast<OtherOps>(Instruction::getOpcode());
1812 }
1813
1814 // Methods for support type inquiry through isa, cast, and dyn_cast:
1815 static bool classof(const Instruction *I) {
1816 return I->getOpcode() == Instruction::Select;
1817 }
1818 static bool classof(const Value *V) {
1819 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1820 }
1821};
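
A minimal sketch, assuming an existing insertion point; areInvalidOperands() is the same validity check that init() asserts on, so probing it first avoids tripping the debug-build assertion:

#include <cassert>
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Build 'select i1 %c, %a, %b' immediately before IP.
SelectInst *buildSelect(Value *C, Value *A, Value *B, Instruction *IP) {
  assert(!SelectInst::areInvalidOperands(C, A, B));
  return SelectInst::Create(C, A, B, "sel", IP);
}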
1822
1823template <>
1824struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1825};
1826
1827DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1828
1829//===----------------------------------------------------------------------===//
1830// VAArgInst Class
1831//===----------------------------------------------------------------------===//
1832
1833/// This class represents the va_arg LLVM instruction, which returns
1834/// an argument of the specified type given a va_list and increments that list.
1835///
1836class VAArgInst : public UnaryInstruction {
1837protected:
1838 // Note: Instruction needs to be a friend here to call cloneImpl.
1839 friend class Instruction;
1840
1841 VAArgInst *cloneImpl() const;
1842
1843public:
1844 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1845 Instruction *InsertBefore = nullptr)
1846 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1847 setName(NameStr);
1848 }
1849
1850 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1851 BasicBlock *InsertAtEnd)
1852 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1853 setName(NameStr);
1854 }
1855
1856 Value *getPointerOperand() { return getOperand(0); }
1857 const Value *getPointerOperand() const { return getOperand(0); }
1858 static unsigned getPointerOperandIndex() { return 0U; }
1859
1860 // Methods for support type inquiry through isa, cast, and dyn_cast:
1861 static bool classof(const Instruction *I) {
1862 return I->getOpcode() == VAArg;
1863 }
1864 static bool classof(const Value *V) {
1865 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1866 }
1867};
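
For illustration, a sketch that pops the next i32 off a va_list value; the helper name is made up:

#include "llvm/IR/Instructions.h"

using namespace llvm;

// VAList is the va_list buffer previously set up by va_start.
Value *nextVarArgInt(Value *VAList, BasicBlock *BB) {
  return new VAArgInst(VAList, Type::getInt32Ty(BB->getContext()),
                       "va.int", BB); // append at the end of BB
}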
1868
1869//===----------------------------------------------------------------------===//
1870// ExtractElementInst Class
1871//===----------------------------------------------------------------------===//
1872
1873/// This instruction extracts a single (scalar)
1874/// element from a VectorType value
1875///
1876class ExtractElementInst : public Instruction {
1877 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878 Instruction *InsertBefore = nullptr);
1879 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880 BasicBlock *InsertAtEnd);
1881
1882protected:
1883 // Note: Instruction needs to be a friend here to call cloneImpl.
1884 friend class Instruction;
1885
1886 ExtractElementInst *cloneImpl() const;
1887
1888public:
1889 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890 const Twine &NameStr = "",
1891 Instruction *InsertBefore = nullptr) {
1892 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893 }
1894
1895 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896 const Twine &NameStr,
1897 BasicBlock *InsertAtEnd) {
1898 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899 }
1900
1901 /// Return true if an extractelement instruction can be
1902 /// formed with the specified operands.
1903 static bool isValidOperands(const Value *Vec, const Value *Idx);
1904
1905 Value *getVectorOperand() { return Op<0>(); }
1906 Value *getIndexOperand() { return Op<1>(); }
1907 const Value *getVectorOperand() const { return Op<0>(); }
1908 const Value *getIndexOperand() const { return Op<1>(); }
1909
1910 VectorType *getVectorOperandType() const {
1911 return cast<VectorType>(getVectorOperand()->getType());
1912 }
1913
1914 /// Transparently provide more efficient getOperand methods.
1915 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1916
1917 // Methods for support type inquiry through isa, cast, and dyn_cast:
1918 static bool classof(const Instruction *I) {
1919 return I->getOpcode() == Instruction::ExtractElement;
1920 }
1921 static bool classof(const Value *V) {
1922 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923 }
1924};
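
A brief sketch, assuming Vec is a vector value and Idx an integer; isValidOperands() mirrors the checks the constructor itself performs:

#include <cassert>
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Read one lane out of Vec, inserting the extract immediately before IP.
Value *extractLane(Value *Vec, Value *Idx, Instruction *IP) {
  assert(ExtractElementInst::isValidOperands(Vec, Idx));
  return ExtractElementInst::Create(Vec, Idx, "lane", IP);
}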
1925
1926template <>
1927struct OperandTraits<ExtractElementInst> :
1928 public FixedNumOperandTraits<ExtractElementInst, 2> {
1929};
1930
1931DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1932
1933//===----------------------------------------------------------------------===//
1934// InsertElementInst Class
1935//===----------------------------------------------------------------------===//
1936
1937/// This instruction inserts a single (scalar)
1938/// element into a VectorType value
1939///
1940class InsertElementInst : public Instruction {
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942 const Twine &NameStr = "",
1943 Instruction *InsertBefore = nullptr);
1944 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945 BasicBlock *InsertAtEnd);
1946
1947protected:
1948 // Note: Instruction needs to be a friend here to call cloneImpl.
1949 friend class Instruction;
1950
1951 InsertElementInst *cloneImpl() const;
1952
1953public:
1954 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955 const Twine &NameStr = "",
1956 Instruction *InsertBefore = nullptr) {
1957 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958 }
1959
1960 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961 const Twine &NameStr,
1962 BasicBlock *InsertAtEnd) {
1963 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964 }
1965
1966 /// Return true if an insertelement instruction can be
1967 /// formed with the specified operands.
1968 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969 const Value *Idx);
1970
1971 /// Overload to return most specific vector type.
1972 ///
1973 VectorType *getType() const {
1974 return cast<VectorType>(Instruction::getType());
1975 }
1976
1977 /// Transparently provide more efficient getOperand methods.
1978 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1979
1980 // Methods for support type inquiry through isa, cast, and dyn_cast:
1981 static bool classof(const Instruction *I) {
1982 return I->getOpcode() == Instruction::InsertElement;
1983 }
1984 static bool classof(const Value *V) {
1985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986 }
1987};
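
The companion sketch for insertion; as with the extract case, the names are illustrative:

#include <cassert>
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Produce a copy of Vec with lane Idx replaced by NewElt, inserted at the
// end of BB; the original Vec value is left untouched (SSA values are
// immutable).
Value *setLane(Value *Vec, Value *NewElt, Value *Idx, BasicBlock *BB) {
  assert(InsertElementInst::isValidOperands(Vec, NewElt, Idx));
  return InsertElementInst::Create(Vec, NewElt, Idx, "vec.upd", BB);
}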
1988
1989template <>
1990struct OperandTraits<InsertElementInst> :
1991 public FixedNumOperandTraits<InsertElementInst, 3> {
1992};
1993
1994DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
2000constexpr int UndefMaskElem = -1;
2001
2002/// This instruction constructs a fixed permutation of two
2003/// input vectors.
2004///
2005/// For each element of the result vector, the shuffle mask selects an element
2006/// from one of the input vectors to copy to the result. Non-negative elements
2007/// in the mask represent an index into the concatenated pair of input vectors.
2008/// UndefMaskElem (-1) specifies that the result element is undefined.
2009///
2010/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011/// requirement may be relaxed in the future.
2012class ShuffleVectorInst : public Instruction {
2013 SmallVector<int, 4> ShuffleMask;
2014 Constant *ShuffleMaskForBitcode;
2015
2016protected:
2017 // Note: Instruction needs to be a friend here to call cloneImpl.
2018 friend class Instruction;
2019
2020 ShuffleVectorInst *cloneImpl() const;
2021
2022public:
2023 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2028 Instruction *InsertBefore = nullptr);
2029 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2030 BasicBlock *InsertAtEnd);
2031 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2032 const Twine &NameStr = "",
2033 Instruction *InsertBefore = nullptr);
2034 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2035 const Twine &NameStr, BasicBlock *InsertAtEnd);
2036 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2037 const Twine &NameStr = "",
2038 Instruction *InsertBefore = nullptr);
2039 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2040 const Twine &NameStr, BasicBlock *InsertAtEnd);
2041
2042 void *operator new(size_t S) { return User::operator new(S, 2); }
2043 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2044
2045 /// Swap the operands and adjust the mask to preserve the semantics
2046 /// of the instruction.
2047 void commute();
2048
2049 /// Return true if a shufflevector instruction can be
2050 /// formed with the specified operands.
2051 static bool isValidOperands(const Value *V1, const Value *V2,
2052 const Value *Mask);
2053 static bool isValidOperands(const Value *V1, const Value *V2,
2054 ArrayRef<int> Mask);
2055
2056 /// Overload to return most specific vector type.
2057 ///
2058 VectorType *getType() const {
2059 return cast<VectorType>(Instruction::getType());
2060 }
2061
2062 /// Transparently provide more efficient getOperand methods.
2063 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2064
2065 /// Return the shuffle mask value of this instruction for the given element
2066 /// index. Return UndefMaskElem if the element is undef.
2067 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2068
2069 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2070 /// elements of the mask are returned as UndefMaskElem.
2071 static void getShuffleMask(const Constant *Mask,
2072 SmallVectorImpl<int> &Result);
2073
2074 /// Return the mask for this instruction as a vector of integers. Undefined
2075 /// elements of the mask are returned as UndefMaskElem.
2076 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2077 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2078 }
2079
2080 /// Return the mask for this instruction, for use in bitcode.
2081 ///
2082 /// TODO: This is temporary until we decide a new bitcode encoding for
2083 /// shufflevector.
2084 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2085
2086 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2087 Type *ResultTy);
2088
2089 void setShuffleMask(ArrayRef<int> Mask);
2090
2091 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2092
2093 /// Return true if this shuffle returns a vector with a different number of
2094 /// elements than its source vectors.
2095 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2096 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2097 bool changesLength() const {
2098 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2099 ->getElementCount()
2100 .getKnownMinValue();
2101 unsigned NumMaskElts = ShuffleMask.size();
2102 return NumSourceElts != NumMaskElts;
2103 }
2104
2105 /// Return true if this shuffle returns a vector with a greater number of
2106 /// elements than its source vectors.
2107 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2108 bool increasesLength() const {
2109 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2110 ->getElementCount()
2111 .getKnownMinValue();
2112 unsigned NumMaskElts = ShuffleMask.size();
2113 return NumSourceElts < NumMaskElts;
2114 }
2115
2116 /// Return true if this shuffle mask chooses elements from exactly one source
2117 /// vector.
2118 /// Example: <7,5,undef,7>
2119 /// This assumes that vector operands are the same length as the mask.
2120 static bool isSingleSourceMask(ArrayRef<int> Mask);
2121 static bool isSingleSourceMask(const Constant *Mask) {
2122 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2123 SmallVector<int, 16> MaskAsInts;
2124 getShuffleMask(Mask, MaskAsInts);
2125 return isSingleSourceMask(MaskAsInts);
2126 }
2127
2128 /// Return true if this shuffle chooses elements from exactly one source
2129 /// vector without changing the length of that vector.
2130 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2131 /// TODO: Optionally allow length-changing shuffles.
2132 bool isSingleSource() const {
2133 return !changesLength() && isSingleSourceMask(ShuffleMask);
2134 }
2135
2136 /// Return true if this shuffle mask chooses elements from exactly one source
2137 /// vector without lane crossings. A shuffle using this mask is not
2138 /// necessarily a no-op because it may change the number of elements from its
2139 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2140 /// Example: <undef,undef,2,3>
2141 static bool isIdentityMask(ArrayRef<int> Mask);
2142 static bool isIdentityMask(const Constant *Mask) {
2143 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2144 SmallVector<int, 16> MaskAsInts;
2145 getShuffleMask(Mask, MaskAsInts);
2146 return isIdentityMask(MaskAsInts);
2147 }
2148
2149 /// Return true if this shuffle chooses elements from exactly one source
2150 /// vector without lane crossings and does not change the number of elements
2151 /// from its input vectors.
2152 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2153 bool isIdentity() const {
2154 return !changesLength() && isIdentityMask(ShuffleMask);
2155 }
2156
2157 /// Return true if this shuffle lengthens exactly one source vector with
2158 /// undefs in the high elements.
2159 bool isIdentityWithPadding() const;
2160
2161 /// Return true if this shuffle extracts the first N elements of exactly one
2162 /// source vector.
2163 bool isIdentityWithExtract() const;
2164
2165 /// Return true if this shuffle concatenates its 2 source vectors. This
2166 /// returns false if either input is undefined. In that case, the shuffle is
2167 /// better classified as an identity with padding operation.
2168 bool isConcat() const;
2169
2170 /// Return true if this shuffle mask chooses elements from its source vectors
2171 /// without lane crossings. A shuffle using this mask would be
2172 /// equivalent to a vector select with a constant condition operand.
2173 /// Example: <4,1,6,undef>
2174 /// This returns false if the mask does not choose from both input vectors.
2175 /// In that case, the shuffle is better classified as an identity shuffle.
2176 /// This assumes that vector operands are the same length as the mask
2177 /// (a length-changing shuffle can never be equivalent to a vector select).
2178 static bool isSelectMask(ArrayRef<int> Mask);
2179 static bool isSelectMask(const Constant *Mask) {
2180 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2181 SmallVector<int, 16> MaskAsInts;
2182 getShuffleMask(Mask, MaskAsInts);
2183 return isSelectMask(MaskAsInts);
2184 }
2185
2186 /// Return true if this shuffle chooses elements from its source vectors
2187 /// without lane crossings and all operands have the same number of elements.
2188 /// In other words, this shuffle is equivalent to a vector select with a
2189 /// constant condition operand.
2190 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2191 /// This returns false if the mask does not choose from both input vectors.
2192 /// In that case, the shuffle is better classified as an identity shuffle.
2193 /// TODO: Optionally allow length-changing shuffles.
2194 bool isSelect() const {
2195 return !changesLength() && isSelectMask(ShuffleMask);
2196 }
2197
2198 /// Return true if this shuffle mask swaps the order of elements from exactly
2199 /// one source vector.
2200 /// Example: <7,6,undef,4>
2201 /// This assumes that vector operands are the same length as the mask.
2202 static bool isReverseMask(ArrayRef<int> Mask);
2203 static bool isReverseMask(const Constant *Mask) {
2204 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2205 SmallVector<int, 16> MaskAsInts;
2206 getShuffleMask(Mask, MaskAsInts);
2207 return isReverseMask(MaskAsInts);
2208 }
2209
2210 /// Return true if this shuffle swaps the order of elements from exactly
2211 /// one source vector.
2212 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2213 /// TODO: Optionally allow length-changing shuffles.
2214 bool isReverse() const {
2215 return !changesLength() && isReverseMask(ShuffleMask);
2216 }
2217
2218 /// Return true if this shuffle mask chooses all elements with the same value
2219 /// as the first element of exactly one source vector.
2220 /// Example: <4,undef,undef,4>
2221 /// This assumes that vector operands are the same length as the mask.
2222 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2223 static bool isZeroEltSplatMask(const Constant *Mask) {
2224 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2225 SmallVector<int, 16> MaskAsInts;
2226 getShuffleMask(Mask, MaskAsInts);
2227 return isZeroEltSplatMask(MaskAsInts);
2228 }
2229
2230 /// Return true if all elements of this shuffle are the same value as the
2231 /// first element of exactly one source vector without changing the length
2232 /// of that vector.
2233 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2234 /// TODO: Optionally allow length-changing shuffles.
2235 /// TODO: Optionally allow splats from other elements.
2236 bool isZeroEltSplat() const {
2237 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2238 }
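
A sketch of the static mask predicates; because they take plain integer masks, a mask can be classified without materializing a shufflevector instruction. The masks below follow the documented examples:

#include "llvm/IR/Instructions.h"

bool maskSketch() {
  int Rev[] = {3, 2, 1, 0};    // reverses one 4-element source
  int Splat[] = {0, 0, -1, 0}; // splats element 0; -1 is an undef lane
  return llvm::ShuffleVectorInst::isReverseMask(Rev) &&
         llvm::ShuffleVectorInst::isZeroEltSplatMask(Splat); // both true
}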
2239
2240 /// Return true if this shuffle mask is a transpose mask.
2241 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2242 /// even- or odd-numbered vector elements from two n-dimensional source
2243 /// vectors and write each result into consecutive elements of an
2244 /// n-dimensional destination vector. Two shuffles are necessary to complete
2245 /// the transpose, one for the even elements and another for the odd elements.
2246 /// This description closely follows how the TRN1 and TRN2 AArch64
2247 /// instructions operate.
2248 ///
2249 /// For example, a simple 2x2 matrix can be transposed with:
2250 ///
2251 /// ; Original matrix
2252 /// m0 = < a, b >
2253 /// m1 = < c, d >
2254 ///
2255 /// ; Transposed matrix
2256 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2257 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2258 ///
2259 /// For matrices having greater than n columns, the resulting nx2 transposed
2260 /// matrix is stored in two result vectors such that one vector contains
2261 /// interleaved elements from all the even-numbered rows and the other vector
2262 /// contains interleaved elements from all the odd-numbered rows. For example,
2263 /// a 2x4 matrix can be transposed with:
2264 ///
2265 /// ; Original matrix
2266 /// m0 = < a, b, c, d >
2267 /// m1 = < e, f, g, h >
2268 ///
2269 /// ; Transposed matrix
2270 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2271 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2272 static bool isTransposeMask(ArrayRef<int> Mask);
2273 static bool isTransposeMask(const Constant *Mask) {
2274 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2275 SmallVector<int, 16> MaskAsInts;
2276 getShuffleMask(Mask, MaskAsInts);
2277 return isTransposeMask(MaskAsInts);
2278 }
2279
2280 /// Return true if this shuffle transposes the elements of its inputs without
2281 /// changing the length of the vectors. This operation may also be known as a
2282 /// merge or interleave. See the description for isTransposeMask() for the
2283 /// exact specification.
2284 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2285 bool isTranspose() const {
2286 return !changesLength() && isTransposeMask(ShuffleMask);
2287 }
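
Tying the isTransposeMask() description back to code, a sketch using the even-lane mask from the 2x4 example above:

#include "llvm/IR/Instructions.h"

bool transposeSketch() {
  int Even[] = {0, 4, 2, 6}; // gathers a,e,c,g: the TRN1-style half
  return llvm::ShuffleVectorInst::isTransposeMask(Even); // true
}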
2288
2289 /// Return true if this shuffle mask is an extract subvector mask.
2290 /// A valid extract subvector mask returns a smaller vector from a single
2291 /// source operand. The base extraction index is returned as well.
2292 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2293 int &Index);
2294 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2295 int &Index) {
2296 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2297 // Not possible to express a shuffle mask for a scalable vector for this
2298 // case.
2299 if (isa<ScalableVectorType>(Mask->getType()))
2300 return false;
2301 SmallVector<int, 16> MaskAsInts;
2302 getShuffleMask(Mask, MaskAsInts);
2303 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2304 }
2305
2306 /// Return true if this shuffle mask is an extract subvector mask.
2307 bool isExtractSubvectorMask(int &Index) const {
2308 // Not possible to express a shuffle mask for a scalable vector for this
2309 // case.
2310 if (isa<ScalableVectorType>(getType()))
2311 return false;
2312
2313 int NumSrcElts =
2314 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2315 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2316 }
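
A sketch of the extract-subvector query under the documented semantics; with 4-element sources, <2,3> reads a contiguous 2-element slice of the first input starting at lane 2:

#include "llvm/IR/Instructions.h"

bool extractSubvecSketch() {
  int Mask[] = {2, 3};
  int Index = -1;
  bool Ok = llvm::ShuffleVectorInst::isExtractSubvectorMask(
      Mask, /*NumSrcElts=*/4, Index);
  return Ok && Index == 2; // expected: true
}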
2317
2318 /// Return true if this shuffle mask is an insert subvector mask.
2319 /// A valid insert subvector mask inserts the lowest elements of a second
2320 /// source operand into an in-place first source operand.
2321 /// Both the subvector width and the insertion index are returned.
2322 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2323 int &NumSubElts, int &Index);
2324 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2325 int &NumSubElts, int &Index) {
2326 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2327 // Not possible to express a shuffle mask for a scalable vector for this
2328 // case.
2329 if (isa<ScalableVectorType>(Mask->getType()))
2330 return false;
2331 SmallVector<int, 16> MaskAsInts;
2332 getShuffleMask(Mask, MaskAsInts);
2333 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2334 }
2335
2336 /// Return true if this shuffle mask is an insert subvector mask.
2337 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2338 // Not possible to express a shuffle mask for a scalable vector for this
2339 // case.
2340 if (isa<ScalableVectorType>(getType()))
2341 return false;
2342
2343 int NumSrcElts =
2344 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2345 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2346 }
2347
2348 /// Return true if this shuffle mask replicates each of the \p VF elements
2349 /// in a vector \p ReplicationFactor times.
2350 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2351 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2352 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2353 int &VF);
2354 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2355 int &VF) {
2356 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2357 // Not possible to express a shuffle mask for a scalable vector for this
2358 // case.
2359 if (isa<ScalableVectorType>(Mask->getType()))
2360 return false;
2361 SmallVector<int, 16> MaskAsInts;
2362 getShuffleMask(Mask, MaskAsInts);
2363 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2364 }
2365
2366 /// Return true if this shuffle mask is a replication mask.
2367 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
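
Scaling the documented example down to VF=2, a sketch of the replication query:

#include "llvm/IR/Instructions.h"

bool replicationSketch() {
  int Mask[] = {0, 0, 0, 1, 1, 1}; // each of 2 elements repeated 3 times
  int RF = 0, VF = 0;
  return llvm::ShuffleVectorInst::isReplicationMask(Mask, RF, VF) &&
         RF == 3 && VF == 2;
}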
2368
2369 /// Change values in a shuffle permute mask assuming the two vector operands
2370 /// of length InVecNumElts have swapped position.
2371 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2372 unsigned InVecNumElts) {
2373 for (int &Idx : Mask) {
2374 if (Idx == -1)
2375 continue;
2376 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2377 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2378        "shufflevector mask index out of range");
2379 }
2380 }
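
Working the loop above by hand on a small mask: with two 4-element inputs, <0,5,2,7> selects A0,B1,A2,B3, and after the operands swap the equivalent selection is <4,1,6,3>:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"

void commuteSketch() {
  llvm::SmallVector<int, 4> Mask = {0, 5, 2, 7};
  llvm::ShuffleVectorInst::commuteShuffleMask(Mask, /*InVecNumElts=*/4);
  // Mask now holds {4, 1, 6, 3}; undef (-1) entries pass through as-is.
}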
2381
2382 // Methods for support type inquiry through isa, cast, and dyn_cast:
2383 static bool classof(const Instruction *I) {
2384 return I->getOpcode() == Instruction::ShuffleVector;
2385 }
2386 static bool classof(const Value *V) {
2387 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2388 }
2389};
2390
2391template <>
2392struct OperandTraits<ShuffleVectorInst>
2393 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2394
2395DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)