Bug Summary

File: llvm/lib/Transforms/Scalar/LICM.cpp
Warning: line 1215, column 41
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LICM.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Scalar/LICM.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Scalar/LICM.cpp

1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
15// This pass uses alias analysis for two purposes:
16//
17// 1. Moving loop invariant loads and calls out of loops. If we can determine
18// that a load or call inside of a loop never aliases anything stored to,
19// we can hoist it or sink it like any other instruction.
20// 2. Scalar Promotion of Memory - If there is a store instruction inside of
21// the loop, we try to move the store to happen AFTER the loop instead of
22// inside of the loop. This can only happen if a few conditions are true:
23// A. The pointer stored through is loop invariant
24// B. There are no stores or loads in the loop which _may_ alias the
25// pointer. There are no calls in the loop which mod/ref the pointer.
26// If these conditions are true, we can promote the loads and stores in the
27// loop of the pointer to use a temporary alloca'd variable. We then use
28// the SSAUpdater to construct the appropriate SSA form for the value.
29//
30//===----------------------------------------------------------------------===//
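
// [Illustrative sketch, not part of LICM.cpp] A minimal before/after picture
// of the transformations described above, on hypothetical C++ input, assuming
// p does not alias a or b. (A simplification: the real pass must also prove
// the promoted store actually executes.)
//
//   // Before LICM:                    // After LICM:
//   for (int i = 0; i < n; ++i) {      int t = a * b;   // hoisted (invariant)
//     int t = a * b;                   int s = *p;      // promoted to register
//     *p += t;                         for (int i = 0; i < n; ++i)
//   }                                    s += t;
//                                      *p = s;          // store sunk after loop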
31
32#include "llvm/Transforms/Scalar/LICM.h"
33#include "llvm/ADT/SetOperations.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/Analysis/AliasAnalysis.h"
36#include "llvm/Analysis/AliasSetTracker.h"
37#include "llvm/Analysis/BasicAliasAnalysis.h"
38#include "llvm/Analysis/CaptureTracking.h"
39#include "llvm/Analysis/ConstantFolding.h"
40#include "llvm/Analysis/GlobalsModRef.h"
41#include "llvm/Analysis/GuardUtils.h"
42#include "llvm/Analysis/Loads.h"
43#include "llvm/Analysis/LoopInfo.h"
44#include "llvm/Analysis/LoopIterator.h"
45#include "llvm/Analysis/LoopPass.h"
46#include "llvm/Analysis/MemoryBuiltins.h"
47#include "llvm/Analysis/MemorySSA.h"
48#include "llvm/Analysis/MemorySSAUpdater.h"
49#include "llvm/Analysis/OptimizationRemarkEmitter.h"
50#include "llvm/Analysis/ScalarEvolution.h"
51#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
52#include "llvm/Analysis/TargetLibraryInfo.h"
53#include "llvm/Analysis/ValueTracking.h"
54#include "llvm/IR/CFG.h"
55#include "llvm/IR/Constants.h"
56#include "llvm/IR/DataLayout.h"
57#include "llvm/IR/DebugInfoMetadata.h"
58#include "llvm/IR/DerivedTypes.h"
59#include "llvm/IR/Dominators.h"
60#include "llvm/IR/Instructions.h"
61#include "llvm/IR/IntrinsicInst.h"
62#include "llvm/IR/LLVMContext.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/PatternMatch.h"
65#include "llvm/IR/PredIteratorCache.h"
66#include "llvm/Support/CommandLine.h"
67#include "llvm/Support/Debug.h"
68#include "llvm/Support/raw_ostream.h"
69#include "llvm/Transforms/Scalar.h"
70#include "llvm/Transforms/Scalar/LoopPassManager.h"
71#include "llvm/Transforms/Utils/BasicBlockUtils.h"
72#include "llvm/Transforms/Utils/Local.h"
73#include "llvm/Transforms/Utils/LoopUtils.h"
74#include "llvm/Transforms/Utils/SSAUpdater.h"
75#include <algorithm>
76#include <utility>
77using namespace llvm;
78
79#define DEBUG_TYPE "licm"
80
81STATISTIC(NumCreatedBlocks, "Number of blocks created");
82STATISTIC(NumClonedBranches, "Number of branches cloned");
83STATISTIC(NumSunk, "Number of instructions sunk out of loop");
84STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
85STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
86STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
87STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
88
89/// Memory promotion is enabled by default.
90static cl::opt<bool>
91 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
92 cl::desc("Disable memory promotion in LICM pass"));
93
94static cl::opt<bool> ControlFlowHoisting(
95 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
96 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
97
98static cl::opt<uint32_t> MaxNumUsesTraversed(
99 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
100 cl::desc("Max num uses visited for identifying load "
101 "invariance in loop using invariant start (default = 8)"));
102
103// Default value of zero implies we use the regular alias set tracker mechanism
104// instead of the cross product using AA to identify aliasing of the memory
105// location we are interested in.
106static cl::opt<int>
107LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
108 cl::desc("How many instructions to cross product using AA"));
109
110// Experimental option to allow imprecision in LICM in pathological cases, in
111// exchange for faster compile. This is to be removed if MemorySSA starts to
112// address the same issue. This flag applies only when LICM uses MemorySSA
113// instead of AliasSetTracker. LICM calls MemorySSAWalker's
114// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
115// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
116// which may not be precise, since optimizeUses is capped. The result is
117// correct, but we may not get as "far up" as possible to get which access is
118// clobbering the one queried.
119cl::opt<unsigned> llvm::SetLicmMssaOptCap(
120 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
121 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
122 "for faster compile. Caps the MemorySSA clobbering calls."));
123
124// Experimentally, memory promotion carries less importance than sinking and
125// hoisting. Limit when we do promotion when using MemorySSA, in order to save
126// compile time.
127cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
128 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
129 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
130 "effect. When MSSA in LICM is enabled, then this is the maximum "
131 "number of accesses allowed to be present in a loop in order to "
132 "enable memory promotion."));
133
134static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
135static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
136 const LoopSafetyInfo *SafetyInfo,
137 TargetTransformInfo *TTI, bool &FreeInLoop);
138static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
139 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
140 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
141 OptimizationRemarkEmitter *ORE);
142static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
143 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
144 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
145static bool isSafeToExecuteUnconditionally(Instruction &Inst,
146 const DominatorTree *DT,
147 const Loop *CurLoop,
148 const LoopSafetyInfo *SafetyInfo,
149 OptimizationRemarkEmitter *ORE,
150 const Instruction *CtxI = nullptr);
151static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
152 AliasSetTracker *CurAST, Loop *CurLoop,
153 AliasAnalysis *AA);
154static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
155 Loop *CurLoop,
156 SinkAndHoistLICMFlags &Flags);
157static Instruction *CloneInstructionInExitBlock(
158 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
159 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
160
161static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
162 AliasSetTracker *AST, MemorySSAUpdater *MSSAU);
163
164static void moveInstructionBefore(Instruction &I, Instruction &Dest,
165 ICFLoopSafetyInfo &SafetyInfo,
166 MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
167
168namespace {
169struct LoopInvariantCodeMotion {
170 using ASTrackerMapTy = DenseMap<Loop *, std::unique_ptr<AliasSetTracker>>;
171 bool runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
172 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
173 ScalarEvolution *SE, MemorySSA *MSSA,
174 OptimizationRemarkEmitter *ORE, bool DeleteAST);
175
176 ASTrackerMapTy &getLoopToAliasSetMap() { return LoopToAliasSetMap; }
177 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
178 unsigned LicmMssaNoAccForPromotionCap)
179 : LicmMssaOptCap(LicmMssaOptCap),
180 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
181
182private:
183 ASTrackerMapTy LoopToAliasSetMap;
184 unsigned LicmMssaOptCap;
185 unsigned LicmMssaNoAccForPromotionCap;
186
187 std::unique_ptr<AliasSetTracker>
188 collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AliasAnalysis *AA);
189 std::unique_ptr<AliasSetTracker>
190 collectAliasInfoForLoopWithMSSA(Loop *L, AliasAnalysis *AA,
191 MemorySSAUpdater *MSSAU);
192};
193
194struct LegacyLICMPass : public LoopPass {
195 static char ID; // Pass identification, replacement for typeid
196 LegacyLICMPass(
197 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
198 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
199 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
200 initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
201 }
202
203 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
204 if (skipLoop(L)) {
205 // If we have run LICM on a previous loop but now we are skipping
206 // (because we've hit the opt-bisect limit), we need to clear the
207 // loop alias information.
208 LICM.getLoopToAliasSetMap().clear();
209 return false;
210 }
211
212 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
213 MemorySSA *MSSA = EnableMSSALoopDependency
214 ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
215 : nullptr;
216 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
217 // pass. Function analyses need to be preserved across loop transformations
218 // but ORE cannot be preserved (see comment before the pass definition).
219 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
220 return LICM.runOnLoop(L,
221 &getAnalysis<AAResultsWrapperPass>().getAAResults(),
222 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
223 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
224 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
225 *L->getHeader()->getParent()),
226 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
227 *L->getHeader()->getParent()),
228 SE ? &SE->getSE() : nullptr, MSSA, &ORE, false);
229 }
230
231 /// This transformation requires natural loop information & requires that
232 /// loop preheaders be inserted into the CFG...
233 ///
234 void getAnalysisUsage(AnalysisUsage &AU) const override {
235 AU.addPreserved<DominatorTreeWrapperPass>();
236 AU.addPreserved<LoopInfoWrapperPass>();
237 AU.addRequired<TargetLibraryInfoWrapperPass>();
238 if (EnableMSSALoopDependency) {
239 AU.addRequired<MemorySSAWrapperPass>();
240 AU.addPreserved<MemorySSAWrapperPass>();
241 }
242 AU.addRequired<TargetTransformInfoWrapperPass>();
243 getLoopAnalysisUsage(AU);
244 }
245
246 using llvm::Pass::doFinalization;
247
248 bool doFinalization() override {
249 auto &AliasSetMap = LICM.getLoopToAliasSetMap();
250 // All loops in the AliasSetMap should be cleaned up already. The only case
251 // where we fail to do so is if an outer loop gets deleted before LICM
252 // visits it.
253 assert(all_of(AliasSetMap,
254 [](LoopInvariantCodeMotion::ASTrackerMapTy::value_type &KV) {
255 return !KV.first->getParentLoop();
256 }) &&
257 "Didn't free loop alias sets");
258 AliasSetMap.clear();
259 return false;
260 }
261
262private:
263 LoopInvariantCodeMotion LICM;
264
265 /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
266 void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
267 Loop *L) override;
268
269 /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
270 /// set.
271 void deleteAnalysisValue(Value *V, Loop *L) override;
272
273 /// Simple Analysis hook. Delete loop L from alias set map.
274 void deleteAnalysisLoop(Loop *L) override;
275};
276} // namespace
277
278PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
279 LoopStandardAnalysisResults &AR, LPMUpdater &) {
280 const auto &FAM =
281 AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
282 Function *F = L.getHeader()->getParent();
283
284 auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
285 // FIXME: This should probably be optional rather than required.
286 if (!ORE)
287 report_fatal_error("LICM: OptimizationRemarkEmitterAnalysis not "
288 "cached at a higher level");
289
290 LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
291 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.TTI, &AR.SE,
292 AR.MSSA, ORE, true))
293 return PreservedAnalyses::all();
294
295 auto PA = getLoopPassPreservedAnalyses();
296
297 PA.preserve<DominatorTreeAnalysis>();
298 PA.preserve<LoopAnalysis>();
299 if (AR.MSSA)
300 PA.preserve<MemorySSAAnalysis>();
301
302 return PA;
303}
304
305char LegacyLICMPass::ID = 0;
306INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
307 false, false)
308INITIALIZE_PASS_DEPENDENCY(LoopPass)
309INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
310INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
311INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
312INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
313 false)
314
315Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
316Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
317 unsigned LicmMssaNoAccForPromotionCap) {
318 return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
319}
320
321/// Hoist expressions out of the specified loop. Note that alias info for
322/// inner loops is not preserved, so it is not a good idea to run LICM
323/// multiple times on one loop.
324/// We should delete the AST for inner loops in the new pass manager to
325/// avoid a memory leak.
326///
327bool LoopInvariantCodeMotion::runOnLoop(
328 Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
329 TargetLibraryInfo *TLI, TargetTransformInfo *TTI, ScalarEvolution *SE,
330 MemorySSA *MSSA, OptimizationRemarkEmitter *ORE, bool DeleteAST) {
331 bool Changed = false;
332
333 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
334
335 // If this loop has metadata indicating that LICM is not to be performed then
336 // just exit.
337 if (hasDisableLICMTransformsHint(L)) {
338 return false;
339 }
340
341 std::unique_ptr<AliasSetTracker> CurAST;
342 std::unique_ptr<MemorySSAUpdater> MSSAU;
343 bool NoOfMemAccTooLarge = false;
344 unsigned LicmMssaOptCounter = 0;
345
346 if (!MSSA) {
347 LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
348 CurAST = collectAliasInfoForLoop(L, LI, AA);
349 } else {
350 LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
351 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
352
353 unsigned AccessCapCount = 0;
354 for (auto *BB : L->getBlocks()) {
355 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
356 for (const auto &MA : *Accesses) {
357 (void)MA;
358 AccessCapCount++;
359 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
360 NoOfMemAccTooLarge = true;
361 break;
362 }
363 }
364 }
365 if (NoOfMemAccTooLarge)
366 break;
367 }
368 }
369
370 // Get the preheader block to move instructions into...
371 BasicBlock *Preheader = L->getLoopPreheader();
372
373 // Compute loop safety information.
374 ICFLoopSafetyInfo SafetyInfo(DT);
375 SafetyInfo.computeLoopSafetyInfo(L);
376
377 // We want to visit all of the instructions in this loop... that are not part
378 // of our subloops (they have already had their invariants hoisted out of
379 // their loop, into this loop, so there is no need to process the BODIES of
380 // the subloops).
381 //
382 // Traverse the body of the loop in depth first order on the dominator tree so
383 // that we are guaranteed to see definitions before we see uses. This allows
384 // us to sink instructions in one pass, without iteration. After sinking
385 // instructions, we perform another pass to hoist them out of the loop.
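
// [Illustrative note, not part of LICM.cpp] A one-line worked example of why
// uses-before-defs makes sinking single-pass: for "a = ...; b = f(a);" where
// b is only used outside the loop, b is visited and sunk first, which leaves
// a unused inside the loop, so a can be sunk when it is visited next -- no
// second iteration is needed.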
386 SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
387 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
388 /*IsSink=*/true};
389 if (L->hasDedicatedExits())
390 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
391 CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
392 Flags.IsSink = false;
393 if (Preheader)
394 Changed |=
395 hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
396 CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE);
397
398 // Now that all loop invariants have been removed from the loop, promote any
399 // memory references to scalars that we can.
400 // Don't sink stores from loops without dedicated block exits. Exits
401 // containing indirect branches are not transformed by loop simplify,
402 // so make sure we catch that. An additional load may be generated in the
403 // preheader for SSA updater, so also avoid sinking when no preheader
404 // is available.
405 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
406 !NoOfMemAccTooLarge) {
407 // Figure out the loop exits and their insertion points
408 SmallVector<BasicBlock *, 8> ExitBlocks;
409 L->getUniqueExitBlocks(ExitBlocks);
410
411 // We can't insert into a catchswitch.
412 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
413 return isa<CatchSwitchInst>(Exit->getTerminator());
414 });
415
416 if (!HasCatchSwitch) {
417 SmallVector<Instruction *, 8> InsertPts;
418 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
419 InsertPts.reserve(ExitBlocks.size());
420 if (MSSAU)
421 MSSAInsertPts.reserve(ExitBlocks.size());
422 for (BasicBlock *ExitBlock : ExitBlocks) {
423 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
424 if (MSSAU)
425 MSSAInsertPts.push_back(nullptr);
426 }
427
428 PredIteratorCache PIC;
429
430 bool Promoted = false;
431
432 // Build an AST using MSSA.
433 if (!CurAST.get())
434 CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());
435
436 // Loop over all of the alias sets in the tracker object.
437 for (AliasSet &AS : *CurAST) {
438 // We can promote this alias set if it has a store, if it is a "Must"
439 // alias set, if the pointer is loop invariant, and if we are not
440 // eliminating any volatile loads or stores.
441 if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
442 !L->isLoopInvariant(AS.begin()->getValue()))
443 continue;
444
445 assert(
446 !AS.empty() &&
447 "Must alias set should have at least one pointer element in it!");
448
449 SmallSetVector<Value *, 8> PointerMustAliases;
450 for (const auto &ASI : AS)
451 PointerMustAliases.insert(ASI.getValue());
452
453 Promoted |= promoteLoopAccessesToScalars(
454 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
455 DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
456 }
457
458 // Once we have promoted values across the loop body we have to
459 // recursively reform LCSSA as any nested loop may now have values defined
460 // within the loop used in the outer loop.
461 // FIXME: This is really heavy handed. It would be a bit better to use an
462 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
463 // it as it went.
464 if (Promoted)
465 formLCSSARecursively(*L, *DT, LI, SE);
466
467 Changed |= Promoted;
468 }
469 }
470
471 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
472 // specifically moving instructions across the loop boundary and so it is
473 // especially in need of sanity checking here.
474 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
475 assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
476 "Parent loop not left in LCSSA form after LICM!");
477
478 // If this loop is nested inside of another one, save the alias information
479 // for when we process the outer loop.
480 if (!MSSAU.get() && CurAST.get() && L->getParentLoop() && !DeleteAST)
481 LoopToAliasSetMap[L] = std::move(CurAST);
482
483 if (MSSAU.get() && VerifyMemorySSA)
484 MSSAU->getMemorySSA()->verifyMemorySSA();
485
486 if (Changed && SE)
487 SE->forgetLoopDispositions(L);
488 return Changed;
489}
490
491/// Walk the specified region of the CFG (defined by all blocks dominated by
492/// the specified block, and that are in the current loop) in reverse depth
493/// first order w.r.t the DominatorTree. This allows us to visit uses before
494/// definitions, allowing us to sink a loop body in one pass without iteration.
495///
496bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
497 DominatorTree *DT, TargetLibraryInfo *TLI,
498 TargetTransformInfo *TTI, Loop *CurLoop,
499 AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
500 ICFLoopSafetyInfo *SafetyInfo,
501 SinkAndHoistLICMFlags &Flags,
502 OptimizationRemarkEmitter *ORE) {
503
504 // Verify inputs.
505 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
506 CurLoop != nullptr && SafetyInfo != nullptr &&
507 "Unexpected input to sinkRegion.");
508 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
509 "Either AliasSetTracker or MemorySSA should be initialized.");
510
511 // We want to visit children before parents. We will enqueue all the parents
512 // before their children in the worklist and process the worklist in reverse
513 // order.
514 SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
515
516 bool Changed = false;
517 for (DomTreeNode *DTN : reverse(Worklist)) {
518 BasicBlock *BB = DTN->getBlock();
519 // Only need to process the contents of this block if it is not part of a
520 // subloop (which would already have been processed).
521 if (inSubLoop(BB, CurLoop, LI))
522 continue;
523
524 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
525 Instruction &I = *--II;
526
527 // If the instruction is dead, we would try to sink it because it isn't
528 // used in the loop, instead, just delete it.
529 if (isInstructionTriviallyDead(&I, TLI)) {
530 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
531 salvageDebugInfo(I);
532 ++II;
533 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
534 Changed = true;
535 continue;
536 }
537
538 // Check to see if we can sink this instruction to the exit blocks
539 // of the loop. We can do this if all of the users of the instruction are
540 // outside of the loop. In this case, it doesn't even matter if the
541 // operands of the instruction are loop invariant.
542 //
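
// [Illustrative sketch, not part of LICM.cpp] For example, if the body
// computes "int d = x / y;" but every user of d sits after the loop, the
// division can be moved into the exit block(s); as the comment above notes,
// this holds whether or not x and y are themselves loop-invariant.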
543 bool FreeInLoop = false;
544 if (isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
545 canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
546 ORE) &&
547 !I.mayHaveSideEffects()) {
548 if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
549 if (!FreeInLoop) {
550 ++II;
551 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
552 }
553 Changed = true;
554 }
555 }
556 }
557 }
558 if (MSSAU && VerifyMemorySSA)
559 MSSAU->getMemorySSA()->verifyMemorySSA();
560 return Changed;
561}
562
563namespace {
564// This is a helper class for hoistRegion that enables it to hoist control
565// flow in order to hoist phis. The way this works is that we initially
566// start hoisting to the loop preheader, and when we see a loop invariant branch
567// we make note of this. When we then come to hoist an instruction that's
568// conditional on such a branch we duplicate the branch and the relevant control
569// flow, then hoist the instruction into the block corresponding to its original
570// block in the duplicated control flow.
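
// [Illustrative sketch, not part of LICM.cpp] A rough picture of the
// duplication for a loop-invariant condition c (block names hypothetical):
//
//   before:  loop_bb: br c, bb1, bb2       after:  the preheader ends in a
//            bb1: v1 = ...  br bb3                 clone of "br c"; hoisted
//            bb2: v2 = ...  br bb3                 copies bb1.licm/bb2.licm
//            bb3: p = phi [v1,bb1],[v2,bb2]        feed a hoisted phi in
//                                                  bb3.licm before the loop.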
571class ControlFlowHoister {
572private:
573 // Information about the loop we are hoisting from
574 LoopInfo *LI;
575 DominatorTree *DT;
576 Loop *CurLoop;
577 MemorySSAUpdater *MSSAU;
578
579 // A map of blocks in the loop to the block their instructions will be hoisted
580 // to.
581 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
582
583 // The branches that we can hoist, mapped to the block that marks a
584 // convergence point of their control flow.
585 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
586
587public:
588 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
589 MemorySSAUpdater *MSSAU)
590 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
591
592 void registerPossiblyHoistableBranch(BranchInst *BI) {
593 // We can only hoist conditional branches with loop invariant operands.
594 if (!ControlFlowHoisting || !BI->isConditional() ||
595 !CurLoop->hasLoopInvariantOperands(BI))
596 return;
597
598 // The branch destinations need to be in the loop, and we don't gain
599 // anything by duplicating conditional branches with duplicate successors,
600 // as it's essentially the same as an unconditional branch.
601 BasicBlock *TrueDest = BI->getSuccessor(0);
602 BasicBlock *FalseDest = BI->getSuccessor(1);
603 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
604 TrueDest == FalseDest)
605 return;
606
607 // We can hoist BI if one branch destination is the successor of the other,
608 // or both have a common successor, which we check by seeing if the
609 // intersection of their successors is non-empty.
610 // TODO: This could be expanded to allowing branches where both ends
611 // eventually converge to a single block.
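
// [Illustrative sketch, not part of LICM.cpp] The two accepted shapes:
//
//   triangle:  BI -> TrueDest -> FalseDest    diamond:  BI -> TrueDest  --+
//              BI -------------> FalseDest              BI -> FalseDest --+--> CommonSucc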
612 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
613 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
614 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
615 BasicBlock *CommonSucc = nullptr;
616 if (TrueDestSucc.count(FalseDest)) {
617 CommonSucc = FalseDest;
618 } else if (FalseDestSucc.count(TrueDest)) {
619 CommonSucc = TrueDest;
620 } else {
621 set_intersect(TrueDestSucc, FalseDestSucc);
622 // If there's one common successor use that.
623 if (TrueDestSucc.size() == 1)
624 CommonSucc = *TrueDestSucc.begin();
625 // If there's more than one, pick whichever appears first in the block list
626 // (we can't use the value returned by TrueDestSucc.begin() as it's
627 // unpredictable which element gets returned).
628 else if (!TrueDestSucc.empty()) {
629 Function *F = TrueDest->getParent();
630 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
631 auto It = std::find_if(F->begin(), F->end(), IsSucc);
632 assert(It != F->end() && "Could not find successor in function");
633 CommonSucc = &*It;
634 }
635 }
636 // The common successor has to be dominated by the branch, as otherwise
637 // there will be some other path to the successor that will not be
638 // controlled by this branch so any phi we hoist would be controlled by the
639 // wrong condition. This also takes care of avoiding hoisting of loop back
640 // edges.
641 // TODO: In some cases this could be relaxed if the successor is dominated
642 // by another block that's been hoisted and we can guarantee that the
643 // control flow has been replicated exactly.
644 if (CommonSucc && DT->dominates(BI, CommonSucc))
645 HoistableBranches[BI] = CommonSucc;
646 }
647
648 bool canHoistPHI(PHINode *PN) {
649 // The phi must have loop invariant operands.
650 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
651 return false;
652 // We can hoist phis if the block they are in is the target of hoistable
653 // branches which cover all of the predecessors of the block.
654 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
655 BasicBlock *BB = PN->getParent();
656 for (BasicBlock *PredBB : predecessors(BB))
657 PredecessorBlocks.insert(PredBB);
658 // If we have fewer predecessor blocks than predecessors then the phi will
659 // have more than one incoming value for the same block, which we can't
660 // handle.
661 // TODO: This could be handled by erasing some of the duplicate incoming
662 // values.
663 if (PredecessorBlocks.size() != pred_size(BB))
664 return false;
665 for (auto &Pair : HoistableBranches) {
666 if (Pair.second == BB) {
667 // Which blocks are predecessors via this branch depends on if the
668 // branch is triangle-like or diamond-like.
669 if (Pair.first->getSuccessor(0) == BB) {
670 PredecessorBlocks.erase(Pair.first->getParent());
671 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
672 } else if (Pair.first->getSuccessor(1) == BB) {
673 PredecessorBlocks.erase(Pair.first->getParent());
674 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
675 } else {
676 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
677 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
678 }
679 }
680 }
681 // PredecessorBlocks will now be empty if for every predecessor of BB we
682 // found a hoistable branch source.
683 return PredecessorBlocks.empty();
684 }
685
686 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
687 if (!ControlFlowHoisting)
688 return CurLoop->getLoopPreheader();
689 // If BB has already been hoisted, return that
690 if (HoistDestinationMap.count(BB))
691 return HoistDestinationMap[BB];
692
693 // Check if this block is conditional based on a pending branch
694 auto HasBBAsSuccessor =
695 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
696 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
697 Pair.first->getSuccessor(1) == BB);
698 };
699 auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(),
700 HasBBAsSuccessor);
701
702 // If not involved in a pending branch, hoist to preheader
703 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
704 if (It == HoistableBranches.end()) {
705 LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()
706 << " as hoist destination for " << BB->getName()
707 << "\n");
708 HoistDestinationMap[BB] = InitialPreheader;
709 return InitialPreheader;
710 }
711 BranchInst *BI = It->first;
712 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
713 HoistableBranches.end() &&
714 "BB is expected to be the target of at most one branch");
715
716 LLVMContext &C = BB->getContext();
717 BasicBlock *TrueDest = BI->getSuccessor(0);
718 BasicBlock *FalseDest = BI->getSuccessor(1);
719 BasicBlock *CommonSucc = HoistableBranches[BI];
720 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
721
722 // Create hoisted versions of blocks that currently don't have them
723 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
724 if (HoistDestinationMap.count(Orig))
725 return HoistDestinationMap[Orig];
726 BasicBlock *New =
727 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
728 HoistDestinationMap[Orig] = New;
729 DT->addNewBlock(New, HoistTarget);
730 if (CurLoop->getParentLoop())
731 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
732 ++NumCreatedBlocks;
733 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
734 << " as hoist destination for " << Orig->getName()
735 << "\n");
736 return New;
737 };
738 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
739 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
740 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
741
742 // Link up these blocks with branches.
743 if (!HoistCommonSucc->getTerminator()) {
744 // The new common successor we've generated will branch to whatever that
745 // hoist target branched to.
746 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
747 assert(TargetSucc && "Expected hoist target to have a single successor");
748 HoistCommonSucc->moveBefore(TargetSucc);
749 BranchInst::Create(TargetSucc, HoistCommonSucc);
750 }
751 if (!HoistTrueDest->getTerminator()) {
752 HoistTrueDest->moveBefore(HoistCommonSucc);
753 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
754 }
755 if (!HoistFalseDest->getTerminator()) {
756 HoistFalseDest->moveBefore(HoistCommonSucc);
757 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
758 }
759
760 // If BI is being cloned to what was originally the preheader then
761 // HoistCommonSucc will now be the new preheader.
762 if (HoistTarget == InitialPreheader) {
763 // Phis in the loop header now need to use the new preheader.
764 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
765 if (MSSAU)
766 MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
767 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
768 // The new preheader dominates the loop header.
769 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
770 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
771 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
772 // The preheader hoist destination is now the new preheader, with the
773 // exception of the hoist destination of this branch.
774 for (auto &Pair : HoistDestinationMap)
775 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
776 Pair.second = HoistCommonSucc;
777 }
778
779 // Now finally clone BI.
780 ReplaceInstWithInst(
781 HoistTarget->getTerminator(),
782 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
783 ++NumClonedBranches;
784
785 assert(CurLoop->getLoopPreheader() &&
786 "Hoisting blocks should not have destroyed preheader");
787 return HoistDestinationMap[BB];
788 }
789};
790} // namespace
791
792
793/// Return true if we know how to rewrite all uses of the given alloca after
794/// hoisting it out of the loop. The main concerns are a) potential captures
795/// and b) invariant.start markers which don't capture, but are no longer
796/// valid w/o a corresponding invariant.end.
797static bool canRewriteUsesOfAlloca(AllocaInst &AI) {
798 // TODO: This looks a lot like capture tracking, but we need to remove any
799 // invariant starts if we extend the lifetime of the alloca by hoisting it.
800 // We should probably refactor capture tracking into a form which allows us
801 // to reuse the relevant bits and remove the duplicated logic here.
802
803 SmallVector<Use *, 16> Worklist;
804 for (Use &U : AI.uses())
805 Worklist.push_back(&U);
806
807 unsigned NumUsesExplored = 0;
808 while (!Worklist.empty()) {
809 Use *U = Worklist.pop_back_val();
810 Instruction *I = cast<Instruction>(U->getUser());
811 NumUsesExplored++;
812 if (NumUsesExplored > DefaultMaxUsesToExplore)
813 return false;
814 // Non capturing, terminating uses
815 if (isa<LoadInst>(I) ||
816 (isa<StoreInst>(I) && U->getOperandNo() == 1))
817 continue;
818 // Non capturing, non-terminating
819 if (!isa<BitCastInst>(I) && !isa<GetElementPtrInst>(I))
820 return false;
821 for (Use &U : I->uses())
822 Worklist.push_back(&U);
823 }
824 return true;
825}
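
// [Illustrative sketch, not part of LICM.cpp] The walk above tolerates loads,
// stores *to* the alloca, and bitcast/GEP chains, e.g.:
//
//   %a = alloca i32          ; ok: address only used via load/store/GEP
//
// but returns false the moment the address itself escapes -- say a call such
// as "escape(%a)" or a store *of* %a -- since that may capture the pointer.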
826
827/// Walk the specified region of the CFG (defined by all blocks dominated by
828/// the specified block, and that are in the current loop) in depth first
829/// order w.r.t the DominatorTree. This allows us to visit definitions before
830/// uses, allowing us to hoist a loop body in one pass without iteration.
831///
832bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
833 DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
834 AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
835 ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
836 SinkAndHoistLICMFlags &Flags,
837 OptimizationRemarkEmitter *ORE) {
838 // Verify inputs.
839 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
840 CurLoop != nullptr && SafetyInfo != nullptr &&
841 "Unexpected input to hoistRegion.");
842 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
843 "Either AliasSetTracker or MemorySSA should be initialized.");
844
845 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
846
847 // Keep track of instructions that have been hoisted, as they may need to be
848 // re-hoisted if they end up not dominating all of their uses.
849 SmallVector<Instruction *, 16> HoistedInstructions;
850
851 // For PHI hoisting to work we need to hoist blocks before their successors.
852 // We can do this by iterating through the blocks in the loop in reverse
853 // post-order.
854 LoopBlocksRPO Worklist(CurLoop);
855 Worklist.perform(LI);
856 bool Changed = false;
857 for (BasicBlock *BB : Worklist) {
858 // Only need to process the contents of this block if it is not part of a
859 // subloop (which would already have been processed).
860 if (inSubLoop(BB, CurLoop, LI))
861 continue;
862
863 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
864 Instruction &I = *II++;
865 // Try constant folding this instruction. If all the operands are
866 // constants, it is technically hoistable, but it would be better to
867 // just fold it.
868 if (Constant *C = ConstantFoldInstruction(
869 &I, I.getModule()->getDataLayout(), TLI)) {
870 LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
871 << '\n');
872 if (CurAST)
873 CurAST->copyValue(&I, C);
874 // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
875 I.replaceAllUsesWith(C);
876 if (isInstructionTriviallyDead(&I, TLI))
877 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
878 Changed = true;
879 continue;
880 }
881
882 // Try hoisting the instruction out to the preheader. We can only do
883 // this if all of the operands of the instruction are loop invariant and
884 // if it is safe to hoist the instruction.
885 // TODO: It may be safe to hoist if we are hoisting to a conditional block
886 // and we have accurately duplicated the control flow from the loop header
887 // to that block.
888 if (CurLoop->hasLoopInvariantOperands(&I) &&
889 canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
890 ORE) &&
891 isSafeToExecuteUnconditionally(
892 I, DT, CurLoop, SafetyInfo, ORE,
893 CurLoop->getLoopPreheader()->getTerminator())) {
894 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
895 MSSAU, SE, ORE);
896 HoistedInstructions.push_back(&I);
897 Changed = true;
898 continue;
899 }
900
901       // Attempt to move floating point division out of the loop by
902       // converting it to a reciprocal multiplication.
903 if (I.getOpcode() == Instruction::FDiv &&
904 CurLoop->isLoopInvariant(I.getOperand(1)) &&
905 I.hasAllowReciprocal()) {
906 auto Divisor = I.getOperand(1);
907 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
908 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
909 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
910 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
911 ReciprocalDivisor->insertBefore(&I);
912
913 auto Product =
914 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
915 Product->setFastMathFlags(I.getFastMathFlags());
916 SafetyInfo->insertInstructionTo(Product, I.getParent());
917 Product->insertAfter(&I);
918 I.replaceAllUsesWith(Product);
919 eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
920
921 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
922 SafetyInfo, MSSAU, SE, ORE);
923 HoistedInstructions.push_back(ReciprocalDivisor);
924 Changed = true;
925 continue;
926 }
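// Illustrative sketch (assumed IR, not part of the report): with a
// loop-invariant divisor %d and the 'arcp' fast-math flag, the block above
// rewrites, e.g.:
//   loop:
//     %q = fdiv arcp double %x, %d
// into:
//   preheader:
//     %recip = fdiv arcp double 1.000000e+00, %d   ; hoisted by LICM
//   loop:
//     %q = fmul arcp double %x, %recip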
927
928 auto IsInvariantStart = [&](Instruction &I) {
929 using namespace PatternMatch;
930 return I.use_empty() &&
931 match(&I, m_Intrinsic<Intrinsic::invariant_start>());
932 };
933 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
934 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
935 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
936 };
937 if ((IsInvariantStart(I) || isGuard(&I)) &&
938 CurLoop->hasLoopInvariantOperands(&I) &&
939 MustExecuteWithoutWritesBefore(I)) {
940 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
941 MSSAU, SE, ORE);
942 HoistedInstructions.push_back(&I);
943 Changed = true;
944 continue;
945 }
946
947 if (isa<AllocaInst>(&I) &&
948 SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
949 canRewriteUsesOfAlloca(cast<AllocaInst>(I))) {
950 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
951 MSSAU, SE, ORE);
952 HoistedInstructions.push_back(&I);
953 Changed = true;
954 continue;
955 }
956
957 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
958 if (CFH.canHoistPHI(PN)) {
959 // Redirect incoming blocks first to ensure that we create hoisted
960 // versions of those blocks before we hoist the phi.
961 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
962 PN->setIncomingBlock(
963 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
964 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
965 MSSAU, SE, ORE);
966         assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
967 Changed = true;
968 continue;
969 }
970 }
971
972 // Remember possibly hoistable branches so we can actually hoist them
973 // later if needed.
974 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
975 CFH.registerPossiblyHoistableBranch(BI);
976 }
977 }
978
979 // If we hoisted instructions to a conditional block they may not dominate
980 // their uses that weren't hoisted (such as phis where some operands are not
981 // loop invariant). If so make them unconditional by moving them to their
982 // immediate dominator. We iterate through the instructions in reverse order
983 // which ensures that when we rehoist an instruction we rehoist its operands,
984 // and also keep track of where in the block we are rehoisting to, to make sure
985 // that we rehoist instructions before the instructions that use them.
986 Instruction *HoistPoint = nullptr;
987 if (ControlFlowHoisting) {
988 for (Instruction *I : reverse(HoistedInstructions)) {
989 if (!llvm::all_of(I->uses(),
990 [&](Use &U) { return DT->dominates(I, U); })) {
991 BasicBlock *Dominator =
992 DT->getNode(I->getParent())->getIDom()->getBlock();
993 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
994 if (HoistPoint)
995           assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
996                  "New hoist point expected to dominate old hoist point");
997 HoistPoint = Dominator->getTerminator();
998 }
999         LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1000                           << HoistPoint->getParent()->getName()
1001                           << ": " << *I << "\n");
1002 moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1003 HoistPoint = I;
1004 Changed = true;
1005 }
1006 }
1007 }
1008 if (MSSAU && VerifyMemorySSA)
1009 MSSAU->getMemorySSA()->verifyMemorySSA();
1010
1011 // Now that we've finished hoisting make sure that LI and DT are still
1012 // valid.
1013#ifdef EXPENSIVE_CHECKS
1014 if (Changed) {
1015     assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1016            "Dominator tree verification failed");
1017 LI->verify(*DT);
1018 }
1019#endif
1020
1021 return Changed;
1022}
1023
1024// Return true if LI is invariant within scope of the loop. LI is invariant if
1025// CurLoop is dominated by an invariant.start representing the same memory
1026// location and size as the memory location LI loads from, and also the
1027// invariant.start has no uses.
1028static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1029 Loop *CurLoop) {
1030 Value *Addr = LI->getOperand(0);
1031 const DataLayout &DL = LI->getModule()->getDataLayout();
1032 const uint32_t LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1033
1034   // If the type is i8 addrspace(x)*, we know this is the type of the
1035   // llvm.invariant.start operand.
1036 auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1037 LI->getPointerAddressSpace());
1038 unsigned BitcastsVisited = 0;
1039   // Look through bitcasts until we reach the i8* type (this is the
1040   // invariant.start operand type).
1041 while (Addr->getType() != PtrInt8Ty) {
1042 auto *BC = dyn_cast<BitCastInst>(Addr);
1043 // Avoid traversing high number of bitcast uses.
1044 if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1045 return false;
1046 Addr = BC->getOperand(0);
1047 }
1048
1049 unsigned UsesVisited = 0;
1050 // Traverse all uses of the load operand value, to see if invariant.start is
1051 // one of the uses, and whether it dominates the load instruction.
1052 for (auto *U : Addr->users()) {
1053     // Avoid traversing a load operand that has a high number of users.
1054 if (++UsesVisited > MaxNumUsesTraversed)
1055 return false;
1056 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1057     // If there are escaping uses of the invariant.start instruction, the load
1058     // may be non-invariant.
1059 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1060 !II->use_empty())
1061 continue;
1062 unsigned InvariantSizeInBits =
1063 cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
1064 // Confirm the invariant.start location size contains the load operand size
1065 // in bits. Also, the invariant.start should dominate the load, and we
1066 // should not hoist the load out of a loop that contains this dominating
1067 // invariant.start.
1068 if (LocSizeInBits <= InvariantSizeInBits &&
1069 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1070 return true;
1071 }
1072
1073 return false;
1074}
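// Illustrative sketch (assumed IR, not part of the report): a load this
// predicate would report as invariant; the sizes and names are assumptions
// made for the example:
//   %inv = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p8)  ; no uses of %inv
//   %p32 = bitcast i8* %p8 to i32*
//   ; ... loop whose header is properly dominated by the call ...
//   %v = load i32, i32* %p32   ; 32 bits <= 4 * 8 bits, so the load is invariant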
1075
1076namespace {
1077/// Return true if-and-only-if we know how to (mechanically) both hoist and
1078/// sink a given instruction out of a loop. Does not address legality
1079/// concerns such as aliasing or speculation safety.
1080bool isHoistableAndSinkableInst(Instruction &I) {
1081 // Only these instructions are hoistable/sinkable.
1082   return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
    2. Assuming 'I' is not a 'LoadInst'
    3. Assuming 'I' is not a 'StoreInst'
    4. Assuming 'I' is not a 'CallInst'
1083           isa<FenceInst>(I) || isa<CastInst>(I) ||
    5. Assuming 'I' is not a 'FenceInst'
    6. Assuming 'I' is not a 'CastInst'
1084           isa<UnaryOperator>(I) || isa<BinaryOperator>(I) ||
    7. Assuming 'I' is not a 'UnaryOperator'
    8. Assuming 'I' is not a 'BinaryOperator'
1085           isa<SelectInst>(I) || isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
    9. Assuming 'I' is not a 'SelectInst'
    10. Assuming 'I' is not a 'GetElementPtrInst'
    11. Assuming 'I' is not a 'CmpInst'
1086           isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
    12. Assuming 'I' is not a 'InsertElementInst'
    13. Assuming 'I' is not a 'ExtractElementInst'
1087           isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
    14. Assuming 'I' is not a 'ShuffleVectorInst'
    15. Assuming 'I' is not a 'ExtractValueInst'
1088           isa<InsertValueInst>(I));
    16. Assuming 'I' is a 'InsertValueInst'
    17. Returning the value 1, which participates in a condition later
1089 }
1090/// Return true if all of the alias sets within this AST are known not to
1091 /// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1092bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1093 const Loop *L) {
1094 if (CurAST) {
1095 for (AliasSet &AS : *CurAST) {
1096 if (!AS.isForwardingAliasSet() && AS.isMod()) {
1097 return false;
1098 }
1099 }
1100 return true;
1101 } else { /*MSSAU*/
1102 for (auto *BB : L->getBlocks())
1103 if (MSSAU->getMemorySSA()->getBlockDefs(BB))
1104 return false;
1105 return true;
1106 }
1107}
1108
1109/// Return true if I is the only Instruction with a MemoryAccess in L.
1110bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1111 const MemorySSAUpdater *MSSAU) {
1112 for (auto *BB : L->getBlocks())
1113 if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1114 int NotAPhi = 0;
1115 for (const auto &Acc : *Accs) {
1116 if (isa<MemoryPhi>(&Acc))
1117 continue;
1118 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1119 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1120 return false;
1121 }
1122 }
1123 return true;
1124}
1125}
1126
1127bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1128 Loop *CurLoop, AliasSetTracker *CurAST,
1129 MemorySSAUpdater *MSSAU,
1130 bool TargetExecutesOncePerLoop,
1131 SinkAndHoistLICMFlags *Flags,
1132 OptimizationRemarkEmitter *ORE) {
1133 // If we don't understand the instruction, bail early.
1134 if (!isHoistableAndSinkableInst(I))
    1. Calling 'isHoistableAndSinkableInst'
    18. Returning from 'isHoistableAndSinkableInst'
    19. Taking false branch
1135 return false;
1136
1137 MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
    20. Assuming 'MSSAU' is null
    21. '?' condition is false
    22. 'MSSA' initialized to a null pointer value
1138   if (MSSA)
    22.1. 'MSSA' is null
    23. Taking false branch
1139     assert(Flags != nullptr && "Flags cannot be null.");
1140
1141 // Loads have extra constraints we have to verify before we can hoist them.
1142   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    24. Assuming the object is not a 'LoadInst'
    24.1. 'LI' is null
    25. Taking false branch
1143 if (!LI->isUnordered())
1144 return false; // Don't sink/hoist volatile or ordered atomic loads!
1145
1146 // Loads from constant memory are always safe to move, even if they end up
1147 // in the same alias set as something that ends up being modified.
1148 if (AA->pointsToConstantMemory(LI->getOperand(0)))
1149 return true;
1150 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1151 return true;
1152
1153 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1154 return false; // Don't risk duplicating unordered loads
1155
1156 // This checks for an invariant.start dominating the load.
1157 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1158 return true;
1159
1160 bool Invalidated;
1161 if (CurAST)
1162 Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1163 CurLoop, AA);
1164 else
1165 Invalidated = pointerInvalidatedByLoopWithMSSA(
1166 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags);
1167 // Check loop-invariant address because this may also be a sinkable load
1168 // whose address is not necessarily loop-invariant.
1169 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1170 ORE->emit([&]() {
1171 return OptimizationRemarkMissed(
1172 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressInvalidated", LI)
1173 << "failed to move load with loop-invariant address "
1174 "because the loop may invalidate its value";
1175 });
1176
1177 return !Invalidated;
1178   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    26. Assuming the object is a 'CallInst'
    26.1. 'CI' is non-null
    27. Taking true branch
1179 // Don't sink or hoist dbg info; it's legal, but not useful.
1180 if (isa<DbgInfoIntrinsic>(I))
    28. Assuming 'I' is not a 'DbgInfoIntrinsic'
    29. Taking false branch
1181 return false;
1182
1183 // Don't sink calls which can throw.
1184 if (CI->mayThrow())
    30. Assuming the condition is false
    31. Taking false branch
1185 return false;
1186
1187 using namespace PatternMatch;
1188 if (match(CI, m_Intrinsic<Intrinsic::assume>()))
    32. Calling 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
    39. Returning from 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
    40. Taking false branch
1189 // Assumes don't actually alias anything or throw
1190 return true;
1191
1192 if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
    41. Calling 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
    48. Returning from 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
    49. Taking false branch
1193 // Widenable conditions don't actually alias anything or throw
1194 return true;
1195
1196 // Handle simple cases by querying alias analysis.
1197 FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1198 if (Behavior == FMRB_DoesNotAccessMemory)
    50. Assuming 'Behavior' is not equal to FMRB_DoesNotAccessMemory
    51. Taking false branch
1199 return true;
1200 if (AliasAnalysis::onlyReadsMemory(Behavior)) {
    52. Calling 'AAResults::onlyReadsMemory'
    55. Returning from 'AAResults::onlyReadsMemory'
    56. Taking true branch
1201 // A readonly argmemonly function only reads from memory pointed to by
1202       // its arguments with arbitrary offsets. If we can prove there are no
1203 // writes to this memory in the loop, we can hoist or sink.
1204 if (AliasAnalysis::onlyAccessesArgPointees(Behavior)) {
    57. Calling 'AAResults::onlyAccessesArgPointees'
    60. Returning from 'AAResults::onlyAccessesArgPointees'
    61. Taking true branch
1205 // TODO: expand to writeable arguments
1206 for (Value *Op : CI->arg_operands())
    62. Assuming '__begin5' is not equal to '__end5'
1207 if (Op->getType()->isPointerTy()) {
    63. Calling 'Type::isPointerTy'
    66. Returning from 'Type::isPointerTy'
    67. Taking true branch
1208 bool Invalidated;
1209 if (CurAST)
    68. Assuming 'CurAST' is null
    69. Taking false branch
1210 Invalidated = pointerInvalidatedByLoop(
1211 MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
1212 CurAST, CurLoop, AA);
1213 else
1214 Invalidated = pointerInvalidatedByLoopWithMSSA(
1215 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop,
    70. Called C++ object pointer is null
1216 *Flags);
1217 if (Invalidated)
1218 return false;
1219 }
1220 return true;
1221 }
1222
1223 // If this call only reads from memory and there are no writes to memory
1224 // in the loop, we can hoist or sink the call as appropriate.
1225 if (isReadOnly(CurAST, MSSAU, CurLoop))
1226 return true;
1227 }
1228
1229 // FIXME: This should use mod/ref information to see if we can hoist or
1230 // sink the call.
1231
1232 return false;
1233 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1234 // Fences alias (most) everything to provide ordering. For the moment,
1235 // just give up if there are any other memory operations in the loop.
1236 if (CurAST) {
1237 auto Begin = CurAST->begin();
1238       assert(Begin != CurAST->end() && "must contain FI");
1239 if (std::next(Begin) != CurAST->end())
1240 // constant memory for instance, TODO: handle better
1241 return false;
1242 auto *UniqueI = Begin->getUniqueInstruction();
1243 if (!UniqueI)
1244 // other memory op, give up
1245 return false;
1246 (void)FI; // suppress unused variable warning
1247       assert(UniqueI == FI && "AS must contain FI");
1248 return true;
1249 } else // MSSAU
1250 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1251 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1252 if (!SI->isUnordered())
1253 return false; // Don't sink/hoist volatile or ordered atomic store!
1254
1255 // We can only hoist a store that we can prove writes a value which is not
1256     // read or overwritten within the loop. For those cases, we fall back to
1257 // load store promotion instead. TODO: We can extend this to cases where
1258 // there is exactly one write to the location and that write dominates an
1259 // arbitrary number of reads in the loop.
1260 if (CurAST) {
1261 auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1262
1263 if (AS.isRef() || !AS.isMustAlias())
1264 // Quick exit test, handled by the full path below as well.
1265 return false;
1266 auto *UniqueI = AS.getUniqueInstruction();
1267 if (!UniqueI)
1268 // other memory op, give up
1269 return false;
1270       assert(UniqueI == SI && "AS must contain SI");
1271 return true;
1272 } else { // MSSAU
1273 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1274 return true;
1275 // If there are more accesses than the Promotion cap, give up, we're not
1276 // walking a list that long.
1277 if (Flags->NoOfMemAccTooLarge)
1278 return false;
1279 // Check store only if there's still "quota" to check clobber.
1280 if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap)
1281 return false;
1282 // If there are interfering Uses (i.e. their defining access is in the
1283 // loop), or ordered loads (stored as Defs!), don't move this store.
1284 // Could do better here, but this is conservatively correct.
1285 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1286 // moving accesses. Can also extend to dominating uses.
1287 auto *SIMD = MSSA->getMemoryAccess(SI);
1288 for (auto *BB : CurLoop->getBlocks())
1289 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1290 for (const auto &MA : *Accesses)
1291 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1292 auto *MD = MU->getDefiningAccess();
1293 if (!MSSA->isLiveOnEntryDef(MD) &&
1294 CurLoop->contains(MD->getBlock()))
1295 return false;
1296 // Disable hoisting past potentially interfering loads. Optimized
1297 // Uses may point to an access outside the loop, as getClobbering
1298 // checks the previous iteration when walking the backedge.
1299 // FIXME: More precise: no Uses that alias SI.
1300 if (!Flags->IsSink && !MSSA->dominates(SIMD, MU))
1301 return false;
1302 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1303 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1304 (void)LI; // Silence warning.
1305             assert(!LI->isUnordered() && "Expected unordered load");
1306 return false;
1307 }
1308 // Any call, while it may not be clobbering SI, it may be a use.
1309 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1310             // Check if the call may read from the memory location written
1311 // to by SI. Check CI's attributes and arguments; the number of
1312 // such checks performed is limited above by NoOfMemAccTooLarge.
1313 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1314 if (isModOrRefSet(MRI))
1315 return false;
1316 }
1317 }
1318 }
1319
1320 auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1321 Flags->LicmMssaOptCounter++;
1322 // If there are no clobbering Defs in the loop, store is safe to hoist.
1323 return MSSA->isLiveOnEntryDef(Source) ||
1324 !CurLoop->contains(Source->getBlock());
1325 }
1326 }
1327
1328   assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1329
1330 // We've established mechanical ability and aliasing, it's up to the caller
1331 // to check fault safety
1332 return true;
1333}
1334
1335 /// Returns true if a PHINode is trivially replaceable with an
1336 /// Instruction.
1337/// This is true when all incoming values are that instruction.
1338/// This pattern occurs most often with LCSSA PHI nodes.
1339///
1340static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1341 for (const Value *IncValue : PN.incoming_values())
1342 if (IncValue != &I)
1343 return false;
1344
1345 return true;
1346}
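// Illustrative sketch (assumed IR, not part of the report): an LCSSA PHI whose
// incoming values are all the same instruction is trivially replaceable:
//   exit:
//     %lcssa = phi i32 [ %v, %bb1 ], [ %v, %bb2 ]   ; every input is %v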
1347
1348/// Return true if the instruction is free in the loop.
1349static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1350 const TargetTransformInfo *TTI) {
1351
1352 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1353 if (TTI->getUserCost(GEP) != TargetTransformInfo::TCC_Free)
1354 return false;
1355     // For a GEP, we cannot simply use getUserCost because currently it
1356     // optimistically assumes that a GEP will fold into the addressing mode
1357     // regardless of its users.
1358 const BasicBlock *BB = GEP->getParent();
1359 for (const User *U : GEP->users()) {
1360 const Instruction *UI = cast<Instruction>(U);
1361 if (CurLoop->contains(UI) &&
1362 (BB != UI->getParent() ||
1363 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1364 return false;
1365 }
1366 return true;
1367 } else
1368 return TTI->getUserCost(&I) == TargetTransformInfo::TCC_Free;
1369}
1370
1371/// Return true if the only users of this instruction are outside of
1372/// the loop. If this is true, we can sink the instruction to the exit
1373/// blocks of the loop.
1374///
1375/// We also return true if the instruction could be folded away in lowering.
1376/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1377static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1378 const LoopSafetyInfo *SafetyInfo,
1379 TargetTransformInfo *TTI, bool &FreeInLoop) {
1380 const auto &BlockColors = SafetyInfo->getBlockColors();
1381 bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1382 for (const User *U : I.users()) {
1383 const Instruction *UI = cast<Instruction>(U);
1384 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1385 const BasicBlock *BB = PN->getParent();
1386 // We cannot sink uses in catchswitches.
1387 if (isa<CatchSwitchInst>(BB->getTerminator()))
1388 return false;
1389
1390 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1391 // phi use is too muddled.
1392 if (isa<CallInst>(I))
1393 if (!BlockColors.empty() &&
1394 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1395 return false;
1396 }
1397
1398 if (CurLoop->contains(UI)) {
1399 if (IsFree) {
1400 FreeInLoop = true;
1401 continue;
1402 }
1403 return false;
1404 }
1405 }
1406 return true;
1407}
1408
1409static Instruction *CloneInstructionInExitBlock(
1410 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1411 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1412 Instruction *New;
1413 if (auto *CI = dyn_cast<CallInst>(&I)) {
1414 const auto &BlockColors = SafetyInfo->getBlockColors();
1415
1416 // Sinking call-sites need to be handled differently from other
1417 // instructions. The cloned call-site needs a funclet bundle operand
1418 // appropriate for its location in the CFG.
1419 SmallVector<OperandBundleDef, 1> OpBundles;
1420 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1421 BundleIdx != BundleEnd; ++BundleIdx) {
1422 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1423 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1424 continue;
1425
1426 OpBundles.emplace_back(Bundle);
1427 }
1428
1429 if (!BlockColors.empty()) {
1430 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1431 assert(CV.size() == 1 && "non-unique color for exit block!")((CV.size() == 1 && "non-unique color for exit block!"
) ? static_cast<void> (0) : __assert_fail ("CV.size() == 1 && \"non-unique color for exit block!\""
, "/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Transforms/Scalar/LICM.cpp"
, 1431, __PRETTY_FUNCTION__))
;
1432 BasicBlock *BBColor = CV.front();
1433 Instruction *EHPad = BBColor->getFirstNonPHI();
1434 if (EHPad->isEHPad())
1435 OpBundles.emplace_back("funclet", EHPad);
1436 }
1437
1438 New = CallInst::Create(CI, OpBundles);
1439 } else {
1440 New = I.clone();
1441 }
1442
1443 ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1444 if (!I.getName().empty())
1445 New->setName(I.getName() + ".le");
1446
1447 if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1448 // Create a new MemoryAccess and let MemorySSA set its defining access.
1449 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1450 New, nullptr, New->getParent(), MemorySSA::Beginning);
1451 if (NewMemAcc) {
1452 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1453 MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1454 else {
1455 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1456 MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1457 }
1458 }
1459 }
1460
1461 // Build LCSSA PHI nodes for any in-loop operands. Note that this is
1462 // particularly cheap because we can rip off the PHI node that we're
1463 // replacing for the number and blocks of the predecessors.
1464 // OPT: If this shows up in a profile, we can instead finish sinking all
1465 // invariant instructions, and then walk their operands to re-establish
1466 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1467 // sinking bottom-up.
1468 for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
1469 ++OI)
1470 if (Instruction *OInst = dyn_cast<Instruction>(*OI))
1471 if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
1472 if (!OLoop->contains(&PN)) {
1473 PHINode *OpPN =
1474 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1475 OInst->getName() + ".lcssa", &ExitBlock.front());
1476 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1477 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1478 *OI = OpPN;
1479 }
1480 return New;
1481}
1482
1483static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1484 AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
1485 if (AST)
1486 AST->deleteValue(&I);
1487 if (MSSAU)
1488 MSSAU->removeMemoryAccess(&I);
1489 SafetyInfo.removeInstruction(&I);
1490 I.eraseFromParent();
1491}
1492
1493static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1494 ICFLoopSafetyInfo &SafetyInfo,
1495 MemorySSAUpdater *MSSAU,
1496 ScalarEvolution *SE) {
1497 SafetyInfo.removeInstruction(&I);
1498 SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1499 I.moveBefore(&Dest);
1500 if (MSSAU)
1501 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1502 MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1503 MSSAU->moveToPlace(OldMemAcc, Dest.getParent(), MemorySSA::End);
1504 if (SE)
1505 SE->forgetValue(&I);
1506}
1507
1508static Instruction *sinkThroughTriviallyReplaceablePHI(
1509 PHINode *TPN, Instruction *I, LoopInfo *LI,
1510 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1511 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1512 MemorySSAUpdater *MSSAU) {
1513   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1514          "Expect only trivially replaceable PHI");
1515 BasicBlock *ExitBlock = TPN->getParent();
1516 Instruction *New;
1517 auto It = SunkCopies.find(ExitBlock);
1518 if (It != SunkCopies.end())
1519 New = It->second;
1520 else
1521 New = SunkCopies[ExitBlock] = CloneInstructionInExitBlock(
1522 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1523 return New;
1524}
1525
1526static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1527 BasicBlock *BB = PN->getParent();
1528 if (!BB->canSplitPredecessors())
1529 return false;
1530   // It's not impossible to split EHPad blocks, but if BlockColors already exist
1531   // it requires updating BlockColors for all offspring blocks accordingly. By
1532   // skipping such corner cases, we can make updating BlockColors after splitting
1533   // a predecessor fairly simple.
1534 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1535 return false;
1536 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1537 BasicBlock *BBPred = *PI;
1538 if (isa<IndirectBrInst>(BBPred->getTerminator()))
1539 return false;
1540 }
1541 return true;
1542}
1543
1544static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1545 LoopInfo *LI, const Loop *CurLoop,
1546 LoopSafetyInfo *SafetyInfo,
1547 MemorySSAUpdater *MSSAU) {
1548#ifndef NDEBUG
1549 SmallVector<BasicBlock *, 32> ExitBlocks;
1550 CurLoop->getUniqueExitBlocks(ExitBlocks);
1551 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1552 ExitBlocks.end());
1553#endif
1554 BasicBlock *ExitBB = PN->getParent();
1555   assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1556
1557   // Split predecessors of the loop exit so that instructions in the loop are
1558   // exposed to exit blocks through trivially replaceable PHIs while keeping the
1559   // loop in the canonical form where each predecessor of each exit block is
1560   // contained within the loop. For example, this will convert the loop below
1561 // from
1562 //
1563 // LB1:
1564 // %v1 =
1565 // br %LE, %LB2
1566 // LB2:
1567 // %v2 =
1568 // br %LE, %LB1
1569 // LE:
1570 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1571 //
1572 // to
1573 //
1574 // LB1:
1575 // %v1 =
1576 // br %LE.split, %LB2
1577 // LB2:
1578 // %v2 =
1579 // br %LE.split2, %LB1
1580 // LE.split:
1581 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1582 // br %LE
1583 // LE.split2:
1584 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1585 // br %LE
1586 // LE:
1587 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1588 //
1589 const auto &BlockColors = SafetyInfo->getBlockColors();
1590 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1591 while (!PredBBs.empty()) {
1592 BasicBlock *PredBB = *PredBBs.begin();
1593     assert(CurLoop->contains(PredBB) &&
1594            "Expect all predecessors are in the loop");
1595 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1596 BasicBlock *NewPred = SplitBlockPredecessors(
1597 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1598       // Since we do not allow splitting EH-blocks with BlockColors in
1599       // canSplitPredecessors(), we can simply assign the predecessor's color to
1600       // the new block.
1601 if (!BlockColors.empty())
1602 // Grab a reference to the ColorVector to be inserted before getting the
1603 // reference to the vector we are copying because inserting the new
1604 // element in BlockColors might cause the map to be reallocated.
1605 SafetyInfo->copyColors(NewPred, PredBB);
1606 }
1607 PredBBs.remove(PredBB);
1608 }
1609}
1610
1611/// When an instruction is found to only be used outside of the loop, this
1612/// function moves it to the exit blocks and patches up SSA form as needed.
1613/// This method is guaranteed to remove the original instruction from its
1614/// position, and may either delete it or move it to outside of the loop.
1615///
1616static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1617 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
1618 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
1619   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1620 ORE->emit([&]() {
1621 return OptimizationRemark(DEBUG_TYPE"licm", "InstSunk", &I)
1622 << "sinking " << ore::NV("Inst", &I);
1623 });
1624 bool Changed = false;
1625 if (isa<LoadInst>(I))
1626 ++NumMovedLoads;
1627 else if (isa<CallInst>(I))
1628 ++NumMovedCalls;
1629 ++NumSunk;
1630
1631   // Iterate over users to be ready for actual sinking. Replace uses reached
1632   // via unreachable blocks with undef and make all user PHIs trivially replaceable.
1633 SmallPtrSet<Instruction *, 8> VisitedUsers;
1634 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1635 auto *User = cast<Instruction>(*UI);
1636 Use &U = UI.getUse();
1637 ++UI;
1638
1639 if (VisitedUsers.count(User) || CurLoop->contains(User))
1640 continue;
1641
1642 if (!DT->isReachableFromEntry(User->getParent())) {
1643 U = UndefValue::get(I.getType());
1644 Changed = true;
1645 continue;
1646 }
1647
1648 // The user must be a PHI node.
1649 PHINode *PN = cast<PHINode>(User);
1650
1651 // Surprisingly, instructions can be used outside of loops without any
1652 // exits. This can only happen in PHI nodes if the incoming block is
1653 // unreachable.
1654 BasicBlock *BB = PN->getIncomingBlock(U);
1655 if (!DT->isReachableFromEntry(BB)) {
1656 U = UndefValue::get(I.getType());
1657 Changed = true;
1658 continue;
1659 }
1660
1661 VisitedUsers.insert(PN);
1662 if (isTriviallyReplaceablePHI(*PN, I))
1663 continue;
1664
1665 if (!canSplitPredecessors(PN, SafetyInfo))
1666 return Changed;
1667
1668 // Split predecessors of the PHI so that we can make users trivially
1669 // replaceable.
1670 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1671
1672 // Should rebuild the iterators, as they may be invalidated by
1673 // splitPredecessorsOfLoopExit().
1674 UI = I.user_begin();
1675 UE = I.user_end();
1676 }
1677
1678 if (VisitedUsers.empty())
1679 return Changed;
1680
1681#ifndef NDEBUG
1682 SmallVector<BasicBlock *, 32> ExitBlocks;
1683 CurLoop->getUniqueExitBlocks(ExitBlocks);
1684 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1685 ExitBlocks.end());
1686#endif
1687
1688 // Clones of this instruction. Don't create more than one per exit block!
1689 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1690
1691 // If this instruction is only used outside of the loop, then all users are
1692 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1693 // the instruction.
1694 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1695 for (auto *UI : Users) {
1696 auto *User = cast<Instruction>(UI);
1697
1698 if (CurLoop->contains(User))
1699 continue;
1700
1701 PHINode *PN = cast<PHINode>(User);
1702     assert(ExitBlockSet.count(PN->getParent()) &&
1703            "The LCSSA PHI is not in an exit block!");
1704 // The PHI must be trivially replaceable.
1705 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1706 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1707 PN->replaceAllUsesWith(New);
1708 eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
1709 Changed = true;
1710 }
1711 return Changed;
1712}
1713
1714 /// When an instruction is found to use only loop-invariant operands and is
1715 /// safe to hoist, this function is called to do the dirty work.
1716///
1717static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1718 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1719 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1720 OptimizationRemarkEmitter *ORE) {
1721   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
1722                     << "\n");
1723 ORE->emit([&]() {
1724 return OptimizationRemark(DEBUG_TYPE"licm", "Hoisted", &I) << "hoisting "
1725 << ore::NV("Inst", &I);
1726 });
1727
1728 // Metadata can be dependent on conditions we are hoisting above.
1729 // Conservatively strip all metadata on the instruction unless we were
1730 // guaranteed to execute I if we entered the loop, in which case the metadata
1731 // is valid in the loop preheader.
1732 if (I.hasMetadataOtherThanDebugLoc() &&
1733 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1734 // time in isGuaranteedToExecute if we don't actually have anything to
1735 // drop. It is a compile time optimization, not required for correctness.
1736 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1737 I.dropUnknownNonDebugMetadata();
1738
1739 if (isa<PHINode>(I))
1740 // Move the new node to the end of the phi list in the destination block.
1741 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1742 else
1743 // Move the new node to the destination block, before its terminator.
1744 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1745
1746 // Apply line 0 debug locations when we are moving instructions to different
1747 // basic blocks because we want to avoid jumpy line tables.
1748 if (const DebugLoc &DL = I.getDebugLoc())
1749 I.setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
1750
1751 if (isa<LoadInst>(I))
1752 ++NumMovedLoads;
1753 else if (isa<CallInst>(I))
1754 ++NumMovedCalls;
1755 ++NumHoisted;
1756}
1757
1758 /// Only sink or hoist an instruction if it is not a trapping instruction,
1759 /// or if the instruction is known not to trap when moved to the preheader,
1760 /// or if it is a trapping instruction and is guaranteed to execute.
1761static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1762 const DominatorTree *DT,
1763 const Loop *CurLoop,
1764 const LoopSafetyInfo *SafetyInfo,
1765 OptimizationRemarkEmitter *ORE,
1766 const Instruction *CtxI) {
1767 if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
1768 return true;
1769
1770 bool GuaranteedToExecute =
1771 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1772
1773 if (!GuaranteedToExecute) {
1774 auto *LI = dyn_cast<LoadInst>(&Inst);
1775 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1776 ORE->emit([&]() {
1777 return OptimizationRemarkMissed(
1778 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressCondExecuted", LI)
1779 << "failed to hoist load with loop-invariant address "
1780 "because load is conditionally executed";
1781 });
1782 }
1783
1784 return GuaranteedToExecute;
1785}
1786
1787namespace {
1788class LoopPromoter : public LoadAndStorePromoter {
1789 Value *SomePtr; // Designated pointer to store to.
1790 const SmallSetVector<Value *, 8> &PointerMustAliases;
1791 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1792 SmallVectorImpl<Instruction *> &LoopInsertPts;
1793 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1794 PredIteratorCache &PredCache;
1795 AliasSetTracker &AST;
1796 MemorySSAUpdater *MSSAU;
1797 LoopInfo &LI;
1798 DebugLoc DL;
1799 int Alignment;
1800 bool UnorderedAtomic;
1801 AAMDNodes AATags;
1802 ICFLoopSafetyInfo &SafetyInfo;
1803
1804 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1805 if (Instruction *I = dyn_cast<Instruction>(V))
1806 if (Loop *L = LI.getLoopFor(I->getParent()))
1807 if (!L->contains(BB)) {
1808 // We need to create an LCSSA PHI node for the incoming value and
1809 // store that.
1810 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1811 I->getName() + ".lcssa", &BB->front());
1812 for (BasicBlock *Pred : PredCache.get(BB))
1813 PN->addIncoming(I, Pred);
1814 return PN;
1815 }
1816 return V;
1817 }
1818
1819public:
1820 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1821 const SmallSetVector<Value *, 8> &PMA,
1822 SmallVectorImpl<BasicBlock *> &LEB,
1823 SmallVectorImpl<Instruction *> &LIP,
1824 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1825 AliasSetTracker &ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
1826 DebugLoc dl, int alignment, bool UnorderedAtomic,
1827 const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
1828 : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1829 LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1830 PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1831 Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1832 SafetyInfo(SafetyInfo) {}
1833
1834 bool isInstInList(Instruction *I,
1835 const SmallVectorImpl<Instruction *> &) const override {
1836 Value *Ptr;
1837 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1838 Ptr = LI->getOperand(0);
1839 else
1840 Ptr = cast<StoreInst>(I)->getPointerOperand();
1841 return PointerMustAliases.count(Ptr);
1842 }
1843
1844 void doExtraRewritesBeforeFinalDeletion() override {
1845     // Insert stores in the loop exit blocks. Each exit block gets a
1846 // store of the live-out values that feed them. Since we've already told
1847 // the SSA updater about the defs in the loop and the preheader
1848 // definition, it is all set and we can start using it.
1849 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1850 BasicBlock *ExitBlock = LoopExitBlocks[i];
1851 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1852 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1853 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1854 Instruction *InsertPos = LoopInsertPts[i];
1855 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1856 if (UnorderedAtomic)
1857 NewSI->setOrdering(AtomicOrdering::Unordered);
1858 NewSI->setAlignment(MaybeAlign(Alignment));
1859 NewSI->setDebugLoc(DL);
1860 if (AATags)
1861 NewSI->setAAMetadata(AATags);
1862
1863 if (MSSAU) {
1864 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1865 MemoryAccess *NewMemAcc;
1866 if (!MSSAInsertPoint) {
1867 NewMemAcc = MSSAU->createMemoryAccessInBB(
1868 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1869 } else {
1870 NewMemAcc =
1871 MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1872 }
1873 MSSAInsertPts[i] = NewMemAcc;
1874 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1875 // FIXME: true for safety, false may still be correct.
1876 }
1877 }
1878 }
1879
1880 void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
1881 // Update alias analysis.
1882 AST.copyValue(LI, V);
1883 }
1884 void instructionDeleted(Instruction *I) const override {
1885 SafetyInfo.removeInstruction(I);
1886 AST.deleteValue(I);
1887 if (MSSAU)
1888 MSSAU->removeMemoryAccess(I);
1889 }
1890};
1891
1892
1893/// Return true iff we can prove that a caller of this function can not inspect
1894/// the contents of the provided object in a well defined program.
1895bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
1896 if (isa<AllocaInst>(Object))
1897 // Since the alloca goes out of scope, we know the caller can't retain a
1898 // reference to it and be well defined. Thus, we don't need to check for
1899 // capture.
1900 return true;
1901
1902 // For all other objects we need to know that the caller can't possibly
1903 // have gotten a reference to the object. There are two components of
1904 // that:
1905 // 1) Object can't be escaped by this function. This is what
1906 // PointerMayBeCaptured checks.
1907 // 2) Object can't have been captured at definition site. For this, we
1908 // need to know the return value is noalias. At the moment, we use a
1909 // weaker condition and handle only AllocLikeFunctions (which are
1910 // known to be noalias). TODO
1911 return isAllocLikeFn(Object, TLI) &&
1912 !PointerMayBeCaptured(Object, true, true);
1913}
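// Illustrative sketch (assumed IR, not part of the report): objects this
// predicate accepts include a local alloca and an uncaptured alloc-like call:
//   %a = alloca i32                  ; goes out of scope on return, never escapes
//   %m = call i8* @malloc(i64 8)     ; alloc-like (noalias), assumed not captured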
1914
1915} // namespace
1916
1917/// Try to promote memory values to scalars by sinking stores out of the
1918/// loop and moving loads to before the loop. We do this by looping over
1919/// the stores in the loop, looking for stores to Must pointers which are
1920/// loop invariant.
1921///
1922bool llvm::promoteLoopAccessesToScalars(
1923 const SmallSetVector<Value *, 8> &PointerMustAliases,
1924 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1925 SmallVectorImpl<Instruction *> &InsertPts,
1926 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1927 LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1928 Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
1929 ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
1930 // Verify inputs.
1931   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1932          CurAST != nullptr && SafetyInfo != nullptr &&
1933          "Unexpected Input to promoteLoopAccessesToScalars");
1934
1935 Value *SomePtr = *PointerMustAliases.begin();
1936 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1937
1938 // It is not safe to promote a load/store from the loop if the load/store is
1939 // conditional. For example, turning:
1940 //
1941 // for () { if (c) *P += 1; }
1942 //
1943 // into:
1944 //
1945 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1946 //
1947 // is not safe, because *P may only be valid to access if 'c' is true.
1948 //
1949 // The safety property divides into two parts:
1950 // p1) The memory may not be dereferenceable on entry to the loop. In this
1951 // case, we can't insert the required load in the preheader.
1952 // p2) The memory model does not allow us to insert a store along any dynamic
1953 // path which did not originally have one.
1954 //
1955 // If at least one store is guaranteed to execute, both properties are
1956 // satisfied, and promotion is legal.
1957 //
1958 // This, however, is not a necessary condition. Even if no store/load is
1959 // guaranteed to execute, we can still establish these properties.
1960 // We can establish (p1) by proving that hoisting the load into the preheader
1961 // is safe (i.e. proving dereferenceability on all paths through the loop). We
1962 // can use any access within the alias set to prove dereferenceability,
1963 // since they're all must alias.
1964 //
1965   // There are two ways to establish (p2):
1966 // a) Prove the location is thread-local. In this case the memory model
1967 // requirement does not apply, and stores are safe to insert.
1968 // b) Prove a store dominates every exit block. In this case, if an exit
1969   // block is reached, the original dynamic path would have taken us through
1970 // the store, so inserting a store into the exit block is safe. Note that this
1971 // is different from the store being guaranteed to execute. For instance,
1972 // if an exception is thrown on the first iteration of the loop, the original
1973 // store is never executed, but the exit blocks are not executed either.
1974
1975 bool DereferenceableInPH = false;
1976 bool SafeToInsertStore = false;
1977
1978 SmallVector<Instruction *, 64> LoopUses;
1979
1980 // We start with an alignment of one and try to find instructions that allow
1981 // us to prove better alignment.
1982 unsigned Alignment = 1;
1983 // Keep track of which types of access we see
1984 bool SawUnorderedAtomic = false;
1985 bool SawNotAtomic = false;
1986 AAMDNodes AATags;
1987
1988 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
1989
1990 bool IsKnownThreadLocalObject = false;
1991 if (SafetyInfo->anyBlockMayThrow()) {
1992 // If a loop can throw, we have to insert a store along each unwind edge.
1993 // That said, we can't actually make the unwind edge explicit. Therefore,
1994 // we have to prove that the store is dead along the unwind edge. We do
1995 // this by proving that the caller can't have a reference to the object
1996 // after return and thus can't possibly load from the object.
1997 Value *Object = GetUnderlyingObject(SomePtr, MDL);
1998 if (!isKnownNonEscaping(Object, TLI))
1999 return false;
2000 // Subtlety: Alloca's aren't visible to callers, but *are* potentially
2001 // visible to other threads if captured and used during their lifetimes.
2002 IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2003 }
2004
2005 // Check that all of the pointers in the alias set have the same type. We
2006 // cannot (yet) promote a memory location that is loaded and stored in
2007 // different sizes. While we are at it, collect alignment and AA info.
2008 for (Value *ASIV : PointerMustAliases) {
2009 // Check that all of the pointers in the alias set have the same type. We
2010 // cannot (yet) promote a memory location that is loaded and stored in
2011 // different sizes.
2012 if (SomePtr->getType() != ASIV->getType())
2013 return false;
2014
2015 for (User *U : ASIV->users()) {
2016 // Ignore instructions that are outside the loop.
2017 Instruction *UI = dyn_cast<Instruction>(U);
2018 if (!UI || !CurLoop->contains(UI))
2019 continue;
2020
2021       // If there is a non-load/store instruction in the loop, we can't promote
2022       // it.
2023 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2024 if (!Load->isUnordered())
2025 return false;
2026
2027 SawUnorderedAtomic |= Load->isAtomic();
2028 SawNotAtomic |= !Load->isAtomic();
2029
2030 unsigned InstAlignment = Load->getAlignment();
2031 if (!InstAlignment)
2032 InstAlignment =
2033 MDL.getABITypeAlignment(Load->getType());
2034
2035 // Note that proving a load safe to speculate requires proving
2036 // sufficient alignment at the target location. Proving it guaranteed
2037 // to execute does as well. Thus we can increase our guaranteed
2038 // alignment as well.
2039 if (!DereferenceableInPH || (InstAlignment > Alignment))
2040 if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
2041 ORE, Preheader->getTerminator())) {
2042 DereferenceableInPH = true;
2043 Alignment = std::max(Alignment, InstAlignment);
2044 }
2045 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2046 // Stores *of* the pointer are not interesting, only stores *to* the
2047 // pointer.
2048 if (UI->getOperand(1) != ASIV)
2049 continue;
2050 if (!Store->isUnordered())
2051 return false;
2052
2053 SawUnorderedAtomic |= Store->isAtomic();
2054 SawNotAtomic |= !Store->isAtomic();
2055
2056 // If the store is guaranteed to execute, both properties are satisfied.
2057 // We may want to check if a store is guaranteed to execute even if we
2058 // already know that promotion is safe, since it may have higher
2059 // alignment than any other guaranteed stores, in which case we can
2060 // raise the alignment on the promoted store.
2061 unsigned InstAlignment = Store->getAlignment();
2062 if (!InstAlignment)
2063 InstAlignment =
2064 MDL.getABITypeAlignment(Store->getValueOperand()->getType());
2065
2066 if (!DereferenceableInPH || !SafeToInsertStore ||
2067 (InstAlignment > Alignment)) {
2068 if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2069 DereferenceableInPH = true;
2070 SafeToInsertStore = true;
2071 Alignment = std::max(Alignment, InstAlignment);
2072 }
2073 }
2074
2075 // If a store dominates all exit blocks, it is safe to sink.
2076 // As explained above, if an exit block was executed, a dominating
2077 // store must have been executed at least once, so we are not
2078 // introducing stores on paths that did not have them.
2079 // Note that this only looks at explicit exit blocks. If we ever
2080 // start sinking stores into unwind edges (see above), this will break.
2081 if (!SafeToInsertStore)
2082 SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2083 return DT->dominates(Store->getParent(), Exit);
2084 });
2085
2086 // If the store is not guaranteed to execute, we may still get
2087 // deref info through it.
2088 if (!DereferenceableInPH) {
2089 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2090 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2091 MaybeAlign(Store->getAlignment()), MDL,
2092 Preheader->getTerminator(), DT);
2093 }
2094 } else
2095 return false; // Not a load or store.
2096
2097 // Merge the AA tags.
2098 if (LoopUses.empty()) {
2099 // On the first load/store, just take its AA tags.
2100 UI->getAAMetadata(AATags);
2101 } else if (AATags) {
2102 UI->getAAMetadata(AATags, /* Merge = */ true);
2103 }
2104
2105 LoopUses.push_back(UI);
2106 }
2107 }
2108
2109 // If we found both an unordered atomic instruction and a non-atomic memory
2110 // access, bail. We can't blindly promote non-atomic to atomic since we
2111 // might not be able to lower the result. We can't downgrade since that
2112 // would violate the memory model. Also, align 0 is an error for atomics.
2113 if (SawUnorderedAtomic && SawNotAtomic)
2114 return false;
2115
2116 // If we're inserting an atomic load in the preheader, we must be able to
2117 // lower it. We're only guaranteed to be able to lower naturally aligned
2118 // atomics.
2119 auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2120 if (SawUnorderedAtomic &&
2121 Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2122 return false;
2123
2124 // If we couldn't prove we can hoist the load, bail.
2125 if (!DereferenceableInPH)
2126 return false;
2127
2128 // We know we can hoist the load, but don't have a guaranteed store.
2129 // Check whether the location is thread-local. If it is, then we can insert
2130 // stores along paths which originally didn't have them without violating the
2131 // memory model.
2132 if (!SafeToInsertStore) {
2133 if (IsKnownThreadLocalObject)
2134 SafeToInsertStore = true;
2135 else {
2136 Value *Object = GetUnderlyingObject(SomePtr, MDL);
2137 SafeToInsertStore =
2138 (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2139 !PointerMayBeCaptured(Object, true, true);
2140 }
2141 }
2142
2143 // If we've still failed to prove we can sink the store, give up.
2144 if (!SafeToInsertStore)
2145 return false;
2146
2147 // Otherwise, this is safe to promote, let's do it!
2148   LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
2149                     << '\n');
2150 ORE->emit([&]() {
2151     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2152 LoopUses[0])
2153 << "Moving accesses to memory location out of the loop";
2154 });
2155 ++NumPromoted;
2156
2157 // Grab a debug location for the inserted loads/stores; given that the
2158 // inserted loads/stores have little relation to the original loads/stores,
2159 // this code just arbitrarily picks a location from one, since any debug
2160 // location is better than none.
2161 DebugLoc DL = LoopUses[0]->getDebugLoc();
2162
2163 // We use the SSAUpdater interface to insert phi nodes as required.
2164 SmallVector<PHINode *, 16> NewPHIs;
2165 SSAUpdater SSA(&NewPHIs);
2166 LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2167 InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
2168 Alignment, SawUnorderedAtomic, AATags, *SafetyInfo);
2169
2170 // Set up the preheader to have a definition of the value. It is the live-out
2171 // value from the preheader that the uses in the loop will use.
2172 LoadInst *PreheaderLoad = new LoadInst(
2173 SomePtr->getType()->getPointerElementType(), SomePtr,
2174 SomePtr->getName() + ".promoted", Preheader->getTerminator());
2175 if (SawUnorderedAtomic)
2176 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2177 PreheaderLoad->setAlignment(MaybeAlign(Alignment));
2178 PreheaderLoad->setDebugLoc(DL);
2179 if (AATags)
2180 PreheaderLoad->setAAMetadata(AATags);
2181 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2182
2183 if (MSSAU) {
2184 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2185 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2186 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2187 MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2188 }
2189
2190 if (MSSAU && VerifyMemorySSA)
2191 MSSAU->getMemorySSA()->verifyMemorySSA();
2192 // Rewrite all the loads in the loop and remember all the definitions from
2193 // stores in the loop.
2194 Promoter.run(LoopUses);
2195
2196 if (MSSAU && VerifyMemorySSA)
2197 MSSAU->getMemorySSA()->verifyMemorySSA();
2198 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2199 if (PreheaderLoad->use_empty())
2200 eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);
2201
2202 return true;
2203}
2204
2205/// Returns an owning pointer to an alias set which incorporates aliasing info
2206/// from L and all subloops of L.
2207/// FIXME: In new pass manager, there is no helper function to handle loop
2208/// analysis such as cloneBasicBlockAnalysis, so the AST needs to be recomputed
2209/// from scratch for every loop. Hook up with the helper functions when
2210/// available in the new pass manager to avoid redundant computation.
2211std::unique_ptr<AliasSetTracker>
2212LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
2213 AliasAnalysis *AA) {
2214 std::unique_ptr<AliasSetTracker> CurAST;
2215 SmallVector<Loop *, 4> RecomputeLoops;
2216 for (Loop *InnerL : L->getSubLoops()) {
2217 auto MapI = LoopToAliasSetMap.find(InnerL);
2218 // If the AST for this inner loop is missing, it may have been merged into
2219 // some other loop's AST and then that loop unrolled, so we need to
2220 // recompute it.
2221 if (MapI == LoopToAliasSetMap.end()) {
2222 RecomputeLoops.push_back(InnerL);
2223 continue;
2224 }
2225 std::unique_ptr<AliasSetTracker> InnerAST = std::move(MapI->second);
2226
2227 if (CurAST) {
2228 // What if InnerLoop was modified by other passes?
2229 // Once we've incorporated the inner loop's AST into ours, we don't need
2230 // the subloop's anymore.
2231 CurAST->add(*InnerAST);
2232 } else {
2233 CurAST = std::move(InnerAST);
2234 }
2235 LoopToAliasSetMap.erase(MapI);
2236 }
2237 if (!CurAST)
2238 CurAST = std::make_unique<AliasSetTracker>(*AA);
2239
2240 // Add everything from the subloops that are no longer directly available.
2241 for (Loop *InnerL : RecomputeLoops)
2242 for (BasicBlock *BB : InnerL->blocks())
2243 CurAST->add(*BB);
2244
2245 // And merge in this loop (without anything from inner loops).
2246 for (BasicBlock *BB : L->blocks())
2247 if (LI->getLoopFor(BB) == L)
2248 CurAST->add(*BB);
2249
2250 return CurAST;
2251}
2252
2253std::unique_ptr<AliasSetTracker>
2254LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA(
2255 Loop *L, AliasAnalysis *AA, MemorySSAUpdater *MSSAU) {
2256 auto *MSSA = MSSAU->getMemorySSA();
2257 auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L);
2258 CurAST->addAllInstructionsInLoopUsingMSSA();
2259 return CurAST;
2260}
2261
2262/// Simple analysis hook. Clone alias set info.
2263///
2264void LegacyLICMPass::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
2265 Loop *L) {
2266 auto ASTIt = LICM.getLoopToAliasSetMap().find(L);
2267 if (ASTIt == LICM.getLoopToAliasSetMap().end())
2268 return;
2269
2270 ASTIt->second->copyValue(From, To);
2271}
2272
2273/// Simple analysis hook. Delete value V from the alias set.
2274///
2275void LegacyLICMPass::deleteAnalysisValue(Value *V, Loop *L) {
2276 auto ASTIt = LICM.getLoopToAliasSetMap().find(L);
2277 if (ASTIt == LICM.getLoopToAliasSetMap().end())
2278 return;
2279
2280 ASTIt->second->deleteValue(V);
2281}
2282
2283/// Simple analysis hook. Delete value L from the alias set map.
2284///
2285void LegacyLICMPass::deleteAnalysisLoop(Loop *L) {
2286 if (!LICM.getLoopToAliasSetMap().count(L))
2287 return;
2288
2289 LICM.getLoopToAliasSetMap().erase(L);
2290}
2291
2292static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2293 AliasSetTracker *CurAST, Loop *CurLoop,
2294 AliasAnalysis *AA) {
2295 // First check to see if any of the basic blocks in CurLoop invalidate MemLoc.
2296 bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();
2297
2298 if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
2299 return isInvalidatedAccordingToAST;
2300
2301 // Check with a diagnostic analysis if we can refine the information above.
2302 // This is to identify the limitations of using the AST.
2303 // The alias set mechanism used by LICM has a major weakness in that it
2304 // combines all things which may alias into a single set *before* asking
2305 // modref questions. As a result, a single readonly call within a loop will
2306 // collapse all loads and stores into a single alias set and report
2307 // invalidation if the loop contains any store. For example, readonly calls
2308 // with deopt states have this form and create a general alias set with all
2309 // loads and stores. In order to get any LICM in loops containing possible
2310 // deopt states, we need a more precise invalidation check: querying the
2311 // mod-ref info of each instruction within the loop against the location.
2312 // This has a complexity of O(N^2), so currently it is used only as a
2313 // diagnostic tool, since the default value of LICMN2Threshold is zero.
2314
2315 // Don't look at nested loops.
2316 if (CurLoop->begin() != CurLoop->end())
2317 return true;
2318
2319 int N = 0;
2320 for (BasicBlock *BB : CurLoop->getBlocks())
2321 for (Instruction &I : *BB) {
2322 if (N >= LICMN2Theshold) {
2323         LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "
2324                           << *(MemLoc.Ptr) << "\n");
2325 return true;
2326 }
2327 N++;
2328 auto Res = AA->getModRefInfo(&I, MemLoc);
2329 if (isModSet(Res)) {
2330         LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
2331                           << *(MemLoc.Ptr) << "\n");
2332 return true;
2333 }
2334 }
2335   LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
2336 return false;
2337}
2338
2339static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2340 Loop *CurLoop,
2341 SinkAndHoistLICMFlags &Flags) {
2342 // For hoisting, use the walker to determine safety
2343 if (!Flags.IsSink) {
2344 MemoryAccess *Source;
2345 // See declaration of SetLicmMssaOptCap for usage details.
2346 if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap)
2347 Source = MU->getDefiningAccess();
2348 else {
2349 Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2350 Flags.LicmMssaOptCounter++;
2351 }
2352 return !MSSA->isLiveOnEntryDef(Source) &&
2353 CurLoop->contains(Source->getBlock());
2354 }
2355
2356 // For sinking, we'd need to check all Defs below this use. The getClobbering
2357 // call will look across the backedge of the loop, but will check aliasing
2358 // against the instructions of the previous iteration.
2359 // For example:
2360 // for (i ... )
2361 // load a[i] ( Use (LoE)
2362 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2363 // i++;
2364 // The load sees no clobbering inside the loop, as the backedge alias check
2365 // does phi translation, and will check aliasing against store a[i-1].
2366 // However sinking the load outside the loop, below the store is incorrect.
2367
2368 // For now, only sink if any Defs in the loop are in the same block as the
2369 // use and precede it.
2370 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2371 // needs PostDominatorTreeAnalysis.
2372 // FIXME: More precise: no Defs that alias this Use.
2373 if (Flags.NoOfMemAccTooLarge)
2374 return true;
2375 for (auto *BB : CurLoop->getBlocks())
2376 if (auto *Accesses = MSSA->getBlockDefs(BB))
2377 for (const auto &MA : *Accesses)
2378 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2379 if (MU->getBlock() != MD->getBlock() ||
2380 !MSSA->locallyDominates(MD, MU))
2381 return true;
2382 return false;
2383}
2384
2385/// Little predicate that returns true if the specified basic block is in
2386/// a subloop of the current one, not the current one itself.
2387///
2388static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2389   assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2390 return LI->getLoopFor(BB) != CurLoop;
2391}

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a simple and efficient mechanism for performing general
10// tree-based pattern matches on the LLVM IR. The power of these routines is
11// that they allow you to write concise patterns that are expressive and easy
12// to understand. The other major advantage is that they allow you to
13// trivially capture/bind elements in the pattern to variables. For example,
14// you can do something like this:
14// you can do something like this:
15//
16// Value *Exp = ...
17// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
18// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
19// m_And(m_Value(Y), m_ConstantInt(C2))))) {
20// ... Pattern is matched and variables are bound ...
21// }
22//
23// This is primarily useful to things like the instruction combiner, but can
24// also be useful for static analysis tools or code generators.
25//
26//===----------------------------------------------------------------------===//
27
28#ifndef LLVM_IR_PATTERNMATCH_H
29#define LLVM_IR_PATTERNMATCH_H
30
31#include "llvm/ADT/APFloat.h"
32#include "llvm/ADT/APInt.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/Instructions.h"
38#include "llvm/IR/IntrinsicInst.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/Casting.h"
43#include <cstdint>
44
45namespace llvm {
46namespace PatternMatch {
47
48template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
49 return const_cast<Pattern &>(P).match(V);
33    Calling 'IntrinsicID_match::match'
37    Returning from 'IntrinsicID_match::match'
38    Returning zero, which participates in a condition later
42    Calling 'IntrinsicID_match::match'
46    Returning from 'IntrinsicID_match::match'
47    Returning zero, which participates in a condition later
50}
51
52template <typename SubPattern_t> struct OneUse_match {
53 SubPattern_t SubPattern;
54
55 OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
56
57 template <typename OpTy> bool match(OpTy *V) {
58 return V->hasOneUse() && SubPattern.match(V);
59 }
60};
61
62template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
63 return SubPattern;
64}
65
66template <typename Class> struct class_match {
67 template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
68};
69
70/// Match an arbitrary value and ignore it.
71inline class_match<Value> m_Value() { return class_match<Value>(); }
72
73/// Match an arbitrary binary operation and ignore it.
74inline class_match<BinaryOperator> m_BinOp() {
75 return class_match<BinaryOperator>();
76}
77
78/// Match any compare instruction and ignore it.
79inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
80
81/// Match an arbitrary ConstantInt and ignore it.
82inline class_match<ConstantInt> m_ConstantInt() {
83 return class_match<ConstantInt>();
84}
85
86/// Match an arbitrary undef constant.
87inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
88
89/// Match an arbitrary Constant and ignore it.
90inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
91
92/// Match an arbitrary basic block value and ignore it.
93inline class_match<BasicBlock> m_BasicBlock() {
94 return class_match<BasicBlock>();
95}
96
97/// Inverting matcher
98template <typename Ty> struct match_unless {
99 Ty M;
100
101 match_unless(const Ty &Matcher) : M(Matcher) {}
102
103 template <typename ITy> bool match(ITy *V) { return !M.match(V); }
104};
105
106/// Match if the inner matcher does *NOT* match.
107template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) {
108 return match_unless<Ty>(M);
109}
110
111/// Matching combinators
112template <typename LTy, typename RTy> struct match_combine_or {
113 LTy L;
114 RTy R;
115
116 match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
117
118 template <typename ITy> bool match(ITy *V) {
119 if (L.match(V))
120 return true;
121 if (R.match(V))
122 return true;
123 return false;
124 }
125};
126
127template <typename LTy, typename RTy> struct match_combine_and {
128 LTy L;
129 RTy R;
130
131 match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
132
133 template <typename ITy> bool match(ITy *V) {
134 if (L.match(V))
135 if (R.match(V))
136 return true;
137 return false;
138 }
139};
140
141/// Combine two pattern matchers matching L || R
142template <typename LTy, typename RTy>
143inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
144 return match_combine_or<LTy, RTy>(L, R);
145}
146
147/// Combine two pattern matchers matching L && R
148template <typename LTy, typename RTy>
149inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
150 return match_combine_and<LTy, RTy>(L, R);
151}
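// Illustrative use of the combinators (the value V and the bound variables
// are hypothetical, not part of this header):
//
//   Value *X, *Y;
//   // Matches either "add X, Y" or "sub X, Y", binding X and Y on success.
//   if (match(V, m_CombineOr(m_Add(m_Value(X), m_Value(Y)),
//                            m_Sub(m_Value(X), m_Value(Y)))))
//     ...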
152
153struct apint_match {
154 const APInt *&Res;
155
156 apint_match(const APInt *&R) : Res(R) {}
157
158 template <typename ITy> bool match(ITy *V) {
159 if (auto *CI = dyn_cast<ConstantInt>(V)) {
160 Res = &CI->getValue();
161 return true;
162 }
163 if (V->getType()->isVectorTy())
164 if (const auto *C = dyn_cast<Constant>(V))
165 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
166 Res = &CI->getValue();
167 return true;
168 }
169 return false;
170 }
171};
172// Either "constexpr if" or renaming ConstantFP::getValueAPF to
173// ConstantFP::getValue is needed to handle both apint/apfloat via a
174// single template function.
175struct apfloat_match {
176 const APFloat *&Res;
177 apfloat_match(const APFloat *&R) : Res(R) {}
178 template <typename ITy> bool match(ITy *V) {
179 if (auto *CI = dyn_cast<ConstantFP>(V)) {
180 Res = &CI->getValueAPF();
181 return true;
182 }
183 if (V->getType()->isVectorTy())
184 if (const auto *C = dyn_cast<Constant>(V))
185 if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) {
186 Res = &CI->getValueAPF();
187 return true;
188 }
189 return false;
190 }
191};
192
193/// Match a ConstantInt or splatted ConstantVector, binding the
194/// specified pointer to the contained APInt.
195inline apint_match m_APInt(const APInt *&Res) { return Res; }
196
197/// Match a ConstantFP or splatted ConstantVector, binding the
198/// specified pointer to the contained APFloat.
199inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; }
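// Illustrative use (hypothetical value V; not part of this header): m_APInt
// handles both a scalar ConstantInt and a splat ConstantVector uniformly, so
// one pattern covers i32 and <4 x i32>:
//
//   const APInt *C;
//   Value *X;
//   if (match(V, m_Mul(m_Value(X), m_APInt(C))) && C->isPowerOf2())
//     ... // e.g. strength-reduce the multiply to a shift by C->logBase2().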
200
201template <int64_t Val> struct constantint_match {
202 template <typename ITy> bool match(ITy *V) {
203 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
204 const APInt &CIV = CI->getValue();
205 if (Val >= 0)
206 return CIV == static_cast<uint64_t>(Val);
207 // If Val is negative, and CI is shorter than it, truncate to the right
208 // number of bits. If it is larger, then we have to sign extend. Just
209 // compare their negated values.
210 return -CIV == -Val;
211 }
212 return false;
213 }
214};
215
216/// Match a ConstantInt with a specific value.
217template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
218 return constantint_match<Val>();
219}
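// Worked example for the negative case above (illustrative only): matching
// m_ConstantInt<-1>() against an i8 constant 255 (i.e. i8 -1):
//   CIV  = 255 (8-bit APInt)
//   -CIV = 1   (8-bit negation wraps: 256 - 255)
//   -Val = 1   (negating the int64_t -1)
// so -CIV == -Val holds and the pattern matches, even though CIV's bit width
// is narrower than int64_t.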
220
221/// This helper class is used to match scalar and vector integer constants that
222/// satisfy a specified predicate.
223/// For vector constants, undefined elements are ignored.
224template <typename Predicate> struct cst_pred_ty : public Predicate {
225 template <typename ITy> bool match(ITy *V) {
226 if (const auto *CI = dyn_cast<ConstantInt>(V))
227 return this->isValue(CI->getValue());
228 if (V->getType()->isVectorTy()) {
229 if (const auto *C = dyn_cast<Constant>(V)) {
230 if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
231 return this->isValue(CI->getValue());
232
233 // Non-splat vector constant: check each element for a match.
234 unsigned NumElts = V->getType()->getVectorNumElements();
235          assert(NumElts != 0 && "Constant vector with no elements?");
236 bool HasNonUndefElements = false;
237 for (unsigned i = 0; i != NumElts; ++i) {
238 Constant *Elt = C->getAggregateElement(i);
239 if (!Elt)
240 return false;
241 if (isa<UndefValue>(Elt))
242 continue;
243 auto *CI = dyn_cast<ConstantInt>(Elt);
244 if (!CI || !this->isValue(CI->getValue()))
245 return false;
246 HasNonUndefElements = true;
247 }
248 return HasNonUndefElements;
249 }
250 }
251 return false;
252 }
253};
254
255/// This helper class is used to match scalar and vector constants that
256/// satisfy a specified predicate, and bind them to an APInt.
257template <typename Predicate> struct api_pred_ty : public Predicate {
258 const APInt *&Res;
259
260 api_pred_ty(const APInt *&R) : Res(R) {}
261
262 template <typename ITy> bool match(ITy *V) {
263 if (const auto *CI = dyn_cast<ConstantInt>(V))
264 if (this->isValue(CI->getValue())) {
265 Res = &CI->getValue();
266 return true;
267 }
268 if (V->getType()->isVectorTy())
269 if (const auto *C = dyn_cast<Constant>(V))
270 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
271 if (this->isValue(CI->getValue())) {
272 Res = &CI->getValue();
273 return true;
274 }
275
276 return false;
277 }
278};
279
280/// This helper class is used to match scalar and vector floating-point
281/// constants that satisfy a specified predicate.
282/// For vector constants, undefined elements are ignored.
283template <typename Predicate> struct cstfp_pred_ty : public Predicate {
284 template <typename ITy> bool match(ITy *V) {
285 if (const auto *CF = dyn_cast<ConstantFP>(V))
286 return this->isValue(CF->getValueAPF());
287 if (V->getType()->isVectorTy()) {
288 if (const auto *C = dyn_cast<Constant>(V)) {
289 if (const auto *CF = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
290 return this->isValue(CF->getValueAPF());
291
292 // Non-splat vector constant: check each element for a match.
293 unsigned NumElts = V->getType()->getVectorNumElements();
294          assert(NumElts != 0 && "Constant vector with no elements?");
295 bool HasNonUndefElements = false;
296 for (unsigned i = 0; i != NumElts; ++i) {
297 Constant *Elt = C->getAggregateElement(i);
298 if (!Elt)
299 return false;
300 if (isa<UndefValue>(Elt))
301 continue;
302 auto *CF = dyn_cast<ConstantFP>(Elt);
303 if (!CF || !this->isValue(CF->getValueAPF()))
304 return false;
305 HasNonUndefElements = true;
306 }
307 return HasNonUndefElements;
308 }
309 }
310 return false;
311 }
312};
313
314///////////////////////////////////////////////////////////////////////////////
315//
316// Encapsulate constant value queries for use in templated predicate matchers.
317// This allows checking if constants match using compound predicates and works
318// with vector constants, possibly with relaxed constraints. For example, ignore
319// undef values.
320//
321///////////////////////////////////////////////////////////////////////////////
322
323struct is_any_apint {
324 bool isValue(const APInt &C) { return true; }
325};
326/// Match an integer or vector with any integral constant.
327/// For vectors, this includes constants with undefined elements.
328inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() {
329 return cst_pred_ty<is_any_apint>();
330}
331
332struct is_all_ones {
333 bool isValue(const APInt &C) { return C.isAllOnesValue(); }
334};
335/// Match an integer or vector with all bits set.
336/// For vectors, this includes constants with undefined elements.
337inline cst_pred_ty<is_all_ones> m_AllOnes() {
338 return cst_pred_ty<is_all_ones>();
339}
340
341struct is_maxsignedvalue {
342 bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
343};
344/// Match an integer or vector with values having all bits except for the high
345/// bit set (0x7f...).
346/// For vectors, this includes constants with undefined elements.
347inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
348 return cst_pred_ty<is_maxsignedvalue>();
349}
350inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
351 return V;
352}
353
354struct is_negative {
355 bool isValue(const APInt &C) { return C.isNegative(); }
356};
357/// Match an integer or vector of negative values.
358/// For vectors, this includes constants with undefined elements.
359inline cst_pred_ty<is_negative> m_Negative() {
360 return cst_pred_ty<is_negative>();
361}
362inline api_pred_ty<is_negative> m_Negative(const APInt *&V) {
363 return V;
364}
365
366struct is_nonnegative {
367 bool isValue(const APInt &C) { return C.isNonNegative(); }
368};
369/// Match an integer or vector of nonnegative values.
370/// For vectors, this includes constants with undefined elements.
371inline cst_pred_ty<is_nonnegative> m_NonNegative() {
372 return cst_pred_ty<is_nonnegative>();
373}
374inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) {
375 return V;
376}
377
378struct is_one {
379 bool isValue(const APInt &C) { return C.isOneValue(); }
380};
381/// Match an integer 1 or a vector with all elements equal to 1.
382/// For vectors, this includes constants with undefined elements.
383inline cst_pred_ty<is_one> m_One() {
384 return cst_pred_ty<is_one>();
385}
386
387struct is_zero_int {
388 bool isValue(const APInt &C) { return C.isNullValue(); }
389};
390/// Match an integer 0 or a vector with all elements equal to 0.
391/// For vectors, this includes constants with undefined elements.
392inline cst_pred_ty<is_zero_int> m_ZeroInt() {
393 return cst_pred_ty<is_zero_int>();
394}
395
396struct is_zero {
397 template <typename ITy> bool match(ITy *V) {
398 auto *C = dyn_cast<Constant>(V);
399 return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C));
400 }
401};
402/// Match any null constant or a vector with all elements equal to 0.
403/// For vectors, this includes constants with undefined elements.
404inline is_zero m_Zero() {
405 return is_zero();
406}
407
408struct is_power2 {
409 bool isValue(const APInt &C) { return C.isPowerOf2(); }
410};
411/// Match an integer or vector power-of-2.
412/// For vectors, this includes constants with undefined elements.
413inline cst_pred_ty<is_power2> m_Power2() {
414 return cst_pred_ty<is_power2>();
415}
416inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
417 return V;
418}
419
420struct is_negated_power2 {
421 bool isValue(const APInt &C) { return (-C).isPowerOf2(); }
422};
423/// Match an integer or vector negated power-of-2.
424/// For vectors, this includes constants with undefined elements.
425inline cst_pred_ty<is_negated_power2> m_NegatedPower2() {
426 return cst_pred_ty<is_negated_power2>();
427}
428inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) {
429 return V;
430}
431
432struct is_power2_or_zero {
433 bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
434};
435/// Match an integer or vector of 0 or power-of-2 values.
436/// For vectors, this includes constants with undefined elements.
437inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
438 return cst_pred_ty<is_power2_or_zero>();
439}
440inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
441 return V;
442}
443
444struct is_sign_mask {
445 bool isValue(const APInt &C) { return C.isSignMask(); }
446};
447/// Match an integer or vector with only the sign bit(s) set.
448/// For vectors, this includes constants with undefined elements.
449inline cst_pred_ty<is_sign_mask> m_SignMask() {
450 return cst_pred_ty<is_sign_mask>();
451}
452
453struct is_lowbit_mask {
454 bool isValue(const APInt &C) { return C.isMask(); }
455};
456/// Match an integer or vector with only the low bit(s) set.
457/// For vectors, this includes constants with undefined elements.
458inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
459 return cst_pred_ty<is_lowbit_mask>();
460}
461
462struct icmp_pred_with_threshold {
463 ICmpInst::Predicate Pred;
464 const APInt *Thr;
465 bool isValue(const APInt &C) {
466 switch (Pred) {
467 case ICmpInst::Predicate::ICMP_EQ:
468 return C.eq(*Thr);
469 case ICmpInst::Predicate::ICMP_NE:
470 return C.ne(*Thr);
471 case ICmpInst::Predicate::ICMP_UGT:
472 return C.ugt(*Thr);
473 case ICmpInst::Predicate::ICMP_UGE:
474 return C.uge(*Thr);
475 case ICmpInst::Predicate::ICMP_ULT:
476 return C.ult(*Thr);
477 case ICmpInst::Predicate::ICMP_ULE:
478 return C.ule(*Thr);
479 case ICmpInst::Predicate::ICMP_SGT:
480 return C.sgt(*Thr);
481 case ICmpInst::Predicate::ICMP_SGE:
482 return C.sge(*Thr);
483 case ICmpInst::Predicate::ICMP_SLT:
484 return C.slt(*Thr);
485 case ICmpInst::Predicate::ICMP_SLE:
486 return C.sle(*Thr);
487 default:
488      llvm_unreachable("Unhandled ICmp predicate");
489 }
490 }
491};
492/// Match an integer or vector with every element comparing 'pred' (eq/ne/...)
493/// to Threshold. For vectors, this includes constants with undefined elements.
494inline cst_pred_ty<icmp_pred_with_threshold>
495m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold) {
496 cst_pred_ty<icmp_pred_with_threshold> P;
497 P.Pred = Predicate;
498 P.Thr = &Threshold;
499 return P;
500}
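// Illustrative use (hypothetical value V; not part of this header): check
// that every element of a constant is unsigned-less-than 8, in one pattern
// that also accepts splat and non-splat vectors with undef elements:
//
//   if (match(V, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(32, 8))))
//     ...
//
// Note that Threshold is captured by pointer and compared with same-width
// APInt predicates, so it must outlive the match call and match the
// constant's bit width; a temporary in the same statement, as above, is fine.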
501
502struct is_nan {
503 bool isValue(const APFloat &C) { return C.isNaN(); }
504};
505/// Match an arbitrary NaN constant. This includes quiet and signaling NaNs.
506/// For vectors, this includes constants with undefined elements.
507inline cstfp_pred_ty<is_nan> m_NaN() {
508 return cstfp_pred_ty<is_nan>();
509}
510
511struct is_any_zero_fp {
512 bool isValue(const APFloat &C) { return C.isZero(); }
513};
514/// Match a floating-point negative zero or positive zero.
515/// For vectors, this includes constants with undefined elements.
516inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() {
517 return cstfp_pred_ty<is_any_zero_fp>();
518}
519
520struct is_pos_zero_fp {
521 bool isValue(const APFloat &C) { return C.isPosZero(); }
522};
523/// Match a floating-point positive zero.
524/// For vectors, this includes constants with undefined elements.
525inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() {
526 return cstfp_pred_ty<is_pos_zero_fp>();
527}
528
529struct is_neg_zero_fp {
530 bool isValue(const APFloat &C) { return C.isNegZero(); }
531};
532/// Match a floating-point negative zero.
533/// For vectors, this includes constants with undefined elements.
534inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() {
535 return cstfp_pred_ty<is_neg_zero_fp>();
536}
537
538///////////////////////////////////////////////////////////////////////////////
539
540template <typename Class> struct bind_ty {
541 Class *&VR;
542
543 bind_ty(Class *&V) : VR(V) {}
544
545 template <typename ITy> bool match(ITy *V) {
546 if (auto *CV = dyn_cast<Class>(V)) {
547 VR = CV;
548 return true;
549 }
550 return false;
551 }
552};
553
554/// Match a value, capturing it if we match.
555inline bind_ty<Value> m_Value(Value *&V) { return V; }
556inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
557
558/// Match an instruction, capturing it if we match.
559inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
560/// Match a binary operator, capturing it if we match.
561inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
562/// Match a with overflow intrinsic, capturing it if we match.
563inline bind_ty<WithOverflowInst> m_WithOverflowInst(WithOverflowInst *&I) { return I; }
564
565/// Match a ConstantInt, capturing the value if we match.
566inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
567
568/// Match a Constant, capturing the value if we match.
569inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
570
571/// Match a ConstantFP, capturing the value if we match.
572inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
573
574/// Match a basic block value, capturing it if we match.
575inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; }
576inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) {
577 return V;
578}
579
580/// Match a specified Value*.
581struct specificval_ty {
582 const Value *Val;
583
584 specificval_ty(const Value *V) : Val(V) {}
585
586 template <typename ITy> bool match(ITy *V) { return V == Val; }
587};
588
589/// Match if we have a specific specified value.
590inline specificval_ty m_Specific(const Value *V) { return V; }
591
592/// Stores a reference to the Value *, not the Value * itself,
593/// thus can be used in commutative matchers.
594template <typename Class> struct deferredval_ty {
595 Class *const &Val;
596
597 deferredval_ty(Class *const &V) : Val(V) {}
598
599 template <typename ITy> bool match(ITy *const V) { return V == Val; }
600};
601
602/// A commutative-friendly version of m_Specific().
603inline deferredval_ty<Value> m_Deferred(Value *const &V) { return V; }
604inline deferredval_ty<const Value> m_Deferred(const Value *const &V) {
605 return V;
606}
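// Illustrative use (hypothetical value V): m_Deferred re-checks a value bound
// earlier in the same pattern, which m_Specific cannot do because the capture
// has not yet happened when the matcher object is constructed:
//
//   Value *X;
//   if (match(V, m_And(m_Value(X), m_Deferred(X))))
//     ... // matches "and %x, %x" for any %x.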
607
608/// Match a specified floating point value or a vector with all elements
609/// equal to that value.
610struct specific_fpval {
611 double Val;
612
613 specific_fpval(double V) : Val(V) {}
614
615 template <typename ITy> bool match(ITy *V) {
616 if (const auto *CFP = dyn_cast<ConstantFP>(V))
617 return CFP->isExactlyValue(Val);
618 if (V->getType()->isVectorTy())
619 if (const auto *C = dyn_cast<Constant>(V))
620 if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
621 return CFP->isExactlyValue(Val);
622 return false;
623 }
624};
625
626/// Match a specific floating point value or vector with all elements
627/// equal to the value.
628inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
629
630/// Match a float 1.0 or vector with all elements equal to 1.0.
631inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
632
633struct bind_const_intval_ty {
634 uint64_t &VR;
635
636 bind_const_intval_ty(uint64_t &V) : VR(V) {}
637
638 template <typename ITy> bool match(ITy *V) {
639 if (const auto *CV = dyn_cast<ConstantInt>(V))
640      if (CV->getValue().ule(UINT64_MAX)) {
641 VR = CV->getZExtValue();
642 return true;
643 }
644 return false;
645 }
646};
647
648/// Match a specified integer value or a vector with all elements equal to
649/// that value.
650struct specific_intval {
651 APInt Val;
652
653 specific_intval(APInt V) : Val(std::move(V)) {}
654
655 template <typename ITy> bool match(ITy *V) {
656 const auto *CI = dyn_cast<ConstantInt>(V);
657 if (!CI && V->getType()->isVectorTy())
658 if (const auto *C = dyn_cast<Constant>(V))
659 CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
660
661 return CI && APInt::isSameValue(CI->getValue(), Val);
662 }
663};
664
665/// Match a specific integer value or vector with all elements equal to
666/// the value.
667inline specific_intval m_SpecificInt(APInt V) {
668 return specific_intval(std::move(V));
669}
670
671inline specific_intval m_SpecificInt(uint64_t V) {
672 return m_SpecificInt(APInt(64, V));
673}
674
675/// Match a ConstantInt and bind to its value. This does not match
676/// ConstantInts wider than 64 bits.
677inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
678
679/// Match a specified basic block value.
680struct specific_bbval {
681 BasicBlock *Val;
682
683 specific_bbval(BasicBlock *Val) : Val(Val) {}
684
685 template <typename ITy> bool match(ITy *V) {
686 const auto *BB = dyn_cast<BasicBlock>(V);
687 return BB && BB == Val;
688 }
689};
690
691/// Match a specific basic block value.
692inline specific_bbval m_SpecificBB(BasicBlock *BB) {
693 return specific_bbval(BB);
694}
695
696/// A commutative-friendly version of m_Specific().
697inline deferredval_ty<BasicBlock> m_Deferred(BasicBlock *const &BB) {
698 return BB;
699}
700inline deferredval_ty<const BasicBlock>
701m_Deferred(const BasicBlock *const &BB) {
702 return BB;
703}
704
705//===----------------------------------------------------------------------===//
706// Matcher for any binary operator.
707//
708template <typename LHS_t, typename RHS_t, bool Commutable = false>
709struct AnyBinaryOp_match {
710 LHS_t L;
711 RHS_t R;
712
713 // The evaluation order is always stable, regardless of Commutability.
714 // The LHS is always matched first.
715 AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
716
717 template <typename OpTy> bool match(OpTy *V) {
718 if (auto *I = dyn_cast<BinaryOperator>(V))
719 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
720 (Commutable && L.match(I->getOperand(1)) &&
721 R.match(I->getOperand(0)));
722 return false;
723 }
724};
725
726template <typename LHS, typename RHS>
727inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
728 return AnyBinaryOp_match<LHS, RHS>(L, R);
729}
730
731//===----------------------------------------------------------------------===//
732// Matchers for specific binary operators.
733//
734
735template <typename LHS_t, typename RHS_t, unsigned Opcode,
736 bool Commutable = false>
737struct BinaryOp_match {
738 LHS_t L;
739 RHS_t R;
740
741 // The evaluation order is always stable, regardless of Commutability.
742 // The LHS is always matched first.
743 BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
744
745 template <typename OpTy> bool match(OpTy *V) {
746 if (V->getValueID() == Value::InstructionVal + Opcode) {
747 auto *I = cast<BinaryOperator>(V);
748 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
749 (Commutable && L.match(I->getOperand(1)) &&
750 R.match(I->getOperand(0)));
751 }
752 if (auto *CE = dyn_cast<ConstantExpr>(V))
753 return CE->getOpcode() == Opcode &&
754 ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
755 (Commutable && L.match(CE->getOperand(1)) &&
756 R.match(CE->getOperand(0))));
757 return false;
758 }
759};
760
761template <typename LHS, typename RHS>
762inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
763 const RHS &R) {
764 return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
765}
766
767template <typename LHS, typename RHS>
768inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
769 const RHS &R) {
770 return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
771}
772
773template <typename LHS, typename RHS>
774inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
775 const RHS &R) {
776 return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
777}
778
779template <typename LHS, typename RHS>
780inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
781 const RHS &R) {
782 return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
783}
784
785template <typename Op_t> struct FNeg_match {
786 Op_t X;
787
788 FNeg_match(const Op_t &Op) : X(Op) {}
789 template <typename OpTy> bool match(OpTy *V) {
790 auto *FPMO = dyn_cast<FPMathOperator>(V);
791 if (!FPMO) return false;
792
793 if (FPMO->getOpcode() == Instruction::FNeg)
794 return X.match(FPMO->getOperand(0));
795
796 if (FPMO->getOpcode() == Instruction::FSub) {
797 if (FPMO->hasNoSignedZeros()) {
798 // With 'nsz', any zero goes.
799 if (!cstfp_pred_ty<is_any_zero_fp>().match(FPMO->getOperand(0)))
800 return false;
801 } else {
802 // Without 'nsz', we need fsub -0.0, X exactly.
803 if (!cstfp_pred_ty<is_neg_zero_fp>().match(FPMO->getOperand(0)))
804 return false;
805 }
806
807 return X.match(FPMO->getOperand(1));
808 }
809
810 return false;
811 }
812};
813
814/// Match 'fneg X' as 'fsub -0.0, X'.
815template <typename OpTy>
816inline FNeg_match<OpTy>
817m_FNeg(const OpTy &X) {
818 return FNeg_match<OpTy>(X);
819}
820
821/// Match 'fneg X' as 'fsub +-0.0, X'.
822template <typename RHS>
823inline BinaryOp_match<cstfp_pred_ty<is_any_zero_fp>, RHS, Instruction::FSub>
824m_FNegNSZ(const RHS &X) {
825 return m_FSub(m_AnyZeroFP(), X);
826}
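// Illustrative use (hypothetical value V): both the dedicated fneg
// instruction and the legacy "fsub -0.0, X" idiom are matched by m_FNeg, so
// callers need not distinguish the two encodings:
//
//   Value *X;
//   if (match(V, m_FNeg(m_Value(X))))
//     ... // V computes -X.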
827
828template <typename Op_t> struct Freeze_match {
829 Op_t X;
830
831 Freeze_match(const Op_t &Op) : X(Op) {}
832 template <typename OpTy> bool match(OpTy *V) {
833 auto *I = dyn_cast<UnaryOperator>(V);
834 if (!I) return false;
835
836 if (isa<FreezeOperator>(I))
837 return X.match(I->getOperand(0));
838
839 return false;
840 }
841};
842
843/// Matches freeze.
844template <typename OpTy>
845inline Freeze_match<OpTy>
846m_Freeze(const OpTy &X) {
847 return Freeze_match<OpTy>(X);
848}
849
850template <typename LHS, typename RHS>
851inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
852 const RHS &R) {
853 return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
854}
855
856template <typename LHS, typename RHS>
857inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
858 const RHS &R) {
859 return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
860}
861
862template <typename LHS, typename RHS>
863inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
864 const RHS &R) {
865 return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
866}
867
868template <typename LHS, typename RHS>
869inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
870 const RHS &R) {
871 return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
872}
873
874template <typename LHS, typename RHS>
875inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
876 const RHS &R) {
877 return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
878}
879
880template <typename LHS, typename RHS>
881inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
882 const RHS &R) {
883 return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
884}
885
886template <typename LHS, typename RHS>
887inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
888 const RHS &R) {
889 return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
890}
891
892template <typename LHS, typename RHS>
893inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
894 const RHS &R) {
895 return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
896}
897
898template <typename LHS, typename RHS>
899inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
900 const RHS &R) {
901 return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
902}
903
904template <typename LHS, typename RHS>
905inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
906 const RHS &R) {
907 return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
908}
909
910template <typename LHS, typename RHS>
911inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
912 const RHS &R) {
913 return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
914}
915
916template <typename LHS, typename RHS>
917inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
918 const RHS &R) {
919 return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
920}
921
922template <typename LHS, typename RHS>
923inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
924 const RHS &R) {
925 return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
926}
927
928template <typename LHS, typename RHS>
929inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
930 const RHS &R) {
931 return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
932}
933
934template <typename LHS_t, typename RHS_t, unsigned Opcode,
935 unsigned WrapFlags = 0>
936struct OverflowingBinaryOp_match {
937 LHS_t L;
938 RHS_t R;
939
940 OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
941 : L(LHS), R(RHS) {}
942
943 template <typename OpTy> bool match(OpTy *V) {
944 if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
945 if (Op->getOpcode() != Opcode)
946 return false;
947 if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
948 !Op->hasNoUnsignedWrap())
949 return false;
950 if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
951 !Op->hasNoSignedWrap())
952 return false;
953 return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
954 }
955 return false;
956 }
957};
958
959template <typename LHS, typename RHS>
960inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
961 OverflowingBinaryOperator::NoSignedWrap>
962m_NSWAdd(const LHS &L, const RHS &R) {
963 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
964 OverflowingBinaryOperator::NoSignedWrap>(
965 L, R);
966}
967template <typename LHS, typename RHS>
968inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
969 OverflowingBinaryOperator::NoSignedWrap>
970m_NSWSub(const LHS &L, const RHS &R) {
971 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
972 OverflowingBinaryOperator::NoSignedWrap>(
973 L, R);
974}
975template <typename LHS, typename RHS>
976inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
977 OverflowingBinaryOperator::NoSignedWrap>
978m_NSWMul(const LHS &L, const RHS &R) {
979 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
980 OverflowingBinaryOperator::NoSignedWrap>(
981 L, R);
982}
983template <typename LHS, typename RHS>
984inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
985 OverflowingBinaryOperator::NoSignedWrap>
986m_NSWShl(const LHS &L, const RHS &R) {
987 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
988 OverflowingBinaryOperator::NoSignedWrap>(
989 L, R);
990}
991
992template <typename LHS, typename RHS>
993inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
994 OverflowingBinaryOperator::NoUnsignedWrap>
995m_NUWAdd(const LHS &L, const RHS &R) {
996 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
997 OverflowingBinaryOperator::NoUnsignedWrap>(
998 L, R);
999}
1000template <typename LHS, typename RHS>
1001inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1002 OverflowingBinaryOperator::NoUnsignedWrap>
1003m_NUWSub(const LHS &L, const RHS &R) {
1004 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1005 OverflowingBinaryOperator::NoUnsignedWrap>(
1006 L, R);
1007}
1008template <typename LHS, typename RHS>
1009inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1010 OverflowingBinaryOperator::NoUnsignedWrap>
1011m_NUWMul(const LHS &L, const RHS &R) {
1012 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1013 OverflowingBinaryOperator::NoUnsignedWrap>(
1014 L, R);
1015}
1016template <typename LHS, typename RHS>
1017inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1018 OverflowingBinaryOperator::NoUnsignedWrap>
1019m_NUWShl(const LHS &L, const RHS &R) {
1020 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1021 OverflowingBinaryOperator::NoUnsignedWrap>(
1022 L, R);
1023}
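// Illustrative use (hypothetical value V): requiring the no-signed-wrap flag
// as part of the pattern, rather than checking it after a plain m_Add match:
//
//   Value *X, *Y;
//   if (match(V, m_NSWAdd(m_Value(X), m_Value(Y))))
//     ... // V is "add nsw X, Y"; signed overflow would be UB, so a
//         // transform may reason with signed arithmetic identities.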
1024
1025//===----------------------------------------------------------------------===//
1026// Class that matches a group of binary opcodes.
1027//
1028template <typename LHS_t, typename RHS_t, typename Predicate>
1029struct BinOpPred_match : Predicate {
1030 LHS_t L;
1031 RHS_t R;
1032
1033 BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1034
1035 template <typename OpTy> bool match(OpTy *V) {
1036 if (auto *I = dyn_cast<Instruction>(V))
1037 return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
1038 R.match(I->getOperand(1));
1039 if (auto *CE = dyn_cast<ConstantExpr>(V))
1040 return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
1041 R.match(CE->getOperand(1));
1042 return false;
1043 }
1044};
1045
1046struct is_shift_op {
1047 bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
1048};
1049
1050struct is_right_shift_op {
1051 bool isOpType(unsigned Opcode) {
1052 return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
1053 }
1054};
1055
1056struct is_logical_shift_op {
1057 bool isOpType(unsigned Opcode) {
1058 return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
1059 }
1060};
1061
1062struct is_bitwiselogic_op {
1063 bool isOpType(unsigned Opcode) {
1064 return Instruction::isBitwiseLogicOp(Opcode);
1065 }
1066};
1067
1068struct is_idiv_op {
1069 bool isOpType(unsigned Opcode) {
1070 return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
1071 }
1072};
1073
1074struct is_irem_op {
1075 bool isOpType(unsigned Opcode) {
1076 return Opcode == Instruction::SRem || Opcode == Instruction::URem;
1077 }
1078};
1079
1080/// Matches shift operations.
1081template <typename LHS, typename RHS>
1082inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
1083 const RHS &R) {
1084 return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
1085}
1086
1087/// Matches right shift operations.
1088template <typename LHS, typename RHS>
1089inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
1090 const RHS &R) {
1091 return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
1092}
1093
1094/// Matches logical shift operations.
1095template <typename LHS, typename RHS>
1096inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
1097m_LogicalShift(const LHS &L, const RHS &R) {
1098 return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
1099}
1100
1101/// Matches bitwise logic operations.
1102template <typename LHS, typename RHS>
1103inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
1104m_BitwiseLogic(const LHS &L, const RHS &R) {
1105 return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
1106}
1107
1108/// Matches integer division operations.
1109template <typename LHS, typename RHS>
1110inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
1111 const RHS &R) {
1112 return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
1113}
1114
1115/// Matches integer remainder operations.
1116template <typename LHS, typename RHS>
1117inline BinOpPred_match<LHS, RHS, is_irem_op> m_IRem(const LHS &L,
1118 const RHS &R) {
1119 return BinOpPred_match<LHS, RHS, is_irem_op>(L, R);
1120}
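// Illustrative use (hypothetical value V): the grouped matchers let one
// pattern cover a whole opcode family, e.g. any of shl/lshr/ashr:
//
//   Value *X;
//   ConstantInt *Amt;
//   if (match(V, m_Shift(m_Value(X), m_ConstantInt(Amt))))
//     ... // V shifts X by the constant amount Amt.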
1121
1122//===----------------------------------------------------------------------===//
1123// Class that matches exact binary ops.
1124//
1125template <typename SubPattern_t> struct Exact_match {
1126 SubPattern_t SubPattern;
1127
1128 Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
1129
1130 template <typename OpTy> bool match(OpTy *V) {
1131 if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
1132 return PEO->isExact() && SubPattern.match(V);
1133 return false;
1134 }
1135};
1136
1137template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
1138 return SubPattern;
1139}
1140
1141//===----------------------------------------------------------------------===//
1142// Matchers for CmpInst classes
1143//
1144
1145template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
1146 bool Commutable = false>
1147struct CmpClass_match {
1148 PredicateTy &Predicate;
1149 LHS_t L;
1150 RHS_t R;
1151
1152 // The evaluation order is always stable, regardless of Commutability.
1153 // The LHS is always matched first.
1154 CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
1155 : Predicate(Pred), L(LHS), R(RHS) {}
1156
1157 template <typename OpTy> bool match(OpTy *V) {
1158 if (auto *I = dyn_cast<Class>(V))
1159 if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
1160 (Commutable && L.match(I->getOperand(1)) &&
1161 R.match(I->getOperand(0)))) {
1162 Predicate = I->getPredicate();
1163 return true;
1164 }
1165 return false;
1166 }
1167};
1168
1169template <typename LHS, typename RHS>
1170inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
1171m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1172 return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
1173}
1174
1175template <typename LHS, typename RHS>
1176inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
1177m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1178 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
1179}
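// Illustrative use (hypothetical value V): the predicate is an out-parameter,
// so one pattern both matches the compare and reports its direction:
//
//   ICmpInst::Predicate Pred;
//   Value *X;
//   if (match(V, m_ICmp(Pred, m_Value(X), m_Zero())) &&
//       Pred == ICmpInst::ICMP_SLT)
//     ... // V is "icmp slt X, 0", i.e. a sign test.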
1180
1181template <typename LHS, typename RHS>
1182inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
1183m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1184 return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
1185}
1186
1187//===----------------------------------------------------------------------===//
1188// Matchers for instructions with a given opcode and number of operands.
1189//
1190
1191/// Matches instructions with Opcode and one operand.
1192template <typename T0, unsigned Opcode> struct OneOps_match {
1193 T0 Op1;
1194
1195 OneOps_match(const T0 &Op1) : Op1(Op1) {}
1196
1197 template <typename OpTy> bool match(OpTy *V) {
1198 if (V->getValueID() == Value::InstructionVal + Opcode) {
1199 auto *I = cast<Instruction>(V);
1200 return Op1.match(I->getOperand(0));
1201 }
1202 return false;
1203 }
1204};
1205
1206/// Matches instructions with Opcode and two operands.
1207template <typename T0, typename T1, unsigned Opcode> struct TwoOps_match {
1208 T0 Op1;
1209 T1 Op2;
1210
1211 TwoOps_match(const T0 &Op1, const T1 &Op2) : Op1(Op1), Op2(Op2) {}
1212
1213 template <typename OpTy> bool match(OpTy *V) {
1214 if (V->getValueID() == Value::InstructionVal + Opcode) {
1215 auto *I = cast<Instruction>(V);
1216 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1));
1217 }
1218 return false;
1219 }
1220};
1221
1222/// Matches instructions with Opcode and three operands.
1223template <typename T0, typename T1, typename T2, unsigned Opcode>
1224struct ThreeOps_match {
1225 T0 Op1;
1226 T1 Op2;
1227 T2 Op3;
1228
1229 ThreeOps_match(const T0 &Op1, const T1 &Op2, const T2 &Op3)
1230 : Op1(Op1), Op2(Op2), Op3(Op3) {}
1231
1232 template <typename OpTy> bool match(OpTy *V) {
1233 if (V->getValueID() == Value::InstructionVal + Opcode) {
1234 auto *I = cast<Instruction>(V);
1235 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
1236 Op3.match(I->getOperand(2));
1237 }
1238 return false;
1239 }
1240};
1241
1242/// Matches SelectInst.
1243template <typename Cond, typename LHS, typename RHS>
1244inline ThreeOps_match<Cond, LHS, RHS, Instruction::Select>
1245m_Select(const Cond &C, const LHS &L, const RHS &R) {
1246 return ThreeOps_match<Cond, LHS, RHS, Instruction::Select>(C, L, R);
1247}
1248
1249/// This matches a select of two constants, e.g.:
1250/// m_SelectCst<-1, 0>(m_Value(V))
1251template <int64_t L, int64_t R, typename Cond>
1252inline ThreeOps_match<Cond, constantint_match<L>, constantint_match<R>,
1253 Instruction::Select>
1254m_SelectCst(const Cond &C) {
1255 return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
1256}
1257
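A brief usage sketch, not part of the listed header, showing both select matchers; helper names are illustrative.

// Hypothetical helpers built on m_Select and m_SelectCst.
static bool isAnySelect(llvm::Value *V, llvm::Value *&C, llvm::Value *&T,
                        llvm::Value *&F) {
  using namespace llvm::PatternMatch;
  return match(V, m_Select(m_Value(C), m_Value(T), m_Value(F)));
}
static bool isSignMaskSelect(llvm::Value *V, llvm::Value *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_SelectCst<-1, 0>(m_Value(C))); // "select %C, -1, 0"
}
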
1258/// Matches InsertElementInst.
1259template <typename Val_t, typename Elt_t, typename Idx_t>
1260inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
1261m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
1262 return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
1263 Val, Elt, Idx);
1264}
1265
1266/// Matches ExtractElementInst.
1267template <typename Val_t, typename Idx_t>
1268inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
1269m_ExtractElement(const Val_t &Val, const Idx_t &Idx) {
1270 return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
1271}
1272
1273/// Matches ShuffleVectorInst.
1274template <typename V1_t, typename V2_t, typename Mask_t>
1275inline ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>
1276m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m) {
1277 return ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>(v1, v2,
1278 m);
1279}
1280
1281/// Matches LoadInst.
1282template <typename OpTy>
1283inline OneOps_match<OpTy, Instruction::Load> m_Load(const OpTy &Op) {
1284 return OneOps_match<OpTy, Instruction::Load>(Op);
1285}
1286
1287/// Matches StoreInst.
1288template <typename ValueOpTy, typename PointerOpTy>
1289inline TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>
1290m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) {
1291 return TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>(ValueOp,
1292 PointerOp);
1293}
1294
1295//===----------------------------------------------------------------------===//
1296// Matchers for CastInst classes
1297//
1298
1299template <typename Op_t, unsigned Opcode> struct CastClass_match {
1300 Op_t Op;
1301
1302 CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
1303
1304 template <typename OpTy> bool match(OpTy *V) {
1305 if (auto *O = dyn_cast<Operator>(V))
1306 return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
1307 return false;
1308 }
1309};
1310
1311/// Matches BitCast.
1312template <typename OpTy>
1313inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
1314 return CastClass_match<OpTy, Instruction::BitCast>(Op);
1315}
1316
1317/// Matches PtrToInt.
1318template <typename OpTy>
1319inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
1320 return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
1321}
1322
1323/// Matches Trunc.
1324template <typename OpTy>
1325inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
1326 return CastClass_match<OpTy, Instruction::Trunc>(Op);
1327}
1328
1329template <typename OpTy>
1330inline match_combine_or<CastClass_match<OpTy, Instruction::Trunc>, OpTy>
1331m_TruncOrSelf(const OpTy &Op) {
1332 return m_CombineOr(m_Trunc(Op), Op);
1333}
1334
1335/// Matches SExt.
1336template <typename OpTy>
1337inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
1338 return CastClass_match<OpTy, Instruction::SExt>(Op);
1339}
1340
1341/// Matches ZExt.
1342template <typename OpTy>
1343inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
1344 return CastClass_match<OpTy, Instruction::ZExt>(Op);
1345}
1346
1347template <typename OpTy>
1348inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, OpTy>
1349m_ZExtOrSelf(const OpTy &Op) {
1350 return m_CombineOr(m_ZExt(Op), Op);
1351}
1352
1353template <typename OpTy>
1354inline match_combine_or<CastClass_match<OpTy, Instruction::SExt>, OpTy>
1355m_SExtOrSelf(const OpTy &Op) {
1356 return m_CombineOr(m_SExt(Op), Op);
1357}
1358
1359template <typename OpTy>
1360inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
1361 CastClass_match<OpTy, Instruction::SExt>>
1362m_ZExtOrSExt(const OpTy &Op) {
1363 return m_CombineOr(m_ZExt(Op), m_SExt(Op));
1364}
1365
1366template <typename OpTy>
1367inline match_combine_or<
1368 match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
1369 CastClass_match<OpTy, Instruction::SExt>>,
1370 OpTy>
1371m_ZExtOrSExtOrSelf(const OpTy &Op) {
1372 return m_CombineOr(m_ZExtOrSExt(Op), Op);
1373}
1374
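A brief usage sketch, not part of the listed header: the combine-or cast matchers let one pattern cover both the widened and the unwidened form of a value. The helper name is illustrative.

// Hypothetical helper: look through an optional zext/sext.
static llvm::Value *stripIntExt(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *Narrow = nullptr;
  if (match(V, m_ZExtOrSExt(m_Value(Narrow))))
    return Narrow; // the pre-extension value
  return V; // not an extension; m_ZExtOrSExtOrSelf folds both cases into one
}
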
1375/// Matches UIToFP.
1376template <typename OpTy>
1377inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
1378 return CastClass_match<OpTy, Instruction::UIToFP>(Op);
1379}
1380
1381/// Matches SIToFP.
1382template <typename OpTy>
1383inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
1384 return CastClass_match<OpTy, Instruction::SIToFP>(Op);
1385}
1386
1387/// Matches FPTrunc
1388template <typename OpTy>
1389inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
1390 return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
1391}
1392
1393/// Matches FPExt
1394template <typename OpTy>
1395inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
1396 return CastClass_match<OpTy, Instruction::FPExt>(Op);
1397}
1398
1399//===----------------------------------------------------------------------===//
1400// Matchers for control flow.
1401//
1402
1403struct br_match {
1404 BasicBlock *&Succ;
1405
1406 br_match(BasicBlock *&Succ) : Succ(Succ) {}
1407
1408 template <typename OpTy> bool match(OpTy *V) {
1409 if (auto *BI = dyn_cast<BranchInst>(V))
1410 if (BI->isUnconditional()) {
1411 Succ = BI->getSuccessor(0);
1412 return true;
1413 }
1414 return false;
1415 }
1416};
1417
1418inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
1419
1420template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
1421struct brc_match {
1422 Cond_t Cond;
1423 TrueBlock_t T;
1424 FalseBlock_t F;
1425
1426 brc_match(const Cond_t &C, const TrueBlock_t &t, const FalseBlock_t &f)
1427 : Cond(C), T(t), F(f) {}
1428
1429 template <typename OpTy> bool match(OpTy *V) {
1430 if (auto *BI = dyn_cast<BranchInst>(V))
1431 if (BI->isConditional() && Cond.match(BI->getCondition()))
1432 return T.match(BI->getSuccessor(0)) && F.match(BI->getSuccessor(1));
1433 return false;
1434 }
1435};
1436
1437template <typename Cond_t>
1438inline brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>
1439m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
1440 return brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>(
1441 C, m_BasicBlock(T), m_BasicBlock(F));
1442}
1443
1444template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
1445inline brc_match<Cond_t, TrueBlock_t, FalseBlock_t>
1446m_Br(const Cond_t &C, const TrueBlock_t &T, const FalseBlock_t &F) {
1447 return brc_match<Cond_t, TrueBlock_t, FalseBlock_t>(C, T, F);
1448}
1449
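A brief usage sketch, not part of the listed header: m_Br decomposes a conditional branch into its condition and the two successor blocks. The helper name is illustrative.

// Hypothetical helper: pull apart a conditional branch terminator.
static bool getBranchParts(llvm::Value *V, llvm::Value *&Cond,
                           llvm::BasicBlock *&TrueBB,
                           llvm::BasicBlock *&FalseBB) {
  using namespace llvm::PatternMatch;
  return match(V, m_Br(m_Value(Cond), TrueBB, FalseBB));
}
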
1450//===----------------------------------------------------------------------===//
1451// Matchers for max/min idioms, e.g.: "select (sgt x, y), x, y" -> smax(x, y).
1452//
1453
1454template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
1455 bool Commutable = false>
1456struct MaxMin_match {
1457 LHS_t L;
1458 RHS_t R;
1459
1460 // The evaluation order is always stable, regardless of Commutability.
1461 // The LHS is always matched first.
1462 MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1463
1464 template <typename OpTy> bool match(OpTy *V) {
1465 // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
1466 auto *SI = dyn_cast<SelectInst>(V);
1467 if (!SI)
1468 return false;
1469 auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
1470 if (!Cmp)
1471 return false;
1472 // At this point we have a select conditioned on a comparison. Check that
1473 // it is the values returned by the select that are being compared.
1474 Value *TrueVal = SI->getTrueValue();
1475 Value *FalseVal = SI->getFalseValue();
1476 Value *LHS = Cmp->getOperand(0);
1477 Value *RHS = Cmp->getOperand(1);
1478 if ((TrueVal != LHS || FalseVal != RHS) &&
1479 (TrueVal != RHS || FalseVal != LHS))
1480 return false;
1481 typename CmpInst_t::Predicate Pred =
1482 LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
1483 // Does "(x pred y) ? x : y" represent the desired max/min operation?
1484 if (!Pred_t::match(Pred))
1485 return false;
1486 // It does! Bind the operands.
1487 return (L.match(LHS) && R.match(RHS)) ||
1488 (Commutable && L.match(RHS) && R.match(LHS));
1489 }
1490};
1491
1492/// Helper class for identifying signed max predicates.
1493struct smax_pred_ty {
1494 static bool match(ICmpInst::Predicate Pred) {
1495 return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
1496 }
1497};
1498
1499/// Helper class for identifying signed min predicates.
1500struct smin_pred_ty {
1501 static bool match(ICmpInst::Predicate Pred) {
1502 return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
1503 }
1504};
1505
1506/// Helper class for identifying unsigned max predicates.
1507struct umax_pred_ty {
1508 static bool match(ICmpInst::Predicate Pred) {
1509 return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
1510 }
1511};
1512
1513/// Helper class for identifying unsigned min predicates.
1514struct umin_pred_ty {
1515 static bool match(ICmpInst::Predicate Pred) {
1516 return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
1517 }
1518};
1519
1520/// Helper class for identifying ordered max predicates.
1521struct ofmax_pred_ty {
1522 static bool match(FCmpInst::Predicate Pred) {
1523 return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
1524 }
1525};
1526
1527/// Helper class for identifying ordered min predicates.
1528struct ofmin_pred_ty {
1529 static bool match(FCmpInst::Predicate Pred) {
1530 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
1531 }
1532};
1533
1534/// Helper class for identifying unordered max predicates.
1535struct ufmax_pred_ty {
1536 static bool match(FCmpInst::Predicate Pred) {
1537 return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
1538 }
1539};
1540
1541/// Helper class for identifying unordered min predicates.
1542struct ufmin_pred_ty {
1543 static bool match(FCmpInst::Predicate Pred) {
1544 return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
1545 }
1546};
1547
1548template <typename LHS, typename RHS>
1549inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
1550 const RHS &R) {
1551 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
1552}
1553
1554template <typename LHS, typename RHS>
1555inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
1556 const RHS &R) {
1557 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
1558}
1559
1560template <typename LHS, typename RHS>
1561inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
1562 const RHS &R) {
1563 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
1564}
1565
1566template <typename LHS, typename RHS>
1567inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
1568 const RHS &R) {
1569 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
1570}
1571
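A brief usage sketch, not part of the listed header, matching the idiom named in the section header above; the helper name is illustrative.

// Hypothetical helper: recognize "%c = icmp sgt %A, %B" followed by
// "%v = select %c, %A, %B", i.e. the canonical signed-max idiom.
static bool isSMaxIdiom(llvm::Value *V, llvm::Value *&A, llvm::Value *&B) {
  using namespace llvm::PatternMatch;
  return match(V, m_SMax(m_Value(A), m_Value(B)));
}
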
1572/// Match an 'ordered' floating point maximum function.
1573/// Floating point has one special value 'NaN'. Therefore, there is no total
1574/// order. However, if we can ignore the 'NaN' value (for example, because of a
1575/// 'no-nans-fp-math' flag) a combination of a fcmp and select has 'maximum'
1576/// semantics. In the presence of 'NaN' we have to preserve the original
1577/// select(fcmp(ogt/ge, L, R), L, R) semantics matched by this predicate.
1578///
1579///                    max(L, R)  iff L and R are not NaN
1580/// m_OrdFMax(L, R) =  R          iff L or R are NaN
1581template <typename LHS, typename RHS>
1582inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
1583 const RHS &R) {
1584 return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
1585}
1586
1587/// Match an 'ordered' floating point minimum function.
1588/// Floating point has one special value 'NaN'. Therefore, there is no total
1589/// order. However, if we can ignore the 'NaN' value (for example, because of a
1590/// 'no-nans-fp-math' flag) a combination of a fcmp and select has 'minimum'
1591/// semantics. In the presence of 'NaN' we have to preserve the original
1592/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
1593///
1594///                    min(L, R)  iff L and R are not NaN
1595/// m_OrdFMin(L, R) =  R          iff L or R are NaN
1596template <typename LHS, typename RHS>
1597inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
1598 const RHS &R) {
1599 return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
1600}
1601
1602/// Match an 'unordered' floating point maximum function.
1603/// Floating point has one special value 'NaN'. Therefore, there is no total
1604/// order. However, if we can ignore the 'NaN' value (for example, because of a
1605/// 'no-nans-fp-math' flag) a combination of a fcmp and select has 'maximum'
1606/// semantics. In the presence of 'NaN' we have to preserve the original
1607/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
1608///
1609///                      max(L, R)  iff L and R are not NaN
1610/// m_UnordFMax(L, R) =  L          iff L or R are NaN
1611template <typename LHS, typename RHS>
1612inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
1613m_UnordFMax(const LHS &L, const RHS &R) {
1614 return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
1615}
1616
1617/// Match an 'unordered' floating point minimum function.
1618/// Floating point has one special value 'NaN'. Therefore, there is no total
1619/// order. However, if we can ignore the 'NaN' value (for example, because of a
1620/// 'no-nans-fp-math' flag) a combination of a fcmp and select has 'minimum'
1621/// semantics. In the presence of 'NaN' we have to preserve the original
1622/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
1623///
1624///                      min(L, R)  iff L and R are not NaN
1625/// m_UnordFMin(L, R) =  L          iff L or R are NaN
1626template <typename LHS, typename RHS>
1627inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
1628m_UnordFMin(const LHS &L, const RHS &R) {
1629 return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
1630}
1631
1632//===----------------------------------------------------------------------===//
1633// Matchers for overflow check patterns: e.g. (a + b) u< a
1634//
1635
1636template <typename LHS_t, typename RHS_t, typename Sum_t>
1637struct UAddWithOverflow_match {
1638 LHS_t L;
1639 RHS_t R;
1640 Sum_t S;
1641
1642 UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
1643 : L(L), R(R), S(S) {}
1644
1645 template <typename OpTy> bool match(OpTy *V) {
1646 Value *ICmpLHS, *ICmpRHS;
1647 ICmpInst::Predicate Pred;
1648 if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
1649 return false;
1650
1651 Value *AddLHS, *AddRHS;
1652 auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));
1653
1654 // (a + b) u< a, (a + b) u< b
1655 if (Pred == ICmpInst::ICMP_ULT)
1656 if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
1657 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
1658
1659 // a >u (a + b), b >u (a + b)
1660 if (Pred == ICmpInst::ICMP_UGT)
1661 if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
1662 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
1663
1664 // Match special-case for increment-by-1.
1665 if (Pred == ICmpInst::ICMP_EQ) {
1666 // (a + 1) == 0
1667 // (1 + a) == 0
1668 if (AddExpr.match(ICmpLHS) && m_ZeroInt().match(ICmpRHS) &&
1669 (m_One().match(AddLHS) || m_One().match(AddRHS)))
1670 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
1671 // 0 == (a + 1)
1672 // 0 == (1 + a)
1673 if (m_ZeroInt().match(ICmpLHS) && AddExpr.match(ICmpRHS) &&
1674 (m_One().match(AddLHS) || m_One().match(AddRHS)))
1675 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
1676 }
1677
1678 return false;
1679 }
1680};
1681
1682/// Match an icmp instruction checking for unsigned overflow on addition.
1683///
1684/// S is matched to the addition whose result is being checked for overflow, and
1685/// L and R are matched to the LHS and RHS of S.
1686template <typename LHS_t, typename RHS_t, typename Sum_t>
1687UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
1688m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
1689 return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
1690}
1691
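A brief usage sketch, not part of the listed header, for the overflow-check matcher just defined; the helper name is illustrative.

// Hypothetical helper: detect "(a + b) u< a" and the variants listed above.
static bool isUAddOverflowCheck(llvm::Value *V, llvm::Value *&A,
                                llvm::Value *&B, llvm::Value *&Sum) {
  using namespace llvm::PatternMatch;
  return match(V, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)));
}
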
1692template <typename Opnd_t> struct Argument_match {
1693 unsigned OpI;
1694 Opnd_t Val;
1695
1696 Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
1697
1698 template <typename OpTy> bool match(OpTy *V) {
1699 // FIXME: Should likely be switched to use `CallBase`.
1700 if (const auto *CI = dyn_cast<CallInst>(V))
1701 return Val.match(CI->getArgOperand(OpI));
1702 return false;
1703 }
1704};
1705
1706/// Match an argument.
1707template <unsigned OpI, typename Opnd_t>
1708inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
1709 return Argument_match<Opnd_t>(OpI, Op);
1710}
1711
1712/// Intrinsic matchers.
1713struct IntrinsicID_match {
1714 unsigned ID;
1715
1716 IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
1717
1718 template <typename OpTy> bool match(OpTy *V) {
1719    if (const auto *CI = dyn_cast<CallInst>(V))
34
Assuming 'V' is not a 'CallInst'
34.1
'CI' is null
35
Taking false branch
43
'V' is not a 'CallInst'
43.1
'CI' is null
44
Taking false branch
1720      if (const auto *F = CI->getCalledFunction())
1721        return F->getIntrinsicID() == ID;
1722    return false;
36
Returning zero, which participates in a condition later
45
Returning zero, which participates in a condition later
1723 }
1724};
1725
1726/// Intrinsic matches are combinations of ID matchers and argument
1727/// matchers. Higher-arity matchers are defined recursively by and-ing
1728/// them with lower-arity matchers. Here are some convenient typedefs for
1729/// up to several arguments; more can be added as needed.
1730template <typename T0 = void, typename T1 = void, typename T2 = void,
1731 typename T3 = void, typename T4 = void, typename T5 = void,
1732 typename T6 = void, typename T7 = void, typename T8 = void,
1733 typename T9 = void, typename T10 = void>
1734struct m_Intrinsic_Ty;
1735template <typename T0> struct m_Intrinsic_Ty<T0> {
1736 using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
1737};
1738template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
1739 using Ty =
1740 match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
1741};
1742template <typename T0, typename T1, typename T2>
1743struct m_Intrinsic_Ty<T0, T1, T2> {
1744 using Ty =
1745 match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
1746 Argument_match<T2>>;
1747};
1748template <typename T0, typename T1, typename T2, typename T3>
1749struct m_Intrinsic_Ty<T0, T1, T2, T3> {
1750 using Ty =
1751 match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
1752 Argument_match<T3>>;
1753};
1754
1755/// Match intrinsic calls like this:
1756/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
1757template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
1758 return IntrinsicID_match(IntrID);
1759}
1760
1761template <Intrinsic::ID IntrID, typename T0>
1762inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
1763 return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
1764}
1765
1766template <Intrinsic::ID IntrID, typename T0, typename T1>
1767inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
1768 const T1 &Op1) {
1769 return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
1770}
1771
1772template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
1773inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
1774m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
1775 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
1776}
1777
1778template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
1779 typename T3>
1780inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
1781m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
1782 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
1783}
1784
1785// Helper intrinsic matching specializations.
1786template <typename Opnd0>
1787inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
1788 return m_Intrinsic<Intrinsic::bitreverse>(Op0);
1789}
1790
1791template <typename Opnd0>
1792inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
1793 return m_Intrinsic<Intrinsic::bswap>(Op0);
1794}
1795
1796template <typename Opnd0>
1797inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FAbs(const Opnd0 &Op0) {
1798 return m_Intrinsic<Intrinsic::fabs>(Op0);
1799}
1800
1801template <typename Opnd0>
1802inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FCanonicalize(const Opnd0 &Op0) {
1803 return m_Intrinsic<Intrinsic::canonicalize>(Op0);
1804}
1805
1806template <typename Opnd0, typename Opnd1>
1807inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
1808 const Opnd1 &Op1) {
1809 return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
1810}
1811
1812template <typename Opnd0, typename Opnd1>
1813inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
1814 const Opnd1 &Op1) {
1815 return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
1816}
1817
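A brief usage sketch, not part of the listed header, following the m_Intrinsic example given in the comment above; the helper name is illustrative.

// Hypothetical helper: match a call to llvm.fabs and capture its operand;
// m_FAbs(m_Value(X)) is the equivalent shorthand defined above.
static bool isFAbsCall(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_Intrinsic<llvm::Intrinsic::fabs>(m_Value(X)));
}
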
1818//===----------------------------------------------------------------------===//
1819// Matchers for two-operands operators with the operators in either order
1820//
1821
1822/// Matches a BinaryOperator with LHS and RHS in either order.
1823template <typename LHS, typename RHS>
1824inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
1825 return AnyBinaryOp_match<LHS, RHS, true>(L, R);
1826}
1827
1828/// Matches an ICmp with a predicate over LHS and RHS in either order.
1829/// Does not swap the predicate.
1830template <typename LHS, typename RHS>
1831inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
1832m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1833 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
1834 R);
1835}
1836
1837/// Matches a Add with LHS and RHS in either order.
1838template <typename LHS, typename RHS>
1839inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
1840 const RHS &R) {
1841 return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
1842}
1843
1844/// Matches a Mul with LHS and RHS in either order.
1845template <typename LHS, typename RHS>
1846inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
1847 const RHS &R) {
1848 return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
1849}
1850
1851/// Matches an And with LHS and RHS in either order.
1852template <typename LHS, typename RHS>
1853inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
1854 const RHS &R) {
1855 return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
1856}
1857
1858/// Matches an Or with LHS and RHS in either order.
1859template <typename LHS, typename RHS>
1860inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
1861 const RHS &R) {
1862 return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
1863}
1864
1865/// Matches an Xor with LHS and RHS in either order.
1866template <typename LHS, typename RHS>
1867inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
1868 const RHS &R) {
1869 return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
1870}
1871
1872/// Matches a 'Neg' as 'sub 0, V'.
1873template <typename ValTy>
1874inline BinaryOp_match<cst_pred_ty<is_zero_int>, ValTy, Instruction::Sub>
1875m_Neg(const ValTy &V) {
1876 return m_Sub(m_ZeroInt(), V);
1877}
1878
1879/// Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
1880template <typename ValTy>
1881inline BinaryOp_match<ValTy, cst_pred_ty<is_all_ones>, Instruction::Xor, true>
1882m_Not(const ValTy &V) {
1883 return m_c_Xor(V, m_AllOnes());
1884}
1885
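A brief usage sketch, not part of the listed header, highlighting the asymmetry documented above: m_Not accepts either operand order, m_Neg does not. Helper names are illustrative.

// Hypothetical helpers: m_Not matches "xor %X, -1" in either operand order,
// while m_Neg only matches "sub 0, %X" with the zero as the first operand.
static bool isBitwiseNot(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_Not(m_Value(X)));
}
static bool isIntegerNeg(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_Neg(m_Value(X)));
}
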
1886/// Matches an SMin with LHS and RHS in either order.
1887template <typename LHS, typename RHS>
1888inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
1889m_c_SMin(const LHS &L, const RHS &R) {
1890 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
1891}
1892/// Matches an SMax with LHS and RHS in either order.
1893template <typename LHS, typename RHS>
1894inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
1895m_c_SMax(const LHS &L, const RHS &R) {
1896 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
1897}
1898/// Matches a UMin with LHS and RHS in either order.
1899template <typename LHS, typename RHS>
1900inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
1901m_c_UMin(const LHS &L, const RHS &R) {
1902 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
1903}
1904/// Matches a UMax with LHS and RHS in either order.
1905template <typename LHS, typename RHS>
1906inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
1907m_c_UMax(const LHS &L, const RHS &R) {
1908 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
1909}
1910
1911/// Matches FAdd with LHS and RHS in either order.
1912template <typename LHS, typename RHS>
1913inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true>
1914m_c_FAdd(const LHS &L, const RHS &R) {
1915 return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R);
1916}
1917
1918/// Matches FMul with LHS and RHS in either order.
1919template <typename LHS, typename RHS>
1920inline BinaryOp_match<LHS, RHS, Instruction::FMul, true>
1921m_c_FMul(const LHS &L, const RHS &R) {
1922 return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R);
1923}
1924
1925template <typename Opnd_t> struct Signum_match {
1926 Opnd_t Val;
1927 Signum_match(const Opnd_t &V) : Val(V) {}
1928
1929 template <typename OpTy> bool match(OpTy *V) {
1930 unsigned TypeSize = V->getType()->getScalarSizeInBits();
1931 if (TypeSize == 0)
1932 return false;
1933
1934 unsigned ShiftWidth = TypeSize - 1;
1935 Value *OpL = nullptr, *OpR = nullptr;
1936
1937 // This is the representation of signum we match:
1938 //
1939 // signum(x) == (x >> 63) | (-x >>u 63)
1940 //
1941 // An i1 value is its own signum, so it's correct to match
1942 //
1943 // signum(x) == (x >> 0) | (-x >>u 0)
1944 //
1945 // for i1 values.
1946
1947 auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
1948 auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
1949 auto Signum = m_Or(LHS, RHS);
1950
1951 return Signum.match(V) && OpL == OpR && Val.match(OpL);
1952 }
1953};
1954
1955/// Matches a signum pattern.
1956///
1957/// signum(x) =
1958/// x > 0 -> 1
1959/// x == 0 -> 0
1960/// x < 0 -> -1
1961template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
1962 return Signum_match<Val_t>(V);
1963}
1964
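A brief usage sketch, not part of the listed header, for the signum matcher; the helper name is illustrative.

// Hypothetical helper: for an i64 input this recognizes
// "(x ashr 63) | ((0 - x) lshr 63)", which evaluates to -1, 0, or 1.
static bool isSignumOf(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_Signum(m_Value(X)));
}
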
1965template <int Ind, typename Opnd_t> struct ExtractValue_match {
1966 Opnd_t Val;
1967 ExtractValue_match(const Opnd_t &V) : Val(V) {}
1968
1969 template <typename OpTy> bool match(OpTy *V) {
1970 if (auto *I = dyn_cast<ExtractValueInst>(V))
1971 return I->getNumIndices() == 1 && I->getIndices()[0] == Ind &&
1972 Val.match(I->getAggregateOperand());
1973 return false;
1974 }
1975};
1976
1977/// Match a single index ExtractValue instruction.
1978/// For example m_ExtractValue<1>(...)
1979template <int Ind, typename Val_t>
1980inline ExtractValue_match<Ind, Val_t> m_ExtractValue(const Val_t &V) {
1981 return ExtractValue_match<Ind, Val_t>(V);
1982}
1983
1984} // end namespace PatternMatch
1985} // end namespace llvm
1986
1987#endif // LLVM_IR_PATTERNMATCH_H

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include/llvm/Analysis/AliasAnalysis.h

1//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the generic AliasAnalysis interface, which is used as the
10// common interface used by all clients of alias analysis information, and
11// implemented by all alias analysis implementations. Mod/Ref information is
12// also captured by this interface.
13//
14// Implementations of this interface must implement the various virtual methods,
15// which automatically provides functionality for the entire suite of client
16// APIs.
17//
18// This API identifies memory regions with the MemoryLocation class. The pointer
19// component specifies the base memory address of the region. The Size specifies
20// the maximum size (in address units) of the memory region, or
21// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
22// identifies the "type" of the memory reference; see the
23// TypeBasedAliasAnalysis class for details.
24//
25// Some non-obvious details include:
26// - Pointers that point to two completely different objects in memory never
27// alias, regardless of the value of the Size component.
28// - NoAlias doesn't imply inequal pointers. The most obvious example of this
29// is two pointers to constant memory. Even if they are equal, constant
30// memory is never stored to, so there will never be any dependencies.
31// In this and other situations, the pointers may be both NoAlias and
32// MustAlias at the same time. The current API can only return one result,
33// though this is rarely a problem in practice.
34//
35//===----------------------------------------------------------------------===//
36
37#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
38#define LLVM_ANALYSIS_ALIASANALYSIS_H
39
40#include "llvm/ADT/DenseMap.h"
41#include "llvm/ADT/None.h"
42#include "llvm/ADT/Optional.h"
43#include "llvm/ADT/SmallVector.h"
44#include "llvm/Analysis/MemoryLocation.h"
45#include "llvm/Analysis/TargetLibraryInfo.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/Instruction.h"
48#include "llvm/IR/Instructions.h"
49#include "llvm/IR/PassManager.h"
50#include "llvm/Pass.h"
51#include <cstdint>
52#include <functional>
53#include <memory>
54#include <vector>
55
56namespace llvm {
57
58class AnalysisUsage;
59class BasicAAResult;
60class BasicBlock;
61class DominatorTree;
62class OrderedBasicBlock;
63class Value;
64
65/// The possible results of an alias query.
66///
67/// These results are always computed between two MemoryLocation objects as
68/// a query to some alias analysis.
69///
70/// Note that these are unscoped enumerations because we would like to support
71/// implicitly testing a result for the existence of any possible aliasing with
72/// a conversion to bool, but an "enum class" doesn't support this. The
73/// canonical names from the literature are suffixed and unique anyways, and so
74/// they serve as global constants in LLVM for these results.
75///
76/// See docs/AliasAnalysis.html for more information on the specific meanings
77/// of these values.
78enum AliasResult : uint8_t {
79 /// The two locations do not alias at all.
80 ///
81 /// This value is arranged to convert to false, while all other values
82 /// convert to true. This allows a boolean context to convert the result to
83 /// a binary flag indicating whether there is the possibility of aliasing.
84 NoAlias = 0,
85 /// The two locations may or may not alias. This is the least precise result.
86 MayAlias,
87 /// The two locations alias, but only due to a partial overlap.
88 PartialAlias,
89 /// The two locations precisely alias each other.
90 MustAlias,
91};
92
93/// << operator for AliasResult.
94raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
95
96/// Flags indicating whether a memory access modifies or references memory.
97///
98/// This is no access at all, a modification, a reference, or both
99/// a modification and a reference. These are specifically structured such that
100/// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must'
101/// work with any of the possible values.
102enum class ModRefInfo : uint8_t {
103 /// Must is provided for completeness, but no routines will return only
104 /// Must today. See definition of Must below.
105 Must = 0,
106 /// The access may reference the value stored in memory,
107 /// a mustAlias relation was found, and no mayAlias or partialAlias found.
108 MustRef = 1,
109 /// The access may modify the value stored in memory,
110 /// a mustAlias relation was found, and no mayAlias or partialAlias found.
111 MustMod = 2,
112 /// The access may reference, modify or both the value stored in memory,
113 /// a mustAlias relation was found, and no mayAlias or partialAlias found.
114 MustModRef = MustRef | MustMod,
115 /// The access neither references nor modifies the value stored in memory.
116 NoModRef = 4,
117 /// The access may reference the value stored in memory.
118 Ref = NoModRef | MustRef,
119 /// The access may modify the value stored in memory.
120 Mod = NoModRef | MustMod,
121 /// The access may reference and may modify the value stored in memory.
122 ModRef = Ref | Mod,
123
124 /// About Must:
125 /// Must is set in a best effort manner.
126 /// We usually do not try our best to infer Must, instead it is merely
127 /// another piece of "free" information that is presented when available.
128 /// Must set means there was certainly a MustAlias found. For calls,
129 /// where multiple arguments are checked (argmemonly), this translates to
130 /// only MustAlias or NoAlias was found.
131 /// Must is not set for RAR accesses, even if the two locations must
132 /// alias. The reason is that two read accesses translate to an early return
133 /// of NoModRef. An additional alias check to set Must may be
134  /// expensive. Other cases may also not set Must (e.g. callCapturesBefore).
135 /// We refer to Must being *set* when the most significant bit is *cleared*.
136 /// Conversely we *clear* Must information by *setting* the Must bit to 1.
137};
138
139LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
140 return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
141 static_cast<int>(ModRefInfo::Must);
142}
143LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
144 return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef);
145}
146LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
147 return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
148 static_cast<int>(ModRefInfo::MustModRef);
149}
150LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
151 return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustMod);
152}
153LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
154 return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustRef);
155}
156LLVM_NODISCARD inline bool isMustSet(const ModRefInfo MRI) {
157 return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef));
158}
159
160LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
161 return ModRefInfo(static_cast<int>(MRI) |
162 static_cast<int>(ModRefInfo::MustMod));
163}
164LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
165 return ModRefInfo(static_cast<int>(MRI) |
166 static_cast<int>(ModRefInfo::MustRef));
167}
168LLVM_NODISCARD inline ModRefInfo setMust(const ModRefInfo MRI) {
169 return ModRefInfo(static_cast<int>(MRI) &
170 static_cast<int>(ModRefInfo::MustModRef));
171}
172LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
173 return ModRefInfo(static_cast<int>(MRI) |
174 static_cast<int>(ModRefInfo::MustModRef));
175}
176LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
177 return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
178}
179LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
180 return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod));
181}
182LLVM_NODISCARD inline ModRefInfo clearMust(const ModRefInfo MRI) {
183 return ModRefInfo(static_cast<int>(MRI) |
184 static_cast<int>(ModRefInfo::NoModRef));
185}
186LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
187 const ModRefInfo MRI2) {
188 return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
189}
190LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
191 const ModRefInfo MRI2) {
192 return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
193}
194
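A brief sketch, not part of the listed header, tracing the bit encoding documented above through the helpers; the function is illustrative and assumes <cassert> is available.

#include <cassert>

// Illustrative only: NoModRef -> Mod -> ModRef, with Must never set.
static void modRefHelpersExample() {
  using namespace llvm;
  ModRefInfo MRI = ModRefInfo::NoModRef;
  MRI = setMod(MRI);                        // now Mod
  MRI = unionModRef(MRI, ModRefInfo::Ref);  // now ModRef
  assert(isModSet(MRI) && isRefSet(MRI) && !isMustSet(MRI));
}
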
195/// The locations at which a function might access memory.
196///
197/// These are primarily used in conjunction with the \c AccessKind bits to
198/// describe both the nature of access and the locations of access for a
199/// function call.
200enum FunctionModRefLocation {
201 /// Base case is no access to memory.
202 FMRL_Nowhere = 0,
203 /// Access to memory via argument pointers.
204 FMRL_ArgumentPointees = 8,
205 /// Memory that is inaccessible via LLVM IR.
206 FMRL_InaccessibleMem = 16,
207 /// Access to any memory.
208 FMRL_Anywhere = 32 | FMRL_InaccessibleMem | FMRL_ArgumentPointees
209};
210
211/// Summary of how a function affects memory in the program.
212///
213/// Loads from constant globals are not considered memory accesses for this
214/// interface. Also, functions may freely modify stack space local to their
215/// invocation without having to report it through these interfaces.
216enum FunctionModRefBehavior {
217 /// This function does not perform any non-local loads or stores to memory.
218 ///
219 /// This property corresponds to the GCC 'const' attribute.
220 /// This property corresponds to the LLVM IR 'readnone' attribute.
221 /// This property corresponds to the IntrNoMem LLVM intrinsic flag.
222 FMRB_DoesNotAccessMemory =
223 FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef),
224
225 /// The only memory references in this function (if it has any) are
226 /// non-volatile loads from objects pointed to by its pointer-typed
227 /// arguments, with arbitrary offsets.
228 ///
229 /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
230 FMRB_OnlyReadsArgumentPointees =
231 FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref),
232
233 /// The only memory references in this function (if it has any) are
234 /// non-volatile loads and stores from objects pointed to by its
235 /// pointer-typed arguments, with arbitrary offsets.
236 ///
237 /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
238 FMRB_OnlyAccessesArgumentPointees =
239 FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef),
240
241 /// The only memory references in this function (if it has any) are
242 /// references of memory that is otherwise inaccessible via LLVM IR.
243 ///
244 /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
245 FMRB_OnlyAccessesInaccessibleMem =
246 FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef),
247
248 /// The function may perform non-volatile loads and stores of objects
249 /// pointed to by its pointer-typed arguments, with arbitrary offsets, and
250 /// it may also perform loads and stores of memory that is otherwise
251 /// inaccessible via LLVM IR.
252 ///
253 /// This property corresponds to the LLVM IR
254 /// inaccessiblemem_or_argmemonly attribute.
255 FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
256 FMRL_ArgumentPointees |
257 static_cast<int>(ModRefInfo::ModRef),
258
259 /// This function does not perform any non-local stores or volatile loads,
260 /// but may read from any memory location.
261 ///
262 /// This property corresponds to the GCC 'pure' attribute.
263 /// This property corresponds to the LLVM IR 'readonly' attribute.
264 /// This property corresponds to the IntrReadMem LLVM intrinsic flag.
265 FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref),
266
267 // This function does not read from memory anywhere, but may write to any
268 // memory location.
269 //
270 // This property corresponds to the LLVM IR 'writeonly' attribute.
271 // This property corresponds to the IntrWriteMem LLVM intrinsic flag.
272 FMRB_DoesNotReadMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod),
273
274 /// This indicates that the function could not be classified into one of the
275 /// behaviors above.
276 FMRB_UnknownModRefBehavior =
277 FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef)
278};
279
280// Wrapper method strips bits significant only in FunctionModRefBehavior,
281// to obtain a valid ModRefInfo. The benefit of using the wrapper is that if
282// ModRefInfo enum changes, the wrapper can be updated to & with the new enum
283// entry with all bits set to 1.
284LLVM_NODISCARD inline ModRefInfo
285createModRefInfo(const FunctionModRefBehavior FMRB) {
286 return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
287}
288
289/// This class stores info we want to provide to or retain within an alias
290/// query. By default, the root query is stateless and starts with a freshly
291/// constructed info object. Specific alias analyses can use this query info to
292/// store per-query state that is important for recursive or nested queries to
293/// avoid recomputing. To enable preserving this state across multiple queries
294/// where safe (due to the IR not changing), use a `BatchAAResults` wrapper.
295/// The information stored in an `AAQueryInfo` is currently limited to the
296/// caches used by BasicAA, but can further be extended to fit other AA needs.
297class AAQueryInfo {
298public:
299 using LocPair = std::pair<MemoryLocation, MemoryLocation>;
300 using AliasCacheT = SmallDenseMap<LocPair, AliasResult, 8>;
301 AliasCacheT AliasCache;
302
303 using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>;
304 IsCapturedCacheT IsCapturedCache;
305
306 AAQueryInfo() : AliasCache(), IsCapturedCache() {}
307};
308
309class BatchAAResults;
310
311class AAResults {
312public:
313  // Make these results default constructible and movable. We have to spell
314 // these out because MSVC won't synthesize them.
315 AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
316 AAResults(AAResults &&Arg);
317 ~AAResults();
318
319 /// Register a specific AA result.
320 template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
321 // FIXME: We should use a much lighter weight system than the usual
322 // polymorphic pattern because we don't own AAResult. It should
323 // ideally involve two pointers and no separate allocation.
324 AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
325 }
326
327 /// Register a function analysis ID that the results aggregation depends on.
328 ///
329 /// This is used in the new pass manager to implement the invalidation logic
330 /// where we must invalidate the results aggregation if any of our component
331 /// analyses become invalid.
332 void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); }
333
334 /// Handle invalidation events in the new pass manager.
335 ///
336 /// The aggregation is invalidated if any of the underlying analyses is
337 /// invalidated.
338 bool invalidate(Function &F, const PreservedAnalyses &PA,
339 FunctionAnalysisManager::Invalidator &Inv);
340
341 //===--------------------------------------------------------------------===//
342 /// \name Alias Queries
343 /// @{
344
345 /// The main low level interface to the alias analysis implementation.
346 /// Returns an AliasResult indicating whether the two pointers are aliased to
347 /// each other. This is the interface that must be implemented by specific
348 /// alias analysis implementations.
349 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
350
351 /// A convenience wrapper around the primary \c alias interface.
352 AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
353 LocationSize V2Size) {
354 return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
355 }
356
357 /// A convenience wrapper around the primary \c alias interface.
358 AliasResult alias(const Value *V1, const Value *V2) {
359 return alias(V1, LocationSize::unknown(), V2, LocationSize::unknown());
360 }
361
362 /// A trivial helper function to check to see if the specified pointers are
363 /// no-alias.
364 bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
365 return alias(LocA, LocB) == NoAlias;
366 }
367
368 /// A convenience wrapper around the \c isNoAlias helper interface.
369 bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2,
370 LocationSize V2Size) {
371 return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
372 }
373
374 /// A convenience wrapper around the \c isNoAlias helper interface.
375 bool isNoAlias(const Value *V1, const Value *V2) {
376 return isNoAlias(MemoryLocation(V1), MemoryLocation(V2));
377 }
378
379 /// A trivial helper function to check to see if the specified pointers are
380 /// must-alias.
381 bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
382 return alias(LocA, LocB) == MustAlias;
383 }
384
385 /// A convenience wrapper around the \c isMustAlias helper interface.
386 bool isMustAlias(const Value *V1, const Value *V2) {
387 return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) ==
388 MustAlias;
389 }
390
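A brief client-side sketch, not part of the listed header, using the convenience wrappers above; the function name is illustrative.

// Hypothetical client code: classify the relationship of two pointers.
static void classifyPointers(llvm::AAResults &AA, const llvm::Value *P1,
                             const llvm::Value *P2) {
  if (AA.isNoAlias(P1, P2)) {
    // Accesses through P1 and P2 are provably independent.
  } else if (AA.isMustAlias(P1, P2)) {
    // P1 and P2 refer to exactly the same location.
  } else {
    // MayAlias or PartialAlias: be conservative.
  }
}
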
391 /// Checks whether the given location points to constant memory, or if
392 /// \p OrLocal is true whether it points to a local alloca.
393 bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false);
394
395 /// A convenience wrapper around the primary \c pointsToConstantMemory
396 /// interface.
397 bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
398 return pointsToConstantMemory(MemoryLocation(P), OrLocal);
399 }
400
401 /// @}
402 //===--------------------------------------------------------------------===//
403 /// \name Simple mod/ref information
404 /// @{
405
406 /// Get the ModRef info associated with a pointer argument of a call. The
407 /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
408 /// that these bits do not necessarily account for the overall behavior of
409 /// the function, but rather only provide additional per-argument
410 /// information. This never sets ModRefInfo::Must.
411 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
412
413 /// Return the behavior of the given call site.
414 FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
415
416 /// Return the behavior when calling the given function.
417 FunctionModRefBehavior getModRefBehavior(const Function *F);
418
419 /// Checks if the specified call is known to never read or write memory.
420 ///
421 /// Note that if the call only reads from known-constant memory, it is also
422 /// legal to return true. Also, calls that unwind the stack are legal for
423 /// this predicate.
424 ///
425 /// Many optimizations (such as CSE and LICM) can be performed on such calls
426 /// without worrying about aliasing properties, and many calls have this
427 /// property (e.g. calls to 'sin' and 'cos').
428 ///
429 /// This property corresponds to the GCC 'const' attribute.
430 bool doesNotAccessMemory(const CallBase *Call) {
431 return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory;
432 }
433
434 /// Checks if the specified function is known to never read or write memory.
435 ///
436 /// Note that if the function only reads from known-constant memory, it is
437  /// also legal to return true. Also, functions that unwind the stack are legal
438 /// for this predicate.
439 ///
440 /// Many optimizations (such as CSE and LICM) can be performed on such calls
441 /// to such functions without worrying about aliasing properties, and many
442 /// functions have this property (e.g. 'sin' and 'cos').
443 ///
444 /// This property corresponds to the GCC 'const' attribute.
445 bool doesNotAccessMemory(const Function *F) {
446 return getModRefBehavior(F) == FMRB_DoesNotAccessMemory;
447 }
448
449 /// Checks if the specified call is known to only read from non-volatile
450 /// memory (or not access memory at all).
451 ///
452 /// Calls that unwind the stack are legal for this predicate.
453 ///
454 /// This property allows many common optimizations to be performed in the
455 /// absence of interfering store instructions, such as CSE of strlen calls.
456 ///
457 /// This property corresponds to the GCC 'pure' attribute.
458 bool onlyReadsMemory(const CallBase *Call) {
459 return onlyReadsMemory(getModRefBehavior(Call));
460 }
461
462 /// Checks if the specified function is known to only read from non-volatile
463 /// memory (or not access memory at all).
464 ///
465 /// Functions that unwind the stack are legal for this predicate.
466 ///
467 /// This property allows many common optimizations to be performed in the
468 /// absence of interfering store instructions, such as CSE of strlen calls.
469 ///
470 /// This property corresponds to the GCC 'pure' attribute.
471 bool onlyReadsMemory(const Function *F) {
472 return onlyReadsMemory(getModRefBehavior(F));
473 }
474
475 /// Checks if functions with the specified behavior are known to only read
476 /// from non-volatile memory (or not access memory at all).
477 static bool onlyReadsMemory(FunctionModRefBehavior MRB) {
478 return !isModSet(createModRefInfo(MRB));
53
Assuming the condition is true
54
Returning the value 1, which participates in a condition later
479 }
480
481 /// Checks if functions with the specified behavior are known to only write
482 /// memory (or not access memory at all).
483 static bool doesNotReadMemory(FunctionModRefBehavior MRB) {
484 return !isRefSet(createModRefInfo(MRB));
485 }
486
487 /// Checks if functions with the specified behavior are known to read and
488 /// write at most from objects pointed to by their pointer-typed arguments
489 /// (with arbitrary offsets).
490 static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) {
491 return !(MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
58
Assuming the condition is true
59
Returning the value 1, which participates in a condition later
492 }
493
494 /// Checks if functions with the specified behavior are known to potentially
495  /// read or write from objects pointed to by their pointer-typed arguments
496 /// (with arbitrary offsets).
497 static bool doesAccessArgPointees(FunctionModRefBehavior MRB) {
498 return isModOrRefSet(createModRefInfo(MRB)) &&
499 (MRB & FMRL_ArgumentPointees);
500 }
501
502 /// Checks if functions with the specified behavior are known to read and
503 /// write at most from memory that is inaccessible from LLVM IR.
504 static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) {
505 return !(MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
506 }
507
508 /// Checks if functions with the specified behavior are known to potentially
509 /// read or write from memory that is inaccessible from LLVM IR.
510 static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) {
511 return isModOrRefSet(createModRefInfo(MRB)) && (MRB & FMRL_InaccessibleMem);
512 }
513
514 /// Checks if functions with the specified behavior are known to read and
515 /// write at most from memory that is inaccessible from LLVM IR or objects
516 /// pointed to by their pointer-typed arguments (with arbitrary offsets).
517 static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) {
518 return !(MRB & FMRL_Anywhere &
519 ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees));
520 }
521
522 /// getModRefInfo (for call sites) - Return information about whether
523 /// a particular call site modifies or reads the specified memory location.
524 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
525
526 /// getModRefInfo (for call sites) - A convenience wrapper.
527 ModRefInfo getModRefInfo(const CallBase *Call, const Value *P,
528 LocationSize Size) {
529 return getModRefInfo(Call, MemoryLocation(P, Size));
530 }
531
532 /// getModRefInfo (for loads) - Return information about whether
533 /// a particular load modifies or reads the specified memory location.
534 ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
535
536 /// getModRefInfo (for loads) - A convenience wrapper.
537 ModRefInfo getModRefInfo(const LoadInst *L, const Value *P,
538 LocationSize Size) {
539 return getModRefInfo(L, MemoryLocation(P, Size));
540 }
541
542 /// getModRefInfo (for stores) - Return information about whether
543 /// a particular store modifies or reads the specified memory location.
544 ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
545
546 /// getModRefInfo (for stores) - A convenience wrapper.
547 ModRefInfo getModRefInfo(const StoreInst *S, const Value *P,
548 LocationSize Size) {
549 return getModRefInfo(S, MemoryLocation(P, Size));
550 }
551
552 /// getModRefInfo (for fences) - Return information about whether
553 /// a particular store modifies or reads the specified memory location.
554 ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc);
555
556 /// getModRefInfo (for fences) - A convenience wrapper.
557 ModRefInfo getModRefInfo(const FenceInst *S, const Value *P,
558 LocationSize Size) {
559 return getModRefInfo(S, MemoryLocation(P, Size));
560 }
561
562 /// getModRefInfo (for cmpxchges) - Return information about whether
563 /// a particular cmpxchg modifies or reads the specified memory location.
564 ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
565 const MemoryLocation &Loc);
566
567 /// getModRefInfo (for cmpxchges) - A convenience wrapper.
568 ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
569 LocationSize Size) {
570 return getModRefInfo(CX, MemoryLocation(P, Size));
571 }
572
573 /// getModRefInfo (for atomicrmws) - Return information about whether
574 /// a particular atomicrmw modifies or reads the specified memory location.
575 ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc);
576
577 /// getModRefInfo (for atomicrmws) - A convenience wrapper.
578 ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
579 LocationSize Size) {
580 return getModRefInfo(RMW, MemoryLocation(P, Size));
581 }
582
583 /// getModRefInfo (for va_args) - Return information about whether
584 /// a particular va_arg modifies or reads the specified memory location.
585 ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
586
587 /// getModRefInfo (for va_args) - A convenience wrapper.
588 ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P,
589 LocationSize Size) {
590 return getModRefInfo(I, MemoryLocation(P, Size));
591 }
592
593 /// getModRefInfo (for catchpads) - Return information about whether
594 /// a particular catchpad modifies or reads the specified memory location.
595 ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc);
596
597 /// getModRefInfo (for catchpads) - A convenience wrapper.
598 ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
599 LocationSize Size) {
600 return getModRefInfo(I, MemoryLocation(P, Size));
601 }
602
603 /// getModRefInfo (for catchrets) - Return information about whether
604 /// a particular catchret modifies or reads the specified memory location.
605 ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc);
606
607 /// getModRefInfo (for catchrets) - A convenience wrapper.
608 ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
609 LocationSize Size) {
610 return getModRefInfo(I, MemoryLocation(P, Size));
611 }
612
613 /// Check whether or not an instruction may read or write the optionally
614 /// specified memory location.
615 ///
617 /// For example, an instruction that neither reads nor writes memory may be
618 /// trivially hoisted out of a loop by LICM.
619 ///
620 /// For function calls, this delegates to the alias-analysis specific
621 /// call-site mod-ref behavior queries. Otherwise it delegates to the specific
622 /// helpers above.
623 ModRefInfo getModRefInfo(const Instruction *I,
624 const Optional<MemoryLocation> &OptLoc) {
625 AAQueryInfo AAQIP;
626 return getModRefInfo(I, OptLoc, AAQIP);
627 }
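/// (Editor's sketch, not in the original header: the LICM-style query the
/// comment above describes. `I` is assumed to be an Instruction the caller
/// has on hand; isNoModRef is the helper declared earlier in this file.)
/// \code
///   if (isNoModRef(AA.getModRefInfo(&I, None)))
///     ; // I neither reads nor writes memory; trivially hoistable
/// \endcode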
628
629 /// A convenience wrapper for constructing the memory location.
630 ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
631 LocationSize Size) {
632 return getModRefInfo(I, MemoryLocation(P, Size));
633 }
634
635 /// Return information about whether a call and an instruction may refer to
636 /// the same memory locations.
637 ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call);
638
639 /// Return information about whether two call sites may refer to the same set
640 /// of memory locations. See the AA documentation for details:
641 /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
642 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
643
644 /// Return information about whether a particular call site modifies
645 /// or reads the specified memory location \p MemLoc before instruction \p I
646 /// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
647 /// instruction ordering queries inside the BasicBlock containing \p I.
648 /// Early exits in callCapturesBefore may lead to ModRefInfo::Must not being
649 /// set.
650 ModRefInfo callCapturesBefore(const Instruction *I,
651 const MemoryLocation &MemLoc, DominatorTree *DT,
652 OrderedBasicBlock *OBB = nullptr);
653
654 /// A convenience wrapper to synthesize a memory location.
655 ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
656 LocationSize Size, DominatorTree *DT,
657 OrderedBasicBlock *OBB = nullptr) {
658 return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
659 }
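/// (Editor's sketch of a typical callCapturesBefore query; `CallI` (a call
/// instruction), `Loc` (a MemoryLocation), and `DT` (a DominatorTree) are
/// assumed caller state.)
/// \code
///   if (isNoModRef(AA.callCapturesBefore(CallI, Loc, &DT)))
///     ; // the call cannot reach Loc through any captured pointer
/// \endcode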
660
661 /// @}
662 //===--------------------------------------------------------------------===//
663 /// \name Higher level methods for querying mod/ref information.
664 /// @{
665
666 /// Check if it is possible for execution of the specified basic block to
667 /// modify the location Loc.
668 bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
669
670 /// A convenience wrapper synthesizing a memory location.
671 bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
672 LocationSize Size) {
673 return canBasicBlockModify(BB, MemoryLocation(P, Size));
674 }
675
676 /// Check if it is possible for the execution of the specified instructions
677 /// to mod/ref (according to the mode) the location Loc.
678 ///
679 /// The instructions to consider are all of the instructions in the range of
680 /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
681 bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
682 const MemoryLocation &Loc,
683 const ModRefInfo Mode);
684
685 /// A convenience wrapper synthesizing a memory location.
686 bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
687 const Value *Ptr, LocationSize Size,
688 const ModRefInfo Mode) {
689 return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
690 }
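/// (Editor's sketch: asking whether any instruction in the inclusive range
/// [First, Last] may write to Loc; all names are assumed caller locals.)
/// \code
///   if (!AA.canInstructionRangeModRef(First, Last, Loc, ModRefInfo::Mod))
///     ; // nothing in [First, Last] can modify Loc
/// \endcode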
691
692private:
693 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
694 AAQueryInfo &AAQI);
695 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
696 bool OrLocal = false);
697 ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2,
698 AAQueryInfo &AAQIP);
699 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
700 AAQueryInfo &AAQI);
701 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
702 AAQueryInfo &AAQI);
703 ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
704 AAQueryInfo &AAQI);
705 ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
706 AAQueryInfo &AAQI);
707 ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
708 AAQueryInfo &AAQI);
709 ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
710 AAQueryInfo &AAQI);
711 ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
712 const MemoryLocation &Loc, AAQueryInfo &AAQI);
713 ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
714 AAQueryInfo &AAQI);
715 ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
716 AAQueryInfo &AAQI);
717 ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
718 AAQueryInfo &AAQI);
719 ModRefInfo getModRefInfo(const Instruction *I,
720 const Optional<MemoryLocation> &OptLoc,
721 AAQueryInfo &AAQIP) {
722 if (OptLoc == None) {
723 if (const auto *Call = dyn_cast<CallBase>(I)) {
724 return createModRefInfo(getModRefBehavior(Call));
725 }
726 }
727
728 const MemoryLocation &Loc = OptLoc.getValueOr(MemoryLocation());
729
730 switch (I->getOpcode()) {
731 case Instruction::VAArg:
732 return getModRefInfo((const VAArgInst *)I, Loc, AAQIP);
733 case Instruction::Load:
734 return getModRefInfo((const LoadInst *)I, Loc, AAQIP);
735 case Instruction::Store:
736 return getModRefInfo((const StoreInst *)I, Loc, AAQIP);
737 case Instruction::Fence:
738 return getModRefInfo((const FenceInst *)I, Loc, AAQIP);
739 case Instruction::AtomicCmpXchg:
740 return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);
741 case Instruction::AtomicRMW:
742 return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP);
743 case Instruction::Call:
744 return getModRefInfo((const CallInst *)I, Loc, AAQIP);
745 case Instruction::Invoke:
746 return getModRefInfo((const InvokeInst *)I, Loc, AAQIP);
747 case Instruction::CatchPad:
748 return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP);
749 case Instruction::CatchRet:
750 return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP);
751 default:
752 return ModRefInfo::NoModRef;
753 }
754 }
755
756 class Concept;
757
758 template <typename T> class Model;
759
760 template <typename T> friend class AAResultBase;
761
762 const TargetLibraryInfo &TLI;
763
764 std::vector<std::unique_ptr<Concept>> AAs;
765
766 std::vector<AnalysisKey *> AADeps;
767
768 friend class BatchAAResults;
769};
770
771/// This class is a wrapper over an AAResults, and it is intended to be used
772/// only when there are no IR changes in between queries. BatchAAResults
773/// reuses the same `AAQueryInfo` to preserve state across queries,
774/// essentially making AA work in "batch mode". The internal state cannot be
775/// cleared, so to leave "batch mode" the user must either use AAResults
776/// directly or create a new BatchAAResults.
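/// (Editor's illustration of typical batch-mode use; `AA`, `LocA`, and
/// `LocB` are assumed caller state, and no IR may change between queries.)
/// \code
///   BatchAAResults BatchAA(AA);
///   AliasResult AR1 = BatchAA.alias(LocA, LocB);
///   AliasResult AR2 = BatchAA.alias(LocA, LocB); // may hit the shared cache
/// \endcode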
777class BatchAAResults {
778 AAResults &AA;
779 AAQueryInfo AAQI;
780
781public:
782 BatchAAResults(AAResults &AAR) : AA(AAR), AAQI() {}
783 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
784 return AA.alias(LocA, LocB, AAQI);
785 }
786 bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) {
787 return AA.pointsToConstantMemory(Loc, AAQI, OrLocal);
788 }
789 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
790 return AA.getModRefInfo(Call, Loc, AAQI);
791 }
792 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
793 return AA.getModRefInfo(Call1, Call2, AAQI);
794 }
795 ModRefInfo getModRefInfo(const Instruction *I,
796 const Optional<MemoryLocation> &OptLoc) {
797 return AA.getModRefInfo(I, OptLoc, AAQI);
798 }
799 ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2) {
800 return AA.getModRefInfo(I, Call2, AAQI);
801 }
802 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
803 return AA.getArgModRefInfo(Call, ArgIdx);
804 }
805 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
806 return AA.getModRefBehavior(Call);
807 }
808};
809
810/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
811/// pointer or reference.
812using AliasAnalysis = AAResults;
813
814/// A private abstract base class describing the concept of an individual alias
815/// analysis implementation.
816///
817/// This interface is implemented by any \c Model instantiation. It is also the
818/// interface which a type used to instantiate the model must provide.
819///
820/// All of these methods model the methods of the same name in the \c
821/// AAResults class. Only differences from, and specifics of, how the
822/// implementations are called are documented here.
823class AAResults::Concept {
824public:
825 virtual ~Concept() = 0;
826
827 /// An update API used internally by the AAResults to provide
828 /// a handle back to the top level aggregation.
829 virtual void setAAResults(AAResults *NewAAR) = 0;
830
831 //===--------------------------------------------------------------------===//
832 /// \name Alias Queries
833 /// @{
834
835 /// The main low level interface to the alias analysis implementation.
836 /// Returns an AliasResult indicating whether the two pointers are aliased to
837 /// each other. This is the interface that must be implemented by specific
838 /// alias analysis implementations.
839 virtual AliasResult alias(const MemoryLocation &LocA,
840 const MemoryLocation &LocB, AAQueryInfo &AAQI) = 0;
841
842 /// Checks whether the given location points to constant memory, or if
843 /// \p OrLocal is true whether it points to a local alloca.
844 virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
845 AAQueryInfo &AAQI, bool OrLocal) = 0;
846
847 /// @}
848 //===--------------------------------------------------------------------===//
849 /// \name Simple mod/ref information
850 /// @{
851
852 /// Get the ModRef info associated with a pointer argument of a callsite. The
853 /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
854 /// that these bits do not necessarily account for the overall behavior of
855 /// the function, but rather only provide additional per-argument
856 /// information.
857 virtual ModRefInfo getArgModRefInfo(const CallBase *Call,
858 unsigned ArgIdx) = 0;
859
860 /// Return the behavior of the given call site.
861 virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0;
862
863 /// Return the behavior when calling the given function.
864 virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
865
866 /// getModRefInfo (for call sites) - Return information about whether
867 /// a particular call site modifies or reads the specified memory location.
868 virtual ModRefInfo getModRefInfo(const CallBase *Call,
869 const MemoryLocation &Loc,
870 AAQueryInfo &AAQI) = 0;
871
872 /// Return information about whether two call sites may refer to the same set
873 /// of memory locations. See the AA documentation for details:
874 /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
875 virtual ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
876 AAQueryInfo &AAQI) = 0;
877
878 /// @}
879};
880
881/// A private class template which derives from \c Concept and wraps some other
882/// type.
883///
884/// This models the concept by directly forwarding each interface point to the
885/// wrapped type which must implement a compatible interface. This provides
886/// a type-erased binding.
887template <typename AAResultT> class AAResults::Model final : public Concept {
888 AAResultT &Result;
889
890public:
891 explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) {
892 Result.setAAResults(&AAR);
893 }
894 ~Model() override = default;
895
896 void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); }
897
898 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
899 AAQueryInfo &AAQI) override {
900 return Result.alias(LocA, LocB, AAQI);
901 }
902
903 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
904 bool OrLocal) override {
905 return Result.pointsToConstantMemory(Loc, AAQI, OrLocal);
906 }
907
908 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
909 return Result.getArgModRefInfo(Call, ArgIdx);
910 }
911
912 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override {
913 return Result.getModRefBehavior(Call);
914 }
915
916 FunctionModRefBehavior getModRefBehavior(const Function *F) override {
917 return Result.getModRefBehavior(F);
918 }
919
920 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
921 AAQueryInfo &AAQI) override {
922 return Result.getModRefInfo(Call, Loc, AAQI);
923 }
924
925 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
926 AAQueryInfo &AAQI) override {
927 return Result.getModRefInfo(Call1, Call2, AAQI);
928 }
929};
930
931/// A CRTP-driven "mixin" base class to help implement the function alias
932/// analysis results concept.
933///
934/// Because of the nature of many alias analysis implementations, they often
935/// only implement a subset of the interface. This base class will attempt to
936/// implement the remaining portions of the interface in terms of simpler forms
937/// of the interface where possible, and otherwise provide conservatively
938/// correct fallback implementations.
939///
940/// Implementors of an alias analysis should derive from this CRTP, and then
941/// override the specific methods they wish to customize. There is no need to
942/// use virtual anywhere; the CRTP base class does static dispatch to the
943/// derived type passed into it.
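/// (Editor's sketch of a minimal subclass; the class name `TrivialAA` is
/// hypothetical and exists only for illustration.)
/// \code
///   class TrivialAA : public AAResultBase<TrivialAA> {
///   public:
///     AliasResult alias(const MemoryLocation &LocA,
///                       const MemoryLocation &LocB, AAQueryInfo &AAQI) {
///       if (LocA.Ptr == LocB.Ptr && LocA.Size == LocB.Size)
///         return MustAlias; // identical locations trivially must-alias
///       return AAResultBase::alias(LocA, LocB, AAQI); // default: MayAlias
///     }
///   };
/// \endcode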
944template <typename DerivedT> class AAResultBase {
945 // Expose some parts of the interface only to the AAResults::Model
946 // for wrapping. Specifically, this allows the model to call our
947 // setAAResults method without exposing it as a fully public API.
948 friend class AAResults::Model<DerivedT>;
949
950 /// A pointer to the AAResults object that this AAResult is
951 /// aggregated within. May be null if not aggregated.
952 AAResults *AAR = nullptr;
953
954 /// Helper to dispatch calls back through the derived type.
955 DerivedT &derived() { return static_cast<DerivedT &>(*this); }
956
957 /// A setter for the AAResults pointer, which is used to satisfy the
958 /// AAResults::Model contract.
959 void setAAResults(AAResults *NewAAR) { AAR = NewAAR; }
960
961protected:
962 /// This proxy class models a common pattern where we delegate to either the
963 /// top-level \c AAResults aggregation if one is registered, or to the
964 /// current result if none are registered.
965 class AAResultsProxy {
966 AAResults *AAR;
967 DerivedT &CurrentResult;
968
969 public:
970 AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult)
971 : AAR(AAR), CurrentResult(CurrentResult) {}
972
973 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
974 AAQueryInfo &AAQI) {
975 return AAR ? AAR->alias(LocA, LocB, AAQI)
976 : CurrentResult.alias(LocA, LocB, AAQI);
977 }
978
979 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
980 bool OrLocal) {
981 return AAR ? AAR->pointsToConstantMemory(Loc, AAQI, OrLocal)
982 : CurrentResult.pointsToConstantMemory(Loc, AAQI, OrLocal);
983 }
984
985 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
986 return AAR ? AAR->getArgModRefInfo(Call, ArgIdx)
987 : CurrentResult.getArgModRefInfo(Call, ArgIdx);
988 }
989
990 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
991 return AAR ? AAR->getModRefBehavior(Call)
992 : CurrentResult.getModRefBehavior(Call);
993 }
994
995 FunctionModRefBehavior getModRefBehavior(const Function *F) {
996 return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
997 }
998
999 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
1000 AAQueryInfo &AAQI) {
1001 return AAR ? AAR->getModRefInfo(Call, Loc, AAQI)
1002 : CurrentResult.getModRefInfo(Call, Loc, AAQI);
1003 }
1004
1005 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
1006 AAQueryInfo &AAQI) {
1007 return AAR ? AAR->getModRefInfo(Call1, Call2, AAQI)
1008 : CurrentResult.getModRefInfo(Call1, Call2, AAQI);
1009 }
1010 };
1011
1012 explicit AAResultBase() = default;
1013
1014 // Provide all the copy and move constructors so that derived types aren't
1015 // constrained.
1016 AAResultBase(const AAResultBase &Arg) {}
1017 AAResultBase(AAResultBase &&Arg) {}
1018
1019 /// Get a proxy for the best AA result set to query at this time.
1020 ///
1021 /// When this result is part of a larger aggregation, this will proxy to that
1022 /// aggregation. When this result is used in isolation, it will just delegate
1023 /// back to the derived class's implementation.
1024 ///
1025 /// Note that callers of this routine need to take considerable care not
1026 /// to cause performance problems: when a large number of alias analyses
1027 /// are being aggregated, it can be expensive to walk back across the chain.
1029 AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); }
1030
1031public:
1032 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
1033 AAQueryInfo &AAQI) {
1034 return MayAlias;
1035 }
1036
1037 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
1038 bool OrLocal) {
1039 return false;
1040 }
1041
1042 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
1043 return ModRefInfo::ModRef;
1044 }
1045
1046 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
1047 return FMRB_UnknownModRefBehavior;
1048 }
1049
1050 FunctionModRefBehavior getModRefBehavior(const Function *F) {
1051 return FMRB_UnknownModRefBehavior;
1052 }
1053
1054 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
1055 AAQueryInfo &AAQI) {
1056 return ModRefInfo::ModRef;
1057 }
1058
1059 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
1060 AAQueryInfo &AAQI) {
1061 return ModRefInfo::ModRef;
1062 }
1063};
1064
1065/// Return true if this pointer is returned by a noalias function.
1066bool isNoAliasCall(const Value *V);
1067
1068/// Return true if this is an argument with the noalias attribute.
1069bool isNoAliasArgument(const Value *V);
1070
1071/// Return true if this pointer refers to a distinct and identifiable object.
1072/// This returns true for:
1073/// Global Variables and Functions (but not Global Aliases)
1074/// Allocas
1075/// ByVal and NoAlias Arguments
1076/// NoAlias returns (e.g. calls to malloc)
1077///
1078bool isIdentifiedObject(const Value *V);
1079
1080/// Return true if V is unambiguously identified at the function level.
1081/// Different IdentifiedFunctionLocals can't alias.
1082/// Further, an IdentifiedFunctionLocal cannot alias with any function
1083/// arguments other than itself, which is not necessarily true for
1084/// IdentifiedObjects.
1085bool isIdentifiedFunctionLocal(const Value *V);
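/// (Editor's illustration of the classic use of these helpers, as in
/// BasicAA; `A` and `B` are assumed to be underlying-object `Value *`s.)
/// \code
///   if (isIdentifiedObject(A) && isIdentifiedObject(B) && A != B)
///     return NoAlias; // distinct identified objects never alias
/// \endcode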
1086
1087/// A manager for alias analyses.
1088///
1089/// This class can have analyses registered with it, and when run it will run
1090/// all of them and aggregate their results into a single AA results interface
1091/// that dispatches across all of the alias analysis results available.
1092///
1093/// Note that the order in which analyses are registered is very significant.
1094/// That is the order in which the results will be aggregated and queried.
1095///
1096/// This manager effectively wraps the AnalysisManager for registering alias
1097/// analyses. When you register your alias analysis with this manager, it will
1098/// ensure the analysis itself is registered with its AnalysisManager.
1099///
1100/// The result of this analysis is only invalidated if one of the particular
1101/// aggregated AA results ends up being invalidated. This removes the need to
1102/// explicitly preserve the results of `AAManager`. Note that analyses should no
1103/// longer be registered once the `AAManager` is run.
1104class AAManager : public AnalysisInfoMixin<AAManager> {
1105public:
1106 using Result = AAResults;
1107
1108 /// Register a specific AA result.
1109 template <typename AnalysisT> void registerFunctionAnalysis() {
1110 ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
1111 }
1112
1113 /// Register a specific AA result.
1114 template <typename AnalysisT> void registerModuleAnalysis() {
1115 ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
1116 }
1117
1118 Result run(Function &F, FunctionAnalysisManager &AM) {
1119 Result R(AM.getResult<TargetLibraryAnalysis>(F));
1120 for (auto &Getter : ResultGetters)
1121 (*Getter)(F, AM, R);
1122 return R;
1123 }
1124
1125private:
1126 friend AnalysisInfoMixin<AAManager>;
1127
1128 static AnalysisKey Key;
1129
1130 SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM,
1131 AAResults &AAResults),
1132 4> ResultGetters;
1133
1134 template <typename AnalysisT>
1135 static void getFunctionAAResultImpl(Function &F,
1136 FunctionAnalysisManager &AM,
1137 AAResults &AAResults) {
1138 AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
1139 AAResults.addAADependencyID(AnalysisT::ID());
1140 }
1141
1142 template <typename AnalysisT>
1143 static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM,
1144 AAResults &AAResults) {
1145 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
1146 auto &MAM = MAMProxy.getManager();
1147 if (auto *R = MAM.template getCachedResult<AnalysisT>(*F.getParent())) {
1148 AAResults.addAAResult(*R);
1149 MAMProxy
1150 .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>();
1151 }
1152 }
1153};
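/// (Editor's sketch of registering alias analyses with an AAManager under
/// the new pass manager; `BasicAA` is declared in a separate header and
/// `FAM` is an already-constructed FunctionAnalysisManager.)
/// \code
///   AAManager AA;
///   AA.registerFunctionAnalysis<BasicAA>();
///   FAM.registerPass([&] { return std::move(AA); });
/// \endcode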
1154
1155/// A wrapper pass to provide the legacy pass manager access to a suitably
1156/// prepared AAResults object.
1157class AAResultsWrapperPass : public FunctionPass {
1158 std::unique_ptr<AAResults> AAR;
1159
1160public:
1161 static char ID;
1162
1163 AAResultsWrapperPass();
1164
1165 AAResults &getAAResults() { return *AAR; }
1166 const AAResults &getAAResults() const { return *AAR; }
1167
1168 bool runOnFunction(Function &F) override;
1169
1170 void getAnalysisUsage(AnalysisUsage &AU) const override;
1171};
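/// (Editor's sketch of consuming this wrapper from a legacy pass; `MyPass`
/// is a hypothetical FunctionPass.)
/// \code
///   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
///     AU.addRequired<AAResultsWrapperPass>();
///   }
///   bool MyPass::runOnFunction(Function &F) {
///     AAResults &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
///     (void)AA; // AA queries for F go here
///     return false;
///   }
/// \endcode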