Bug Summary

File: llvm/lib/Transforms/Scalar/LICM.cpp
Warning: line 1131, column 11
Called C++ object pointer is null
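
The command line below passes -D NDEBUG, so the assertion at line 1161 that requires exactly one of CurAST and MSSAU to be non-null compiles to nothing. The analyzer therefore explores a path through canSinkOrHoistInst in which both pointers are null, and isReadOnly then dereferences MSSAU at line 1131. A reduced, self-contained sketch of that shape (hypothetical names, not the LLVM types) is:

  #include <cassert>

  struct Tracker { bool readOnly() const { return true; } };
  struct Updater { bool hasDefs() const { return false; } };

  // Mirrors isReadOnly(): whenever T is null, U is dereferenced unconditionally.
  static bool isReadOnlySketch(const Tracker *T, const Updater *U) {
    if (T)
      return T->readOnly();
    return !U->hasDefs(); // null dereference if U is also null
  }

  static bool canSinkOrHoistSketch(const Tracker *T, const Updater *U) {
    assert(((T != nullptr) ^ (U != nullptr)) && "exactly one expected");
    // Under NDEBUG the assert above vanishes, so nothing rules out T == U == nullptr.
    return isReadOnlySketch(T, U);
  }

  int main() { return canSinkOrHoistSketch(nullptr, nullptr); } // crashes in an NDEBUG build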

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LICM.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Scalar -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/Scalar -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Scalar/LICM.cpp

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/Scalar/LICM.cpp

1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
15// Hoisting operations out of loops is a canonicalization transform. It
16// enables and simplifies subsequent optimizations in the middle-end.
17// Rematerialization of hoisted instructions to reduce register pressure is the
18// responsibility of the back-end, which has more accurate information about
19// register pressure and also handles other optimizations than LICM that
20// increase live-ranges.
21//
22// This pass uses alias analysis for two purposes:
23//
24// 1. Moving loop invariant loads and calls out of loops. If we can determine
25// that a load or call inside of a loop never aliases anything stored to,
26// we can hoist it or sink it like any other instruction.
27// 2. Scalar Promotion of Memory - If there is a store instruction inside of
28// the loop, we try to move the store to happen AFTER the loop instead of
29// inside of the loop. This can only happen if a few conditions are true:
30// A. The pointer stored through is loop invariant
31// B. There are no stores or loads in the loop which _may_ alias the
32// pointer. There are no calls in the loop which mod/ref the pointer.
33// If these conditions are true, we can promote the loads and stores in the
34// loop of the pointer to use a temporary alloca'd variable. We then use
35// the SSAUpdater to construct the appropriate SSA form for the value.
36//
37//===----------------------------------------------------------------------===//
38
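
As a source-level intuition for the hoisting and scalar-promotion behaviour described in the header comment above (a hedged illustration only; the pass itself operates on LLVM IR, and the function names below are hypothetical):

  // Before: an invariant value is recomputed and stored through an invariant
  // pointer on every iteration.
  void before(int *q, int n, int a, int b) {
    for (int i = 0; i < n; ++i)
      *q = a * b;
  }

  // After (conceptually): the computation is hoisted into the preheader and the
  // store is sunk past the loop, i.e. the memory location is promoted to a register.
  void after(int *q, int n, int a, int b) {
    int t = a * b;            // hoisted: operands are loop invariant
    bool ran = false;
    for (int i = 0; i < n; ++i)
      ran = true;
    if (ran)
      *q = t;                 // promoted store placed after the loop
  }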
39#include "llvm/Transforms/Scalar/LICM.h"
40#include "llvm/ADT/SetOperations.h"
41#include "llvm/ADT/Statistic.h"
42#include "llvm/Analysis/AliasAnalysis.h"
43#include "llvm/Analysis/AliasSetTracker.h"
44#include "llvm/Analysis/BasicAliasAnalysis.h"
45#include "llvm/Analysis/BlockFrequencyInfo.h"
46#include "llvm/Analysis/CaptureTracking.h"
47#include "llvm/Analysis/ConstantFolding.h"
48#include "llvm/Analysis/GlobalsModRef.h"
49#include "llvm/Analysis/GuardUtils.h"
50#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
51#include "llvm/Analysis/Loads.h"
52#include "llvm/Analysis/LoopInfo.h"
53#include "llvm/Analysis/LoopIterator.h"
54#include "llvm/Analysis/LoopPass.h"
55#include "llvm/Analysis/MemoryBuiltins.h"
56#include "llvm/Analysis/MemorySSA.h"
57#include "llvm/Analysis/MemorySSAUpdater.h"
58#include "llvm/Analysis/MustExecute.h"
59#include "llvm/Analysis/OptimizationRemarkEmitter.h"
60#include "llvm/Analysis/ScalarEvolution.h"
61#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
62#include "llvm/Analysis/TargetLibraryInfo.h"
63#include "llvm/Analysis/ValueTracking.h"
64#include "llvm/IR/CFG.h"
65#include "llvm/IR/Constants.h"
66#include "llvm/IR/DataLayout.h"
67#include "llvm/IR/DebugInfoMetadata.h"
68#include "llvm/IR/DerivedTypes.h"
69#include "llvm/IR/Dominators.h"
70#include "llvm/IR/Instructions.h"
71#include "llvm/IR/IntrinsicInst.h"
72#include "llvm/IR/LLVMContext.h"
73#include "llvm/IR/Metadata.h"
74#include "llvm/IR/PatternMatch.h"
75#include "llvm/IR/PredIteratorCache.h"
76#include "llvm/InitializePasses.h"
77#include "llvm/Support/CommandLine.h"
78#include "llvm/Support/Debug.h"
79#include "llvm/Support/raw_ostream.h"
80#include "llvm/Transforms/Scalar.h"
81#include "llvm/Transforms/Scalar/LoopPassManager.h"
82#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
83#include "llvm/Transforms/Utils/BasicBlockUtils.h"
84#include "llvm/Transforms/Utils/Local.h"
85#include "llvm/Transforms/Utils/LoopUtils.h"
86#include "llvm/Transforms/Utils/SSAUpdater.h"
87#include <algorithm>
88#include <utility>
89using namespace llvm;
90
91#define DEBUG_TYPE "licm"
92
93STATISTIC(NumCreatedBlocks, "Number of blocks created");
94STATISTIC(NumClonedBranches, "Number of branches cloned");
95STATISTIC(NumSunk, "Number of instructions sunk out of loop");
96STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
97STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
98STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
99STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
100
101/// Memory promotion is enabled by default.
102static cl::opt<bool>
103 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
104 cl::desc("Disable memory promotion in LICM pass"));
105
106static cl::opt<bool> ControlFlowHoisting(
107 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
108 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
109
110static cl::opt<unsigned> HoistSinkColdnessThreshold(
111 "licm-coldness-threshold", cl::Hidden, cl::init(4),
112 cl::desc("Relative coldness Threshold of hoisting/sinking destination "
113 "block for LICM to be considered beneficial"));
114
115static cl::opt<uint32_t> MaxNumUsesTraversed(
116 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
117 cl::desc("Max num uses visited for identifying load "
118 "invariance in loop using invariant start (default = 8)"));
119
120// Experimental option to allow imprecision in LICM in pathological cases, in
121// exchange for faster compile. This is to be removed if MemorySSA starts to
122// address the same issue. This flag applies only when LICM uses MemorySSA
123// instead of AliasSetTracker. LICM calls MemorySSAWalker's
124// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
125// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
126// which may not be precise, since optimizeUses is capped. The result is
127// correct, but we may not get as "far up" as possible to get which access is
128// clobbering the one queried.
129cl::opt<unsigned> llvm::SetLicmMssaOptCap(
130 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
131 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
132 "for faster compile. Caps the MemorySSA clobbering calls."));
133
134// Experimentally, memory promotion carries less importance than sinking and
135// hoisting. Limit when we do promotion when using MemorySSA, in order to save
136// compile time.
137cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
138 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
139 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
140 "effect. When MSSA in LICM is enabled, then this is the maximum "
141 "number of accesses allowed to be present in a loop in order to "
142 "enable memory promotion."));
143
144static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
145static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
146 const LoopSafetyInfo *SafetyInfo,
147 TargetTransformInfo *TTI, bool &FreeInLoop,
148 bool LoopNestMode);
149static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
150 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
151 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
152 OptimizationRemarkEmitter *ORE);
153static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
154 BlockFrequencyInfo *BFI, const Loop *CurLoop,
155 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
156 OptimizationRemarkEmitter *ORE);
157static bool isSafeToExecuteUnconditionally(Instruction &Inst,
158 const DominatorTree *DT,
159 const TargetLibraryInfo *TLI,
160 const Loop *CurLoop,
161 const LoopSafetyInfo *SafetyInfo,
162 OptimizationRemarkEmitter *ORE,
163 const Instruction *CtxI = nullptr);
164static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
165 AliasSetTracker *CurAST, Loop *CurLoop,
166 AAResults *AA);
167static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
168 Loop *CurLoop, Instruction &I,
169 SinkAndHoistLICMFlags &Flags);
170static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
171 MemoryUse &MU);
172static Instruction *cloneInstructionInExitBlock(
173 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
174 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
175
176static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
177 MemorySSAUpdater *MSSAU);
178
179static void moveInstructionBefore(Instruction &I, Instruction &Dest,
180 ICFLoopSafetyInfo &SafetyInfo,
181 MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
182
183static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
184 function_ref<void(Instruction *)> Fn);
185static SmallVector<SmallSetVector<Value *, 8>, 0>
186collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);
187
188namespace {
189struct LoopInvariantCodeMotion {
190 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
191 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
192 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
193 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
194
195 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
196 unsigned LicmMssaNoAccForPromotionCap)
197 : LicmMssaOptCap(LicmMssaOptCap),
198 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
199
200private:
201 unsigned LicmMssaOptCap;
202 unsigned LicmMssaNoAccForPromotionCap;
203};
204
205struct LegacyLICMPass : public LoopPass {
206 static char ID; // Pass identification, replacement for typeid
207 LegacyLICMPass(
208 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
209 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
210 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
211 initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
212 }
213
214 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
215 if (skipLoop(L))
216 return false;
217
218 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
219 << L->getHeader()->getNameOrAsOperand() << "\n");
220
221 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
222 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
223 bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
224 BlockFrequencyInfo *BFI =
225 hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
226 : nullptr;
227 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
228 // pass. Function analyses need to be preserved across loop transformations
229 // but ORE cannot be preserved (see comment before the pass definition).
230 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
231 return LICM.runOnLoop(
232 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
233 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
234 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
235 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
236 *L->getHeader()->getParent()),
237 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
238 *L->getHeader()->getParent()),
239 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
240 }
241
242 /// This transformation requires natural loop information & requires that
243 /// loop preheaders be inserted into the CFG...
244 ///
245 void getAnalysisUsage(AnalysisUsage &AU) const override {
246 AU.addPreserved<DominatorTreeWrapperPass>();
247 AU.addPreserved<LoopInfoWrapperPass>();
248 AU.addRequired<TargetLibraryInfoWrapperPass>();
249 AU.addRequired<MemorySSAWrapperPass>();
250 AU.addPreserved<MemorySSAWrapperPass>();
251 AU.addRequired<TargetTransformInfoWrapperPass>();
252 getLoopAnalysisUsage(AU);
253 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
254 AU.addPreserved<LazyBlockFrequencyInfoPass>();
255 AU.addPreserved<LazyBranchProbabilityInfoPass>();
256 }
257
258private:
259 LoopInvariantCodeMotion LICM;
260};
261} // namespace
262
263PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
264 LoopStandardAnalysisResults &AR, LPMUpdater &) {
265 if (!AR.MSSA)
266 report_fatal_error("LICM requires MemorySSA (loop-mssa)");
267
268 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
269 // pass. Function analyses need to be preserved across loop transformations
270 // but ORE cannot be preserved (see comment before the pass definition).
271 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
272
273 LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
274 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
275 &AR.SE, AR.MSSA, &ORE))
276 return PreservedAnalyses::all();
277
278 auto PA = getLoopPassPreservedAnalyses();
279
280 PA.preserve<DominatorTreeAnalysis>();
281 PA.preserve<LoopAnalysis>();
282 PA.preserve<MemorySSAAnalysis>();
283
284 return PA;
285}
286
287PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
288 LoopStandardAnalysisResults &AR,
289 LPMUpdater &) {
290 if (!AR.MSSA)
291 report_fatal_error("LNICM requires MemorySSA (loop-mssa)");
292
293 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
294 // pass. Function analyses need to be preserved across loop transformations
295 // but ORE cannot be preserved (see comment before the pass definition).
296 OptimizationRemarkEmitter ORE(LN.getParent());
297
298 LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
299
300 Loop &OutermostLoop = LN.getOutermostLoop();
301 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
302 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
303
304 if (!Changed)
305 return PreservedAnalyses::all();
306
307 auto PA = getLoopPassPreservedAnalyses();
308
309 PA.preserve<DominatorTreeAnalysis>();
310 PA.preserve<LoopAnalysis>();
311 PA.preserve<MemorySSAAnalysis>();
312
313 return PA;
314}
315
316char LegacyLICMPass::ID = 0;
317INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
318 false, false)
319INITIALIZE_PASS_DEPENDENCY(LoopPass)
320INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
321INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
322INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
323INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
324INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
325 false)
326
327Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
328Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
329 unsigned LicmMssaNoAccForPromotionCap) {
330 return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
331}
332
333llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
334 MemorySSA *MSSA)
335 : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
336 IsSink, L, MSSA) {}
337
338llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
339 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
340 Loop *L, MemorySSA *MSSA)
341 : LicmMssaOptCap(LicmMssaOptCap),
342 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
343 IsSink(IsSink) {
344 assert(((L != nullptr) == (MSSA != nullptr)) &&
345 "Unexpected values for SinkAndHoistLICMFlags");
346 if (!MSSA)
347 return;
348
349 unsigned AccessCapCount = 0;
350 for (auto *BB : L->getBlocks())
351 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
352 for (const auto &MA : *Accesses) {
353 (void)MA;
354 ++AccessCapCount;
355 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
356 NoOfMemAccTooLarge = true;
357 return;
358 }
359 }
360}
361
362/// Hoist expressions out of the specified loop. Note, alias info for inner
363/// loop is not preserved so it is not a good idea to run LICM multiple
364/// times on one loop.
365bool LoopInvariantCodeMotion::runOnLoop(
366 Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
367 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
368 ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
369 bool LoopNestMode) {
370 bool Changed = false;
371
372 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
373
374 // If this loop has metadata indicating that LICM is not to be performed then
375 // just exit.
376 if (hasDisableLICMTransformsHint(L)) {
377 return false;
378 }
379
380 // Don't sink stores from loops with coroutine suspend instructions.
381 // LICM would sink instructions into the default destination of
382 // the coroutine switch. The default destination of the switch is to
383 // handle the case where the coroutine is suspended, by which point the
384 // coroutine frame may have been destroyed. No instruction can be sunk there.
385 // FIXME: This would unfortunately hurt the performance of coroutines, however
386 // there is currently no general solution for this. Similar issues could also
387 // potentially happen in other passes where instructions are being moved
388 // across that edge.
389 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
390 return llvm::any_of(*BB, [](Instruction &I) {
391 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
392 return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
393 });
394 });
395
396 MemorySSAUpdater MSSAU(MSSA);
397 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
398 /*IsSink=*/true, L, MSSA);
399
400 // Get the preheader block to move instructions into...
401 BasicBlock *Preheader = L->getLoopPreheader();
402
403 // Compute loop safety information.
404 ICFLoopSafetyInfo SafetyInfo;
405 SafetyInfo.computeLoopSafetyInfo(L);
406
407 // We want to visit all of the instructions in this loop... that are not parts
408 // of our subloops (they have already had their invariants hoisted out of
409 // their loop, into this loop, so there is no need to process the BODIES of
410 // the subloops).
411 //
412 // Traverse the body of the loop in depth first order on the dominator tree so
413 // that we are guaranteed to see definitions before we see uses. This allows
414 // us to sink instructions in one pass, without iteration. After sinking
415 // instructions, we perform another pass to hoist them out of the loop.
416 if (L->hasDedicatedExits())
417 Changed |= LoopNestMode
418 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI,
419 DT, BFI, TLI, TTI, L, &MSSAU,
420 &SafetyInfo, Flags, ORE)
421 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI,
422 TLI, TTI, L, &MSSAU, &SafetyInfo, Flags, ORE);
423 Flags.setIsSink(false);
424 if (Preheader)
425 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
426 &MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode);
427
428 // Now that all loop invariants have been removed from the loop, promote any
429 // memory references to scalars that we can.
430 // Don't sink stores from loops without dedicated block exits. Exits
431 // containing indirect branches are not transformed by loop simplify,
432 // so make sure we catch that. An additional load may be generated in the
433 // preheader for SSA updater, so also avoid sinking when no preheader
434 // is available.
435 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
436 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
437 // Figure out the loop exits and their insertion points
438 SmallVector<BasicBlock *, 8> ExitBlocks;
439 L->getUniqueExitBlocks(ExitBlocks);
440
441 // We can't insert into a catchswitch.
442 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
443 return isa<CatchSwitchInst>(Exit->getTerminator());
444 });
445
446 if (!HasCatchSwitch) {
447 SmallVector<Instruction *, 8> InsertPts;
448 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
449 InsertPts.reserve(ExitBlocks.size());
450 MSSAInsertPts.reserve(ExitBlocks.size());
451 for (BasicBlock *ExitBlock : ExitBlocks) {
452 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
453 MSSAInsertPts.push_back(nullptr);
454 }
455
456 PredIteratorCache PIC;
457
458 // Promoting one set of accesses may make the pointers for another set
459 // loop invariant, so run this in a loop (with the MaybePromotable set
460 // decreasing in size over time).
461 bool Promoted = false;
462 bool LocalPromoted;
463 do {
464 LocalPromoted = false;
465 for (const SmallSetVector<Value *, 8> &PointerMustAliases :
466 collectPromotionCandidates(MSSA, AA, L)) {
467 LocalPromoted |= promoteLoopAccessesToScalars(
468 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC,
469 LI, DT, TLI, L, &MSSAU, &SafetyInfo, ORE);
470 }
471 Promoted |= LocalPromoted;
472 } while (LocalPromoted);
473
474 // Once we have promoted values across the loop body we have to
475 // recursively reform LCSSA as any nested loop may now have values defined
476 // within the loop used in the outer loop.
477 // FIXME: This is really heavy handed. It would be a bit better to use an
478 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
479 // it as it went.
480 if (Promoted)
481 formLCSSARecursively(*L, *DT, LI, SE);
482
483 Changed |= Promoted;
484 }
485 }
486
487 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
488 // specifically moving instructions across the loop boundary and so it is
489 // especially in need of sanity checking here.
490 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
491 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
492 "Parent loop not left in LCSSA form after LICM!");
493
494 if (VerifyMemorySSA)
495 MSSA->verifyMemorySSA();
496
497 if (Changed && SE)
498 SE->forgetLoopDispositions(L);
499 return Changed;
500}
501
502/// Walk the specified region of the CFG (defined by all blocks dominated by
503/// the specified block, and that are in the current loop) in reverse depth
504/// first order w.r.t the DominatorTree. This allows us to visit uses before
505/// definitions, allowing us to sink a loop body in one pass without iteration.
506///
507bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
508 DominatorTree *DT, BlockFrequencyInfo *BFI,
509 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
510 Loop *CurLoop, MemorySSAUpdater *MSSAU,
511 ICFLoopSafetyInfo *SafetyInfo,
512 SinkAndHoistLICMFlags &Flags,
513 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
514
515 // Verify inputs.
516 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
517 CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
518 "Unexpected input to sinkRegion.");
519
520 // We want to visit children before parents. We will enqueue all the parents
521 // before their children in the worklist and process the worklist in reverse
522 // order.
523 SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
524
525 bool Changed = false;
526 for (DomTreeNode *DTN : reverse(Worklist)) {
527 BasicBlock *BB = DTN->getBlock();
528 // Only need to process the contents of this block if it is not part of a
529 // subloop (which would already have been processed).
530 if (inSubLoop(BB, CurLoop, LI))
531 continue;
532
533 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
534 Instruction &I = *--II;
535
536 // The instruction is not used in the loop if it is dead. In this case,
537 // we just delete it instead of sinking it.
538 if (isInstructionTriviallyDead(&I, TLI)) {
539 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
540 salvageKnowledge(&I);
541 salvageDebugInfo(I);
542 ++II;
543 eraseInstruction(I, *SafetyInfo, MSSAU);
544 Changed = true;
545 continue;
546 }
547
548 // Check to see if we can sink this instruction to the exit blocks
549 // of the loop. We can do this if all of the users of the instruction are
550 // outside of the loop. In this case, it doesn't even matter if the
551 // operands of the instruction are loop invariant.
552 //
553 bool FreeInLoop = false;
554 bool LoopNestMode = OutermostLoop != nullptr;
555 if (!I.mayHaveSideEffects() &&
556 isNotUsedOrFreeInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
557 SafetyInfo, TTI, FreeInLoop, LoopNestMode) &&
558 canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/nullptr, MSSAU, true,
559 &Flags, ORE)) {
560 if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
561 if (!FreeInLoop) {
562 ++II;
563 salvageDebugInfo(I);
564 eraseInstruction(I, *SafetyInfo, MSSAU);
565 }
566 Changed = true;
567 }
568 }
569 }
570 }
571 if (VerifyMemorySSA)
572 MSSAU->getMemorySSA()->verifyMemorySSA();
573 return Changed;
574}
575
576bool llvm::sinkRegionForLoopNest(
577 DomTreeNode *N, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
578 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
579 Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
580 SinkAndHoistLICMFlags &Flags, OptimizationRemarkEmitter *ORE) {
581
582 bool Changed = false;
583 SmallPriorityWorklist<Loop *, 4> Worklist;
584 Worklist.insert(CurLoop);
585 appendLoopsToWorklist(*CurLoop, Worklist);
586 while (!Worklist.empty()) {
587 Loop *L = Worklist.pop_back_val();
588 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI,
589 TTI, L, MSSAU, SafetyInfo, Flags, ORE, CurLoop);
590 }
591 return Changed;
592}
593
594namespace {
595// This is a helper class for hoistRegion to make it able to hoist control flow
596// in order to be able to hoist phis. The way this works is that we initially
597// start hoisting to the loop preheader, and when we see a loop invariant branch
598// we make note of this. When we then come to hoist an instruction that's
599// conditional on such a branch we duplicate the branch and the relevant control
600// flow, then hoist the instruction into the block corresponding to its original
601// block in the duplicated control flow.
602class ControlFlowHoister {
603private:
604 // Information about the loop we are hoisting from
605 LoopInfo *LI;
606 DominatorTree *DT;
607 Loop *CurLoop;
608 MemorySSAUpdater *MSSAU;
609
610 // A map of blocks in the loop to the block their instructions will be hoisted
611 // to.
612 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
613
614 // The branches that we can hoist, mapped to the block that marks a
615 // convergence point of their control flow.
616 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
617
618public:
619 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
620 MemorySSAUpdater *MSSAU)
621 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
622
623 void registerPossiblyHoistableBranch(BranchInst *BI) {
624 // We can only hoist conditional branches with loop invariant operands.
625 if (!ControlFlowHoisting || !BI->isConditional() ||
626 !CurLoop->hasLoopInvariantOperands(BI))
627 return;
628
629 // The branch destinations need to be in the loop, and we don't gain
630 // anything by duplicating conditional branches with duplicate successors,
631 // as it's essentially the same as an unconditional branch.
632 BasicBlock *TrueDest = BI->getSuccessor(0);
633 BasicBlock *FalseDest = BI->getSuccessor(1);
634 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
635 TrueDest == FalseDest)
636 return;
637
638 // We can hoist BI if one branch destination is the successor of the other,
639 // or both have common successor which we check by seeing if the
640 // intersection of their successors is non-empty.
641 // TODO: This could be expanded to allowing branches where both ends
642 // eventually converge to a single block.
643 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
644 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
645 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
646 BasicBlock *CommonSucc = nullptr;
647 if (TrueDestSucc.count(FalseDest)) {
648 CommonSucc = FalseDest;
649 } else if (FalseDestSucc.count(TrueDest)) {
650 CommonSucc = TrueDest;
651 } else {
652 set_intersect(TrueDestSucc, FalseDestSucc);
653 // If there's one common successor use that.
654 if (TrueDestSucc.size() == 1)
655 CommonSucc = *TrueDestSucc.begin();
656 // If there's more than one pick whichever appears first in the block list
657 // (we can't use the value returned by TrueDestSucc.begin() as it's
658 // unpredictable which element gets returned).
659 else if (!TrueDestSucc.empty()) {
660 Function *F = TrueDest->getParent();
661 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
662 auto It = llvm::find_if(*F, IsSucc);
663 assert(It != F->end() && "Could not find successor in function");
664 CommonSucc = &*It;
665 }
666 }
667 // The common successor has to be dominated by the branch, as otherwise
668 // there will be some other path to the successor that will not be
669 // controlled by this branch so any phi we hoist would be controlled by the
670 // wrong condition. This also takes care of avoiding hoisting of loop back
671 // edges.
672 // TODO: In some cases this could be relaxed if the successor is dominated
673 // by another block that's been hoisted and we can guarantee that the
674 // control flow has been replicated exactly.
675 if (CommonSucc && DT->dominates(BI, CommonSucc))
676 HoistableBranches[BI] = CommonSucc;
677 }
678
679 bool canHoistPHI(PHINode *PN) {
680 // The phi must have loop invariant operands.
681 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
682 return false;
683 // We can hoist phis if the block they are in is the target of hoistable
684 // branches which cover all of the predecessors of the block.
685 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
686 BasicBlock *BB = PN->getParent();
687 for (BasicBlock *PredBB : predecessors(BB))
688 PredecessorBlocks.insert(PredBB);
689 // If we have less predecessor blocks than predecessors then the phi will
690 // have more than one incoming value for the same block which we can't
691 // handle.
692 // TODO: This could be handled be erasing some of the duplicate incoming
693 // values.
694 if (PredecessorBlocks.size() != pred_size(BB))
695 return false;
696 for (auto &Pair : HoistableBranches) {
697 if (Pair.second == BB) {
698 // Which blocks are predecessors via this branch depends on if the
699 // branch is triangle-like or diamond-like.
700 if (Pair.first->getSuccessor(0) == BB) {
701 PredecessorBlocks.erase(Pair.first->getParent());
702 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
703 } else if (Pair.first->getSuccessor(1) == BB) {
704 PredecessorBlocks.erase(Pair.first->getParent());
705 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
706 } else {
707 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
708 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
709 }
710 }
711 }
712 // PredecessorBlocks will now be empty if for every predecessor of BB we
713 // found a hoistable branch source.
714 return PredecessorBlocks.empty();
715 }
716
717 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
718 if (!ControlFlowHoisting)
719 return CurLoop->getLoopPreheader();
720 // If BB has already been hoisted, return that
721 if (HoistDestinationMap.count(BB))
722 return HoistDestinationMap[BB];
723
724 // Check if this block is conditional based on a pending branch
725 auto HasBBAsSuccessor =
726 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
727 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
728 Pair.first->getSuccessor(1) == BB);
729 };
730 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
731
732 // If not involved in a pending branch, hoist to preheader
733 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
734 if (It == HoistableBranches.end()) {
735 LLVM_DEBUG(dbgs() << "LICM using "
736 << InitialPreheader->getNameOrAsOperand()
737 << " as hoist destination for "
738 << BB->getNameOrAsOperand() << "\n");
739 HoistDestinationMap[BB] = InitialPreheader;
740 return InitialPreheader;
741 }
742 BranchInst *BI = It->first;
743 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
744 HoistableBranches.end() &&
745 "BB is expected to be the target of at most one branch");
746
747 LLVMContext &C = BB->getContext();
748 BasicBlock *TrueDest = BI->getSuccessor(0);
749 BasicBlock *FalseDest = BI->getSuccessor(1);
750 BasicBlock *CommonSucc = HoistableBranches[BI];
751 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
752
753 // Create hoisted versions of blocks that currently don't have them
754 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
755 if (HoistDestinationMap.count(Orig))
756 return HoistDestinationMap[Orig];
757 BasicBlock *New =
758 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
759 HoistDestinationMap[Orig] = New;
760 DT->addNewBlock(New, HoistTarget);
761 if (CurLoop->getParentLoop())
762 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
763 ++NumCreatedBlocks;
764 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
765 << " as hoist destination for " << Orig->getName()
766 << "\n");
767 return New;
768 };
769 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
770 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
771 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
772
773 // Link up these blocks with branches.
774 if (!HoistCommonSucc->getTerminator()) {
775 // The new common successor we've generated will branch to whatever that
776 // hoist target branched to.
777 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
778 assert(TargetSucc && "Expected hoist target to have a single successor");
779 HoistCommonSucc->moveBefore(TargetSucc);
780 BranchInst::Create(TargetSucc, HoistCommonSucc);
781 }
782 if (!HoistTrueDest->getTerminator()) {
783 HoistTrueDest->moveBefore(HoistCommonSucc);
784 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
785 }
786 if (!HoistFalseDest->getTerminator()) {
787 HoistFalseDest->moveBefore(HoistCommonSucc);
788 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
789 }
790
791 // If BI is being cloned to what was originally the preheader then
792 // HoistCommonSucc will now be the new preheader.
793 if (HoistTarget == InitialPreheader) {
794 // Phis in the loop header now need to use the new preheader.
795 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
796 MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
797 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
798 // The new preheader dominates the loop header.
799 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
800 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
801 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
802 // The preheader hoist destination is now the new preheader, with the
803 // exception of the hoist destination of this branch.
804 for (auto &Pair : HoistDestinationMap)
805 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
806 Pair.second = HoistCommonSucc;
807 }
808
809 // Now finally clone BI.
810 ReplaceInstWithInst(
811 HoistTarget->getTerminator(),
812 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
813 ++NumClonedBranches;
814
815 assert(CurLoop->getLoopPreheader() &&
816 "Hoisting blocks should not have destroyed preheader");
817 return HoistDestinationMap[BB];
818 }
819};
820} // namespace
821
822// Hoisting/sinking an instruction out of a loop isn't always beneficial. It's
823// only worthwhile if the destination block is actually colder than the current
824// block.
825static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
826 OptimizationRemarkEmitter *ORE,
827 BlockFrequencyInfo *BFI) {
828 // Check block frequency only when runtime profile is available
829 // to avoid pathological cases. With static profile, lean towards
830 // hoisting because it helps canonicalize the loop for the vectorizer.
831 if (!DstBlock->getParent()->hasProfileData())
832 return true;
833
834 if (!HoistSinkColdnessThreshold || !BFI)
835 return true;
836
837 BasicBlock *SrcBlock = I.getParent();
838 if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
839 BFI->getBlockFreq(SrcBlock).getFrequency()) {
840 ORE->emit([&]() {
841 return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
842 << "failed to sink or hoist instruction because containing block "
843 "has lower frequency than destination block";
844 });
845 return false;
846 }
847
848 return true;
849}
850
851/// Walk the specified region of the CFG (defined by all blocks dominated by
852/// the specified block, and that are in the current loop) in depth first
853/// order w.r.t the DominatorTree. This allows us to visit definitions before
854/// uses, allowing us to hoist a loop body in one pass without iteration.
855///
856bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
857 DominatorTree *DT, BlockFrequencyInfo *BFI,
858 TargetLibraryInfo *TLI, Loop *CurLoop,
859 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
860 ICFLoopSafetyInfo *SafetyInfo,
861 SinkAndHoistLICMFlags &Flags,
862 OptimizationRemarkEmitter *ORE, bool LoopNestMode) {
863 // Verify inputs.
864 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
865 CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
866 "Unexpected input to hoistRegion.");
867
868 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
869
870 // Keep track of instructions that have been hoisted, as they may need to be
871 // re-hoisted if they end up not dominating all of their uses.
872 SmallVector<Instruction *, 16> HoistedInstructions;
873
874 // For PHI hoisting to work we need to hoist blocks before their successors.
875 // We can do this by iterating through the blocks in the loop in reverse
876 // post-order.
877 LoopBlocksRPO Worklist(CurLoop);
878 Worklist.perform(LI);
879 bool Changed = false;
880 for (BasicBlock *BB : Worklist) {
881 // Only need to process the contents of this block if it is not part of a
882 // subloop (which would already have been processed).
883 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
884 continue;
885
886 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
887 Instruction &I = *II++;
888 // Try constant folding this instruction. If all the operands are
889 // constants, it is technically hoistable, but it would be better to
890 // just fold it.
891 if (Constant *C = ConstantFoldInstruction(
892 &I, I.getModule()->getDataLayout(), TLI)) {
893 LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
894 << '\n');
895 // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
896 I.replaceAllUsesWith(C);
897 if (isInstructionTriviallyDead(&I, TLI))
898 eraseInstruction(I, *SafetyInfo, MSSAU);
899 Changed = true;
900 continue;
901 }
902
903 // Try hoisting the instruction out to the preheader. We can only do
904 // this if all of the operands of the instruction are loop invariant and
905 // if it is safe to hoist the instruction. We also check block frequency
906 // to make sure instruction only gets hoisted into colder blocks.
907 // TODO: It may be safe to hoist if we are hoisting to a conditional block
908 // and we have accurately duplicated the control flow from the loop header
909 // to that block.
910 if (CurLoop->hasLoopInvariantOperands(&I) &&
911 canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/ nullptr, MSSAU,
912 true, &Flags, ORE) &&
913 worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
914 isSafeToExecuteUnconditionally(
915 I, DT, TLI, CurLoop, SafetyInfo, ORE,
916 CurLoop->getLoopPreheader()->getTerminator())) {
917 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
918 MSSAU, SE, ORE);
919 HoistedInstructions.push_back(&I);
920 Changed = true;
921 continue;
922 }
923
924 // Attempt to remove floating point division out of the loop by
925 // converting it to a reciprocal multiplication.
926 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
927 CurLoop->isLoopInvariant(I.getOperand(1))) {
928 auto Divisor = I.getOperand(1);
929 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
930 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
931 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
932 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
933 ReciprocalDivisor->insertBefore(&I);
934
935 auto Product =
936 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
937 Product->setFastMathFlags(I.getFastMathFlags());
938 SafetyInfo->insertInstructionTo(Product, I.getParent());
939 Product->insertAfter(&I);
940 I.replaceAllUsesWith(Product);
941 eraseInstruction(I, *SafetyInfo, MSSAU);
942
943 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
944 SafetyInfo, MSSAU, SE, ORE);
945 HoistedInstructions.push_back(ReciprocalDivisor);
946 Changed = true;
947 continue;
948 }
949
950 auto IsInvariantStart = [&](Instruction &I) {
951 using namespace PatternMatch;
952 return I.use_empty() &&
953 match(&I, m_Intrinsic<Intrinsic::invariant_start>());
954 };
955 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
956 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
957 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
958 };
959 if ((IsInvariantStart(I) || isGuard(&I)) &&
960 CurLoop->hasLoopInvariantOperands(&I) &&
961 MustExecuteWithoutWritesBefore(I)) {
962 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
963 MSSAU, SE, ORE);
964 HoistedInstructions.push_back(&I);
965 Changed = true;
966 continue;
967 }
968
969 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
970 if (CFH.canHoistPHI(PN)) {
971 // Redirect incoming blocks first to ensure that we create hoisted
972 // versions of those blocks before we hoist the phi.
973 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
974 PN->setIncomingBlock(
975 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
976 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
977 MSSAU, SE, ORE);
978 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
979 Changed = true;
980 continue;
981 }
982 }
983
984 // Remember possibly hoistable branches so we can actually hoist them
985 // later if needed.
986 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
987 CFH.registerPossiblyHoistableBranch(BI);
988 }
989 }
990
991 // If we hoisted instructions to a conditional block they may not dominate
992 // their uses that weren't hoisted (such as phis where some operands are not
993 // loop invariant). If so make them unconditional by moving them to their
994 // immediate dominator. We iterate through the instructions in reverse order
995 // which ensures that when we rehoist an instruction we rehoist its operands,
996 // and also keep track of where in the block we are rehoisting to, to make sure
997 // that we rehoist instructions before the instructions that use them.
998 Instruction *HoistPoint = nullptr;
999 if (ControlFlowHoisting) {
1000 for (Instruction *I : reverse(HoistedInstructions)) {
1001 if (!llvm::all_of(I->uses(),
1002 [&](Use &U) { return DT->dominates(I, U); })) {
1003 BasicBlock *Dominator =
1004 DT->getNode(I->getParent())->getIDom()->getBlock();
1005 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1006 if (HoistPoint)
1007 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1008 "New hoist point expected to dominate old hoist point");
1009 HoistPoint = Dominator->getTerminator();
1010 }
1011 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1012 << HoistPoint->getParent()->getNameOrAsOperand()
1013 << ": " << *I << "\n");
1014 moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1015 HoistPoint = I;
1016 Changed = true;
1017 }
1018 }
1019 }
1020 if (VerifyMemorySSA)
1021 MSSAU->getMemorySSA()->verifyMemorySSA();
1022
1023 // Now that we've finished hoisting make sure that LI and DT are still
1024 // valid.
1025#ifdef EXPENSIVE_CHECKS
1026 if (Changed) {
1027 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1028 "Dominator tree verification failed");
1029 LI->verify(*DT);
1030 }
1031#endif
1032
1033 return Changed;
1034}
1035
1036// Return true if LI is invariant within scope of the loop. LI is invariant if
1037// CurLoop is dominated by an invariant.start representing the same memory
1038// location and size as the memory location LI loads from, and also the
1039// invariant.start has no uses.
1040static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1041 Loop *CurLoop) {
1042 Value *Addr = LI->getOperand(0);
1043 const DataLayout &DL = LI->getModule()->getDataLayout();
1044 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1045
1046 // It is not currently possible for clang to generate an invariant.start
1047 // intrinsic with scalable vector types because we don't support thread local
1048 // sizeless types and we don't permit sizeless types in structs or classes.
1049 // Furthermore, even if support is added for this in future the intrinsic
1050 // itself is defined to have a size of -1 for variable sized objects. This
1051 // makes it impossible to verify if the intrinsic envelops our region of
1052 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1053 // types would have a -1 parameter, but the former is clearly double the size
1054 // of the latter.
1055 if (LocSizeInBits.isScalable())
1056 return false;
1057
1058 // if the type is i8 addrspace(x)*, we know this is the type of
1059 // llvm.invariant.start operand
1060 auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1061 LI->getPointerAddressSpace());
1062 unsigned BitcastsVisited = 0;
1063 // Look through bitcasts until we reach the i8* type (this is invariant.start
1064 // operand type).
1065 while (Addr->getType() != PtrInt8Ty) {
1066 auto *BC = dyn_cast<BitCastInst>(Addr);
1067 // Avoid traversing high number of bitcast uses.
1068 if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1069 return false;
1070 Addr = BC->getOperand(0);
1071 }
1072
1073 unsigned UsesVisited = 0;
1074 // Traverse all uses of the load operand value, to see if invariant.start is
1075 // one of the uses, and whether it dominates the load instruction.
1076 for (auto *U : Addr->users()) {
1077 // Avoid traversing for Load operand with high number of users.
1078 if (++UsesVisited > MaxNumUsesTraversed)
1079 return false;
1080 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1081 // If there are escaping uses of the invariant.start instruction, the load may be
1082 // non-invariant.
1083 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1084 !II->use_empty())
1085 continue;
1086 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1087 // The intrinsic supports having a -1 argument for variable sized objects
1088 // so we should check for that here.
1089 if (InvariantSize->isNegative())
1090 continue;
1091 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1092 // Confirm the invariant.start location size contains the load operand size
1093 // in bits. Also, the invariant.start should dominate the load, and we
1094 // should not hoist the load out of a loop that contains this dominating
1095 // invariant.start.
1096 if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
1097 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1098 return true;
1099 }
1100
1101 return false;
1102}
1103
1104namespace {
1105/// Return true if-and-only-if we know how to (mechanically) both hoist and
1106/// sink a given instruction out of a loop. Does not address legality
1107/// concerns such as aliasing or speculation safety.
1108bool isHoistableAndSinkableInst(Instruction &I) {
1109 // Only these instructions are hoistable/sinkable.
1110 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
2
Assuming 'I' is a 'LoadInst'
3
Returning the value 1, which participates in a condition later
1111 isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1112 isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1113 isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1114 isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1115 isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1116 isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1117}
1118/// Return true if all of the alias sets within this AST are known not to
1119/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1120bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1121 const Loop *L) {
1122 if (CurAST) {
67
Assuming 'CurAST' is null
68
Taking false branch
1123 for (AliasSet &AS : *CurAST) {
1124 if (!AS.isForwardingAliasSet() && AS.isMod()) {
1125 return false;
1126 }
1127 }
1128 return true;
1129 } else { /*MSSAU*/
1130 for (auto *BB : L->getBlocks())
69
Assuming '__begin2' is not equal to '__end2'
1131 if (MSSAU->getMemorySSA()->getBlockDefs(BB))
70
Called C++ object pointer is null
1132 return false;
1133 return true;
1134 }
1135}
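
The warning above fires because the MSSAU branch of isReadOnly assumes MSSAU is non-null whenever CurAST is null, an invariant that only lives in an assert that NDEBUG removes. A hedged sketch of an explicit guard (an illustration, not the upstream fix; it reuses only types and calls already present in this file, whose headers are included above) would be:

  static bool isReadOnlyGuarded(AliasSetTracker *CurAST,
                                const MemorySSAUpdater *MSSAU, const Loop *L) {
    if (CurAST) {
      for (AliasSet &AS : *CurAST)
        if (!AS.isForwardingAliasSet() && AS.isMod())
          return false;
      return true;
    }
    if (!MSSAU) // conservatively treat a missing MemorySSA as "not read only"
      return false;
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }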
1136
1137/// Return true if I is the only Instruction with a MemoryAccess in L.
1138bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1139 const MemorySSAUpdater *MSSAU) {
1140 for (auto *BB : L->getBlocks())
1141 if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1142 int NotAPhi = 0;
1143 for (const auto &Acc : *Accs) {
1144 if (isa<MemoryPhi>(&Acc))
1145 continue;
1146 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1147 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1148 return false;
1149 }
1150 }
1151 return true;
1152}
1153}
1154
1155bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1156 Loop *CurLoop, AliasSetTracker *CurAST,
1157 MemorySSAUpdater *MSSAU,
1158 bool TargetExecutesOncePerLoop,
1159 SinkAndHoistLICMFlags *Flags,
1160 OptimizationRemarkEmitter *ORE) {
1161 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
1162 "Either AliasSetTracker or MemorySSA should be initialized.");
1163
1164 // If we don't understand the instruction, bail early.
1165 if (!isHoistableAndSinkableInst(I))
1
Calling 'isHoistableAndSinkableInst'
4
Returning from 'isHoistableAndSinkableInst'
1166 return false;
1167
1168 MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
5
Taking false branch
6
Assuming 'MSSAU' is null
7
'?' condition is false
1169 if (MSSA)
7.1
'MSSA' is null
8
Taking false branch
1170 assert(Flags != nullptr && "Flags cannot be null.")(static_cast<void> (0));
1171
1172 // Loads have extra constraints we have to verify before we can hoist them.
1173 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
9.1
'LI' is null
9
Assuming the object is not a 'LoadInst'
10
Taking false branch
1174 if (!LI->isUnordered())
1175 return false; // Don't sink/hoist volatile or ordered atomic loads!
1176
1177 // Loads from constant memory are always safe to move, even if they end up
1178 // in the same alias set as something that ends up being modified.
1179 if (AA->pointsToConstantMemory(LI->getOperand(0)))
1180 return true;
1181 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1182 return true;
1183
1184 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1185 return false; // Don't risk duplicating unordered loads
1186
1187 // This checks for an invariant.start dominating the load.
1188 if (isLoadInvariantInLoop(LI, DT, CurLoop))
1189 return true;
1190
1191 bool Invalidated;
1192 if (CurAST)
1193 Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1194 CurLoop, AA);
1195 else
1196 Invalidated = pointerInvalidatedByLoopWithMSSA(
1197 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
1198 // Check loop-invariant address because this may also be a sinkable load
1199 // whose address is not necessarily loop-invariant.
1200 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1201 ORE->emit([&]() {
1202 return OptimizationRemarkMissed(
1203 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressInvalidated", LI)
1204 << "failed to move load with loop-invariant address "
1205 "because the loop may invalidate its value";
1206 });
1207
1208 return !Invalidated;
1209 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
11.1
'CI' is non-null
11
Assuming the object is a 'CallInst'
12
Taking true branch
1210 // Don't sink or hoist dbg info; it's legal, but not useful.
1211 if (isa<DbgInfoIntrinsic>(I))
13
Assuming 'I' is not a 'DbgInfoIntrinsic'
14
Taking false branch
1212 return false;
1213
1214 // Don't sink calls which can throw.
1215 if (CI->mayThrow())
15
Assuming the condition is false
16
Taking false branch
1216 return false;
1217
1218 // Convergent attribute has been used on operations that involve
1219 // inter-thread communication whose results are implicitly affected by the
1220 // enclosing control flow. It is not safe to hoist or sink such operations
1221 // across control flow.
1222 if (CI->isConvergent())
17
Calling 'CallBase::isConvergent'
32
Returning from 'CallBase::isConvergent'
33
Assuming the condition is false
34
Taking false branch
1223 return false;
1224
1225 using namespace PatternMatch;
1226 if (match(CI, m_Intrinsic<Intrinsic::assume>()))
35
Calling 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
42
Returning from 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
43
Taking false branch
1227 // Assumes don't actually alias anything or throw
1228 return true;
1229
1230 if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
44
Calling 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
51
Returning from 'match<llvm::CallInst, llvm::PatternMatch::IntrinsicID_match>'
52
Taking false branch
1231 // Widenable conditions don't actually alias anything or throw
1232 return true;
1233
1234 // Handle simple cases by querying alias analysis.
1235 FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1236 if (Behavior == FMRB_DoesNotAccessMemory)
53
Assuming 'Behavior' is not equal to FMRB_DoesNotAccessMemory
54
Taking false branch
1237 return true;
1238 if (AAResults::onlyReadsMemory(Behavior)) {
55
Calling 'AAResults::onlyReadsMemory'
58
Returning from 'AAResults::onlyReadsMemory'
59
Taking true branch
1239 // A readonly argmemonly function only reads from memory pointed to by
1240 // its arguments with arbitrary offsets. If we can prove there are no
1241 // writes to this memory in the loop, we can hoist or sink.
1242 if (AAResults::onlyAccessesArgPointees(Behavior)) {
60
Calling 'AAResults::onlyAccessesArgPointees'
63
Returning from 'AAResults::onlyAccessesArgPointees'
64
Taking false branch
1243 // TODO: expand to writeable arguments
1244 for (Value *Op : CI->arg_operands())
1245 if (Op->getType()->isPointerTy()) {
1246 bool Invalidated;
1247 if (CurAST)
1248 Invalidated = pointerInvalidatedByLoop(
1249 MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1250 else
1251 Invalidated = pointerInvalidatedByLoopWithMSSA(
1252 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1253 *Flags);
1254 if (Invalidated)
1255 return false;
1256 }
1257 return true;
1258 }
1259
1260 // If this call only reads from memory and there are no writes to memory
1261 // in the loop, we can hoist or sink the call as appropriate.
1262 if (isReadOnly(CurAST, MSSAU, CurLoop))
65
Passing null pointer value via 2nd parameter 'MSSAU'
66
Calling 'isReadOnly'
1263 return true;
1264 }
1265
1266 // FIXME: This should use mod/ref information to see if we can hoist or
1267 // sink the call.
1268
1269 return false;
1270 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1271 // Fences alias (most) everything to provide ordering. For the moment,
1272 // just give up if there are any other memory operations in the loop.
1273 if (CurAST) {
1274 auto Begin = CurAST->begin();
1275 assert(Begin != CurAST->end() && "must contain FI")(static_cast<void> (0));
1276 if (std::next(Begin) != CurAST->end())
1277 // constant memory for instance, TODO: handle better
1278 return false;
1279 auto *UniqueI = Begin->getUniqueInstruction();
1280 if (!UniqueI)
1281 // other memory op, give up
1282 return false;
1283 (void)FI; // suppress unused variable warning
1284 assert(UniqueI == FI && "AS must contain FI")(static_cast<void> (0));
1285 return true;
1286 } else // MSSAU
1287 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1288 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1289 if (!SI->isUnordered())
1290 return false; // Don't sink/hoist volatile or ordered atomic store!
1291
1292 // We can only hoist a store that we can prove writes a value which is not
1293 // read or overwritten within the loop. For those cases, we fall back to
1294 // load store promotion instead. TODO: We can extend this to cases where
1295 // there is exactly one write to the location and that write dominates an
1296 // arbitrary number of reads in the loop.
1297 if (CurAST) {
1298 auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1299
1300 if (AS.isRef() || !AS.isMustAlias())
1301 // Quick exit test, handled by the full path below as well.
1302 return false;
1303 auto *UniqueI = AS.getUniqueInstruction();
1304 if (!UniqueI)
1305 // other memory op, give up
1306 return false;
1307 assert(UniqueI == SI && "AS must contain SI")(static_cast<void> (0));
1308 return true;
1309 } else { // MSSAU
1310 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1311 return true;
1312 // If there are more accesses than the Promotion cap or no "quota" to
1313 // check clobber, then give up as we're not walking a list that long.
1314 if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1315 return false;
1316 // If there are interfering Uses (i.e. their defining access is in the
1317 // loop), or ordered loads (stored as Defs!), don't move this store.
1318 // Could do better here, but this is conservatively correct.
1319 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1320 // moving accesses. Can also extend to dominating uses.
1321 auto *SIMD = MSSA->getMemoryAccess(SI);
1322 for (auto *BB : CurLoop->getBlocks())
1323 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1324 for (const auto &MA : *Accesses)
1325 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1326 auto *MD = MU->getDefiningAccess();
1327 if (!MSSA->isLiveOnEntryDef(MD) &&
1328 CurLoop->contains(MD->getBlock()))
1329 return false;
1330 // Disable hoisting past potentially interfering loads. Optimized
1331 // Uses may point to an access outside the loop, as getClobbering
1332 // checks the previous iteration when walking the backedge.
1333 // FIXME: More precise: no Uses that alias SI.
1334 if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1335 return false;
1336 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1337 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1338 (void)LI; // Silence warning.
1339 assert(!LI->isUnordered() && "Expected unordered load")(static_cast<void> (0));
1340 return false;
1341 }
1342 // Any call, while it may not be clobbering SI, it may be a use.
1343 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1344 // Check if the call may read from the memory location written
1345 // to by SI. Check CI's attributes and arguments; the number of
1346 // such checks performed is limited above by NoOfMemAccTooLarge.
1347 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1348 if (isModOrRefSet(MRI))
1349 return false;
1350 }
1351 }
1352 }
1353 auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1354 Flags->incrementClobberingCalls();
1355 // If there are no clobbering Defs in the loop, store is safe to hoist.
1356 return MSSA->isLiveOnEntryDef(Source) ||
1357 !CurLoop->contains(Source->getBlock());
1358 }
1359 }
1360
1361 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing")(static_cast<void> (0));
1362
1363 // We've established mechanical ability and aliasing, it's up to the caller
1364 // to check fault safety
1365 return true;
1366}
1367
1368/// Returns true if a PHINode is trivially replaceable with an
1369/// Instruction.
1370/// This is true when all incoming values are that instruction.
1371/// This pattern occurs most often with LCSSA PHI nodes.
1372///
1373static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1374 for (const Value *IncValue : PN.incoming_values())
1375 if (IncValue != &I)
1376 return false;
1377
1378 return true;
1379}
1380
1381/// Return true if the instruction is free in the loop.
1382static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1383 const TargetTransformInfo *TTI) {
1384
1385 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1386 if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1387 TargetTransformInfo::TCC_Free)
1388 return false;
1389 // For a GEP, we cannot simply use getUserCost because currently it
1390 // optimistically assumes that a GEP will fold into the addressing mode
1391 // regardless of its users.
1392 const BasicBlock *BB = GEP->getParent();
1393 for (const User *U : GEP->users()) {
1394 const Instruction *UI = cast<Instruction>(U);
1395 if (CurLoop->contains(UI) &&
1396 (BB != UI->getParent() ||
1397 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1398 return false;
1399 }
1400 return true;
1401 } else
1402 return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1403 TargetTransformInfo::TCC_Free;
1404}
1405
1406/// Return true if the only users of this instruction are outside of
1407/// the loop. If this is true, we can sink the instruction to the exit
1408/// blocks of the loop.
1409///
1410/// We also return true if the instruction could be folded away in lowering.
1411/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1412static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1413 const LoopSafetyInfo *SafetyInfo,
1414 TargetTransformInfo *TTI, bool &FreeInLoop,
1415 bool LoopNestMode) {
1416 const auto &BlockColors = SafetyInfo->getBlockColors();
1417 bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1418 for (const User *U : I.users()) {
1419 const Instruction *UI = cast<Instruction>(U);
1420 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1421 const BasicBlock *BB = PN->getParent();
1422 // We cannot sink uses in catchswitches.
1423 if (isa<CatchSwitchInst>(BB->getTerminator()))
1424 return false;
1425
1426 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1427 // phi use is too muddled.
1428 if (isa<CallInst>(I))
1429 if (!BlockColors.empty() &&
1430 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1431 return false;
1432
1433 if (LoopNestMode) {
1434 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1435 UI->getNumOperands() == 1) {
1436 if (!CurLoop->contains(UI))
1437 break;
1438 UI = cast<Instruction>(UI->user_back());
1439 }
1440 }
1441 }
1442
1443 if (CurLoop->contains(UI)) {
1444 if (IsFree) {
1445 FreeInLoop = true;
1446 continue;
1447 }
1448 return false;
1449 }
1450 }
1451 return true;
1452}
1453
1454static Instruction *cloneInstructionInExitBlock(
1455 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1456 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1457 Instruction *New;
1458 if (auto *CI = dyn_cast<CallInst>(&I)) {
1459 const auto &BlockColors = SafetyInfo->getBlockColors();
1460
1461 // Sinking call-sites need to be handled differently from other
1462 // instructions. The cloned call-site needs a funclet bundle operand
1463 // appropriate for its location in the CFG.
1464 SmallVector<OperandBundleDef, 1> OpBundles;
1465 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1466 BundleIdx != BundleEnd; ++BundleIdx) {
1467 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1468 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1469 continue;
1470
1471 OpBundles.emplace_back(Bundle);
1472 }
1473
1474 if (!BlockColors.empty()) {
1475 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1476 assert(CV.size() == 1 && "non-unique color for exit block!")(static_cast<void> (0));
1477 BasicBlock *BBColor = CV.front();
1478 Instruction *EHPad = BBColor->getFirstNonPHI();
1479 if (EHPad->isEHPad())
1480 OpBundles.emplace_back("funclet", EHPad);
1481 }
1482
1483 New = CallInst::Create(CI, OpBundles);
1484 } else {
1485 New = I.clone();
1486 }
1487
1488 ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1489 if (!I.getName().empty())
1490 New->setName(I.getName() + ".le");
1491
1492 if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1493 // Create a new MemoryAccess and let MemorySSA set its defining access.
1494 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1495 New, nullptr, New->getParent(), MemorySSA::Beginning);
1496 if (NewMemAcc) {
1497 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1498 MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1499 else {
1500 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1501 MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1502 }
1503 }
1504 }
1505
1506 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1507 // this is particularly cheap because we can rip off the PHI node that we're
1508 // replacing for the number and blocks of the predecessors.
1509 // OPT: If this shows up in a profile, we can instead finish sinking all
1510 // invariant instructions, and then walk their operands to re-establish
1511 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1512 // sinking bottom-up.
1513 for (Use &Op : New->operands())
1514 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1515 auto *OInst = cast<Instruction>(Op.get());
1516 PHINode *OpPN =
1517 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1518 OInst->getName() + ".lcssa", &ExitBlock.front());
1519 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1520 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1521 Op = OpPN;
1522 }
1523 return New;
1524}
1525
1526static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1527 MemorySSAUpdater *MSSAU) {
1528 if (MSSAU)
1529 MSSAU->removeMemoryAccess(&I);
1530 SafetyInfo.removeInstruction(&I);
1531 I.eraseFromParent();
1532}
1533
1534static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1535 ICFLoopSafetyInfo &SafetyInfo,
1536 MemorySSAUpdater *MSSAU,
1537 ScalarEvolution *SE) {
1538 SafetyInfo.removeInstruction(&I);
1539 SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1540 I.moveBefore(&Dest);
1541 if (MSSAU)
1542 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1543 MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1544 MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1545 MemorySSA::BeforeTerminator);
1546 if (SE)
1547 SE->forgetValue(&I);
1548}
1549
1550static Instruction *sinkThroughTriviallyReplaceablePHI(
1551 PHINode *TPN, Instruction *I, LoopInfo *LI,
1552 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1553 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1554 MemorySSAUpdater *MSSAU) {
1555 assert(isTriviallyReplaceablePHI(*TPN, *I) &&(static_cast<void> (0))
1556 "Expect only trivially replaceable PHI")(static_cast<void> (0));
1557 BasicBlock *ExitBlock = TPN->getParent();
1558 Instruction *New;
1559 auto It = SunkCopies.find(ExitBlock);
1560 if (It != SunkCopies.end())
1561 New = It->second;
1562 else
1563 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1564 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1565 return New;
1566}
1567
1568static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1569 BasicBlock *BB = PN->getParent();
1570 if (!BB->canSplitPredecessors())
1571 return false;
1572 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1573 // it requires updating BlockColors for all offspring blocks accordingly. By
1574 // skipping this corner case, we can make updating BlockColors after splitting
1575 // predecessors fairly simple.
1576 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1577 return false;
1578 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1579 BasicBlock *BBPred = *PI;
1580 if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1581 isa<CallBrInst>(BBPred->getTerminator()))
1582 return false;
1583 }
1584 return true;
1585}
1586
1587static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1588 LoopInfo *LI, const Loop *CurLoop,
1589 LoopSafetyInfo *SafetyInfo,
1590 MemorySSAUpdater *MSSAU) {
1591#ifndef NDEBUG1
1592 SmallVector<BasicBlock *, 32> ExitBlocks;
1593 CurLoop->getUniqueExitBlocks(ExitBlocks);
1594 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1595 ExitBlocks.end());
1596#endif
1597 BasicBlock *ExitBB = PN->getParent();
1598 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.")(static_cast<void> (0));
1599
1600 // Split predecessors of the loop exit so that instructions in the loop are
1601 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1602 // loop in the canonical form where each predecessor of each exit block should
1603 // be contained within the loop. For example, this will convert the loop below
1604 // from
1605 //
1606 // LB1:
1607 // %v1 =
1608 // br %LE, %LB2
1609 // LB2:
1610 // %v2 =
1611 // br %LE, %LB1
1612 // LE:
1613 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1614 //
1615 // to
1616 //
1617 // LB1:
1618 // %v1 =
1619 // br %LE.split, %LB2
1620 // LB2:
1621 // %v2 =
1622 // br %LE.split2, %LB1
1623 // LE.split:
1624 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1625 // br %LE
1626 // LE.split2:
1627 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1628 // br %LE
1629 // LE:
1630 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1631 //
1632 const auto &BlockColors = SafetyInfo->getBlockColors();
1633 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1634 while (!PredBBs.empty()) {
1635 BasicBlock *PredBB = *PredBBs.begin();
1636 assert(CurLoop->contains(PredBB) &&(static_cast<void> (0))
1637 "Expect all predecessors are in the loop")(static_cast<void> (0));
1638 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1639 BasicBlock *NewPred = SplitBlockPredecessors(
1640 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1641 // Since we do not allow splitting EH-block with BlockColors in
1642 // canSplitPredecessors(), we can simply assign predecessor's color to
1643 // the new block.
1644 if (!BlockColors.empty())
1645 // Grab a reference to the ColorVector to be inserted before getting the
1646 // reference to the vector we are copying because inserting the new
1647 // element in BlockColors might cause the map to be reallocated.
1648 SafetyInfo->copyColors(NewPred, PredBB);
1649 }
1650 PredBBs.remove(PredBB);
1651 }
1652}
1653
1654/// When an instruction is found to only be used outside of the loop, this
1655/// function moves it to the exit blocks and patches up SSA form as needed.
1656/// This method is guaranteed to remove the original instruction from its
1657/// position, and may either delete it or move it to outside of the loop.
1658///
1659static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1660 BlockFrequencyInfo *BFI, const Loop *CurLoop,
1661 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1662 OptimizationRemarkEmitter *ORE) {
1663 bool Changed = false;
1664 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n")do { } while (false);
1665
1666 // Iterate over users to be ready for actual sinking. Replace users via
1667 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1668 SmallPtrSet<Instruction *, 8> VisitedUsers;
1669 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1670 auto *User = cast<Instruction>(*UI);
1671 Use &U = UI.getUse();
1672 ++UI;
1673
1674 if (VisitedUsers.count(User) || CurLoop->contains(User))
1675 continue;
1676
1677 if (!DT->isReachableFromEntry(User->getParent())) {
1678 U = UndefValue::get(I.getType());
1679 Changed = true;
1680 continue;
1681 }
1682
1683 // The user must be a PHI node.
1684 PHINode *PN = cast<PHINode>(User);
1685
1686 // Surprisingly, instructions can be used outside of loops without any
1687 // exits. This can only happen in PHI nodes if the incoming block is
1688 // unreachable.
1689 BasicBlock *BB = PN->getIncomingBlock(U);
1690 if (!DT->isReachableFromEntry(BB)) {
1691 U = UndefValue::get(I.getType());
1692 Changed = true;
1693 continue;
1694 }
1695
1696 VisitedUsers.insert(PN);
1697 if (isTriviallyReplaceablePHI(*PN, I))
1698 continue;
1699
1700 if (!canSplitPredecessors(PN, SafetyInfo))
1701 return Changed;
1702
1703 // Split predecessors of the PHI so that we can make users trivially
1704 // replaceable.
1705 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1706
1707 // Should rebuild the iterators, as they may be invalidated by
1708 // splitPredecessorsOfLoopExit().
1709 UI = I.user_begin();
1710 UE = I.user_end();
1711 }
1712
1713 if (VisitedUsers.empty())
1714 return Changed;
1715
1716 ORE->emit([&]() {
1717 return OptimizationRemark(DEBUG_TYPE"licm", "InstSunk", &I)
1718 << "sinking " << ore::NV("Inst", &I);
1719 });
1720 if (isa<LoadInst>(I))
1721 ++NumMovedLoads;
1722 else if (isa<CallInst>(I))
1723 ++NumMovedCalls;
1724 ++NumSunk;
1725
1726#ifndef NDEBUG1
1727 SmallVector<BasicBlock *, 32> ExitBlocks;
1728 CurLoop->getUniqueExitBlocks(ExitBlocks);
1729 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1730 ExitBlocks.end());
1731#endif
1732
1733 // Clones of this instruction. Don't create more than one per exit block!
1734 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1735
1736 // If this instruction is only used outside of the loop, then all users are
1737 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1738 // the instruction.
1739 // First check if I is worth sinking for all uses. Sink only when it is worthwhile
1740 // across all uses.
1741 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1742 SmallVector<PHINode *, 8> ExitPNs;
1743 for (auto *UI : Users) {
1744 auto *User = cast<Instruction>(UI);
1745
1746 if (CurLoop->contains(User))
1747 continue;
1748
1749 PHINode *PN = cast<PHINode>(User);
1750 assert(ExitBlockSet.count(PN->getParent()) &&(static_cast<void> (0))
1751 "The LCSSA PHI is not in an exit block!")(static_cast<void> (0));
1752 if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
1753 return Changed;
1754 }
1755
1756 ExitPNs.push_back(PN);
1757 }
1758
1759 for (auto *PN : ExitPNs) {
1760
1761 // The PHI must be trivially replaceable.
1762 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1763 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1764 PN->replaceAllUsesWith(New);
1765 eraseInstruction(*PN, *SafetyInfo, nullptr);
1766 Changed = true;
1767 }
1768 return Changed;
1769}
1770
1771/// When an instruction is found to only use loop-invariant operands and
1772/// is safe to hoist, this function is called to do the dirty work.
1773///
1774static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1775 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1776 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1777 OptimizationRemarkEmitter *ORE) {
1778 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "do { } while (false)
1779 << I << "\n")do { } while (false);
1780 ORE->emit([&]() {
1781 return OptimizationRemark(DEBUG_TYPE"licm", "Hoisted", &I) << "hoisting "
1782 << ore::NV("Inst", &I);
1783 });
1784
1785 // Metadata can be dependent on conditions we are hoisting above.
1786 // Conservatively strip all metadata on the instruction unless we were
1787 // guaranteed to execute I if we entered the loop, in which case the metadata
1788 // is valid in the loop preheader.
1789 // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1790 // then moving to the preheader means we should strip attributes on the call
1791 // that can cause UB since we may be hoisting above conditions that allowed
1792 // inferring those attributes. They may not be valid at the preheader.
1793 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1794 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1795 // time in isGuaranteedToExecute if we don't actually have anything to
1796 // drop. It is a compile time optimization, not required for correctness.
1797 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1798 I.dropUndefImplyingAttrsAndUnknownMetadata();
1799
1800 if (isa<PHINode>(I))
1801 // Move the new node to the end of the phi list in the destination block.
1802 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1803 else
1804 // Move the new node to the destination block, before its terminator.
1805 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1806
1807 I.updateLocationAfterHoist();
1808
1809 if (isa<LoadInst>(I))
1810 ++NumMovedLoads;
1811 else if (isa<CallInst>(I))
1812 ++NumMovedCalls;
1813 ++NumHoisted;
1814}
1815
1816/// Only sink or hoist an instruction if it is not a trapping instruction,
1817/// or if the instruction is known not to trap when moved to the preheader,
1818/// or if it is a trapping instruction that is guaranteed to execute.
1819static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1820 const DominatorTree *DT,
1821 const TargetLibraryInfo *TLI,
1822 const Loop *CurLoop,
1823 const LoopSafetyInfo *SafetyInfo,
1824 OptimizationRemarkEmitter *ORE,
1825 const Instruction *CtxI) {
1826 if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
1827 return true;
1828
1829 bool GuaranteedToExecute =
1830 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1831
1832 if (!GuaranteedToExecute) {
1833 auto *LI = dyn_cast<LoadInst>(&Inst);
1834 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1835 ORE->emit([&]() {
1836 return OptimizationRemarkMissed(
1837 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressCondExecuted", LI)
1838 << "failed to hoist load with loop-invariant address "
1839 "because load is conditionally executed";
1840 });
1841 }
1842
1843 return GuaranteedToExecute;
1844}
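
The missed-optimization remark emitted above concerns loads like the one in the following reduced sketch (illustrative only, not LLVM code): a loop-invariant load that executes only under a condition cannot be hoisted unless it is proven safe to execute unconditionally.

// Illustrative only: hoisting the load of *P to before the loop would
// introduce a read that the original program never performs when c is false
// (or when n == 0), which is unsound unless P is known dereferenceable or
// the load is guaranteed to execute.
int sumIfEnabled(const int *P, bool c, int n) {
  int S = 0;
  for (int i = 0; i < n; ++i)
    if (c)
      S += *P; // conditionally executed, loop-invariant load
  return S;
}
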
1845
1846namespace {
1847class LoopPromoter : public LoadAndStorePromoter {
1848 Value *SomePtr; // Designated pointer to store to.
1849 const SmallSetVector<Value *, 8> &PointerMustAliases;
1850 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1851 SmallVectorImpl<Instruction *> &LoopInsertPts;
1852 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1853 PredIteratorCache &PredCache;
1854 MemorySSAUpdater *MSSAU;
1855 LoopInfo &LI;
1856 DebugLoc DL;
1857 int Alignment;
1858 bool UnorderedAtomic;
1859 AAMDNodes AATags;
1860 ICFLoopSafetyInfo &SafetyInfo;
1861
1862 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1863 // (if legal) if doing so would add an out-of-loop use to an instruction
1864 // defined in-loop.
1865 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1866 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1867 return V;
1868
1869 Instruction *I = cast<Instruction>(V);
1870 // We need to create an LCSSA PHI node for the incoming value and
1871 // store that.
1872 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1873 I->getName() + ".lcssa", &BB->front());
1874 for (BasicBlock *Pred : PredCache.get(BB))
1875 PN->addIncoming(I, Pred);
1876 return PN;
1877 }
1878
1879public:
1880 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1881 const SmallSetVector<Value *, 8> &PMA,
1882 SmallVectorImpl<BasicBlock *> &LEB,
1883 SmallVectorImpl<Instruction *> &LIP,
1884 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1885 MemorySSAUpdater *MSSAU, LoopInfo &li, DebugLoc dl,
1886 int alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1887 ICFLoopSafetyInfo &SafetyInfo)
1888 : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1889 LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1890 PredCache(PIC), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1891 Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1892 SafetyInfo(SafetyInfo) {}
1893
1894 bool isInstInList(Instruction *I,
1895 const SmallVectorImpl<Instruction *> &) const override {
1896 Value *Ptr;
1897 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1898 Ptr = LI->getOperand(0);
1899 else
1900 Ptr = cast<StoreInst>(I)->getPointerOperand();
1901 return PointerMustAliases.count(Ptr);
1902 }
1903
1904 void doExtraRewritesBeforeFinalDeletion() override {
1905 // Insert stores in the loop exit blocks. Each exit block gets a
1906 // store of the live-out values that feed them. Since we've already told
1907 // the SSA updater about the defs in the loop and the preheader
1908 // definition, it is all set and we can start using it.
1909 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1910 BasicBlock *ExitBlock = LoopExitBlocks[i];
1911 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1912 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1913 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1914 Instruction *InsertPos = LoopInsertPts[i];
1915 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1916 if (UnorderedAtomic)
1917 NewSI->setOrdering(AtomicOrdering::Unordered);
1918 NewSI->setAlignment(Align(Alignment));
1919 NewSI->setDebugLoc(DL);
1920 if (AATags)
1921 NewSI->setAAMetadata(AATags);
1922
1923 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1924 MemoryAccess *NewMemAcc;
1925 if (!MSSAInsertPoint) {
1926 NewMemAcc = MSSAU->createMemoryAccessInBB(
1927 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1928 } else {
1929 NewMemAcc =
1930 MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1931 }
1932 MSSAInsertPts[i] = NewMemAcc;
1933 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1934 // FIXME: true for safety, false may still be correct.
1935 }
1936 }
1937
1938 void instructionDeleted(Instruction *I) const override {
1939 SafetyInfo.removeInstruction(I);
1940 MSSAU->removeMemoryAccess(I);
1941 }
1942};
1943
1944bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1945 DominatorTree *DT) {
1946 // We can perform the captured-before check against any instruction in the
1947 // loop header, as the loop header is reachable from any instruction inside
1948 // the loop.
1949 // TODO: ReturnCaptures=true shouldn't be necessary here.
1950 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1951 /* StoreCaptures */ true,
1952 L->getHeader()->getTerminator(), DT);
1953}
1954
1955/// Return true iff we can prove that a caller of this function cannot inspect
1956/// the contents of the provided object in a well defined program.
1957bool isKnownNonEscaping(Value *Object, const Loop *L,
1958 const TargetLibraryInfo *TLI, DominatorTree *DT) {
1959 if (isa<AllocaInst>(Object))
1960 // Since the alloca goes out of scope, we know the caller can't retain a
1961 // reference to it and be well defined. Thus, we don't need to check for
1962 // capture.
1963 return true;
1964
1965 // For all other objects we need to know that the caller can't possibly
1966 // have gotten a reference to the object. There are two components of
1967 // that:
1968 // 1) Object can't be escaped by this function. This is what
1969 // PointerMayBeCaptured checks.
1970 // 2) Object can't have been captured at definition site. For this, we
1971 // need to know the return value is noalias. At the moment, we use a
1972 // weaker condition and handle only AllocLikeFunctions (which are
1973 // known to be noalias). TODO
1974 return isAllocLikeFn(Object, TLI) &&
1975 isNotCapturedBeforeOrInLoop(Object, L, DT);
1976}
1977
1978} // namespace
1979
1980/// Try to promote memory values to scalars by sinking stores out of the
1981/// loop and moving loads to before the loop. We do this by looping over
1982/// the stores in the loop, looking for stores to Must pointers which are
1983/// loop invariant.
1984///
1985bool llvm::promoteLoopAccessesToScalars(
1986 const SmallSetVector<Value *, 8> &PointerMustAliases,
1987 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1988 SmallVectorImpl<Instruction *> &InsertPts,
1989 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1990 LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1991 Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1992 OptimizationRemarkEmitter *ORE) {
1993 // Verify inputs.
1994 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&(static_cast<void> (0))
1995 SafetyInfo != nullptr &&(static_cast<void> (0))
1996 "Unexpected Input to promoteLoopAccessesToScalars")(static_cast<void> (0));
1997
1998 Value *SomePtr = *PointerMustAliases.begin();
1999 BasicBlock *Preheader = CurLoop->getLoopPreheader();
2000
2001 // It is not safe to promote a load/store from the loop if the load/store is
2002 // conditional. For example, turning:
2003 //
2004 // for () { if (c) *P += 1; }
2005 //
2006 // into:
2007 //
2008 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
2009 //
2010 // is not safe, because *P may only be valid to access if 'c' is true.
2011 //
2012 // The safety property divides into two parts:
2013 // p1) The memory may not be dereferenceable on entry to the loop. In this
2014 // case, we can't insert the required load in the preheader.
2015 // p2) The memory model does not allow us to insert a store along any dynamic
2016 // path which did not originally have one.
2017 //
2018 // If at least one store is guaranteed to execute, both properties are
2019 // satisfied, and promotion is legal.
2020 //
2021 // This, however, is not a necessary condition. Even if no store/load is
2022 // guaranteed to execute, we can still establish these properties.
2023 // We can establish (p1) by proving that hoisting the load into the preheader
2024 // is safe (i.e. proving dereferenceability on all paths through the loop). We
2025 // can use any access within the alias set to prove dereferenceability,
2026 // since they're all must alias.
2027 //
2028 // There are two ways to establish (p2):
2029 // a) Prove the location is thread-local. In this case the memory model
2030 // requirement does not apply, and stores are safe to insert.
2031 // b) Prove a store dominates every exit block. In this case, if an exit
2032 // block is reached, the original dynamic path would have taken us through
2033 // the store, so inserting a store into the exit block is safe. Note that this
2034 // is different from the store being guaranteed to execute. For instance,
2035 // if an exception is thrown on the first iteration of the loop, the original
2036 // store is never executed, but the exit blocks are not executed either.
2037
2038 bool DereferenceableInPH = false;
2039 bool SafeToInsertStore = false;
2040
2041 SmallVector<Instruction *, 64> LoopUses;
2042
2043 // We start with an alignment of one and try to find instructions that allow
2044 // us to prove better alignment.
2045 Align Alignment;
2046 // Keep track of which types of access we see
2047 bool SawUnorderedAtomic = false;
2048 bool SawNotAtomic = false;
2049 AAMDNodes AATags;
2050
2051 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2052
2053 bool IsKnownThreadLocalObject = false;
2054 if (SafetyInfo->anyBlockMayThrow()) {
2055 // If a loop can throw, we have to insert a store along each unwind edge.
2056 // That said, we can't actually make the unwind edge explicit. Therefore,
2057 // we have to prove that the store is dead along the unwind edge. We do
2058 // this by proving that the caller can't have a reference to the object
2059 // after return and thus can't possibly load from the object.
2060 Value *Object = getUnderlyingObject(SomePtr);
2061 if (!isKnownNonEscaping(Object, CurLoop, TLI, DT))
2062 return false;
2063 // Subtlety: Allocas aren't visible to callers, but *are* potentially
2064 // visible to other threads if captured and used during their lifetimes.
2065 IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2066 }
2067
2068 // Check that all of the pointers in the alias set have the same type. We
2069 // cannot (yet) promote a memory location that is loaded and stored in
2070 // different sizes. While we are at it, collect alignment and AA info.
2071 for (Value *ASIV : PointerMustAliases) {
2072 // Check that all of the pointers in the alias set have the same type. We
2073 // cannot (yet) promote a memory location that is loaded and stored in
2074 // different sizes.
2075 if (SomePtr->getType() != ASIV->getType())
2076 return false;
2077
2078 for (User *U : ASIV->users()) {
2079 // Ignore instructions that are outside the loop.
2080 Instruction *UI = dyn_cast<Instruction>(U);
2081 if (!UI || !CurLoop->contains(UI))
2082 continue;
2083
2084 // If there is a non-load/store instruction in the loop, we can't promote
2085 // it.
2086 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2087 if (!Load->isUnordered())
2088 return false;
2089
2090 SawUnorderedAtomic |= Load->isAtomic();
2091 SawNotAtomic |= !Load->isAtomic();
2092
2093 Align InstAlignment = Load->getAlign();
2094
2095 // Note that proving a load safe to speculate requires proving
2096 // sufficient alignment at the target location. Proving it guaranteed
2097 // to execute does as well. Thus we can increase our guaranteed
2098 // alignment as well.
2099 if (!DereferenceableInPH || (InstAlignment > Alignment))
2100 if (isSafeToExecuteUnconditionally(*Load, DT, TLI, CurLoop,
2101 SafetyInfo, ORE,
2102 Preheader->getTerminator())) {
2103 DereferenceableInPH = true;
2104 Alignment = std::max(Alignment, InstAlignment);
2105 }
2106 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2107 // Stores *of* the pointer are not interesting, only stores *to* the
2108 // pointer.
2109 if (UI->getOperand(1) != ASIV)
2110 continue;
2111 if (!Store->isUnordered())
2112 return false;
2113
2114 SawUnorderedAtomic |= Store->isAtomic();
2115 SawNotAtomic |= !Store->isAtomic();
2116
2117 // If the store is guaranteed to execute, both properties are satisfied.
2118 // We may want to check if a store is guaranteed to execute even if we
2119 // already know that promotion is safe, since it may have higher
2120 // alignment than any other guaranteed stores, in which case we can
2121 // raise the alignment on the promoted store.
2122 Align InstAlignment = Store->getAlign();
2123
2124 if (!DereferenceableInPH || !SafeToInsertStore ||
2125 (InstAlignment > Alignment)) {
2126 if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2127 DereferenceableInPH = true;
2128 SafeToInsertStore = true;
2129 Alignment = std::max(Alignment, InstAlignment);
2130 }
2131 }
2132
2133 // If a store dominates all exit blocks, it is safe to sink.
2134 // As explained above, if an exit block was executed, a dominating
2135 // store must have been executed at least once, so we are not
2136 // introducing stores on paths that did not have them.
2137 // Note that this only looks at explicit exit blocks. If we ever
2138 // start sinking stores into unwind edges (see above), this will break.
2139 if (!SafeToInsertStore)
2140 SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2141 return DT->dominates(Store->getParent(), Exit);
2142 });
2143
2144 // If the store is not guaranteed to execute, we may still get
2145 // deref info through it.
2146 if (!DereferenceableInPH) {
2147 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2148 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2149 Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
2150 }
2151 } else
2152 return false; // Not a load or store.
2153
2154 // Merge the AA tags.
2155 if (LoopUses.empty()) {
2156 // On the first load/store, just take its AA tags.
2157 UI->getAAMetadata(AATags);
2158 } else if (AATags) {
2159 UI->getAAMetadata(AATags, /* Merge = */ true);
2160 }
2161
2162 LoopUses.push_back(UI);
2163 }
2164 }
2165
2166 // If we found both an unordered atomic instruction and a non-atomic memory
2167 // access, bail. We can't blindly promote non-atomic to atomic since we
2168 // might not be able to lower the result. We can't downgrade since that
2169 // would violate the memory model. Also, align 0 is an error for atomics.
2170 if (SawUnorderedAtomic && SawNotAtomic)
2171 return false;
2172
2173 // If we're inserting an atomic load in the preheader, we must be able to
2174 // lower it. We're only guaranteed to be able to lower naturally aligned
2175 // atomics.
2176 auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2177 if (SawUnorderedAtomic &&
2178 Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2179 return false;
2180
2181 // If we couldn't prove we can hoist the load, bail.
2182 if (!DereferenceableInPH)
2183 return false;
2184
2185 // We know we can hoist the load, but don't have a guaranteed store.
2186 // Check whether the location is thread-local. If it is, then we can insert
2187 // stores along paths which originally didn't have them without violating the
2188 // memory model.
2189 if (!SafeToInsertStore) {
2190 if (IsKnownThreadLocalObject)
2191 SafeToInsertStore = true;
2192 else {
2193 Value *Object = getUnderlyingObject(SomePtr);
2194 SafeToInsertStore =
2195 (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2196 isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
2197 }
2198 }
2199
2200 // If we've still failed to prove we can sink the store, give up.
2201 if (!SafeToInsertStore)
2202 return false;
2203
2204 // Otherwise, this is safe to promote, let's do it!
2205 LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtrdo { } while (false)
2206 << '\n')do { } while (false);
2207 ORE->emit([&]() {
2208 return OptimizationRemark(DEBUG_TYPE"licm", "PromoteLoopAccessesToScalar",
2209 LoopUses[0])
2210 << "Moving accesses to memory location out of the loop";
2211 });
2212 ++NumPromoted;
2213
2214 // Look at all the loop uses, and try to merge their locations.
2215 std::vector<const DILocation *> LoopUsesLocs;
2216 for (auto U : LoopUses)
2217 LoopUsesLocs.push_back(U->getDebugLoc().get());
2218 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2219
2220 // We use the SSAUpdater interface to insert phi nodes as required.
2221 SmallVector<PHINode *, 16> NewPHIs;
2222 SSAUpdater SSA(&NewPHIs);
2223 LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2224 InsertPts, MSSAInsertPts, PIC, MSSAU, *LI, DL,
2225 Alignment.value(), SawUnorderedAtomic, AATags,
2226 *SafetyInfo);
2227
2228 // Set up the preheader to have a definition of the value. It is the live-out
2229 // value from the preheader that uses in the loop will use.
2230 LoadInst *PreheaderLoad = new LoadInst(
2231 SomePtr->getType()->getPointerElementType(), SomePtr,
2232 SomePtr->getName() + ".promoted", Preheader->getTerminator());
2233 if (SawUnorderedAtomic)
2234 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2235 PreheaderLoad->setAlignment(Alignment);
2236 PreheaderLoad->setDebugLoc(DebugLoc());
2237 if (AATags)
2238 PreheaderLoad->setAAMetadata(AATags);
2239 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2240
2241 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2242 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2243 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2244 MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2245
2246 if (VerifyMemorySSA)
2247 MSSAU->getMemorySSA()->verifyMemorySSA();
2248 // Rewrite all the loads in the loop and remember all the definitions from
2249 // stores in the loop.
2250 Promoter.run(LoopUses);
2251
2252 if (VerifyMemorySSA)
2253 MSSAU->getMemorySSA()->verifyMemorySSA();
2254 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2255 if (PreheaderLoad->use_empty())
2256 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2257
2258 return true;
2259}
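
The transformation and its hazards, sketched in the comment at lines 2004-2010 above, can be written out as a standalone before/after pair (illustrative only, not compiler output):

// Source-level version of the example at lines 2004-2008.
// Original: the load and store of *P happen only when c is true.
void original(int *P, bool c, int n) {
  for (int i = 0; i < n; ++i)
    if (c)
      *P += 1;
}

// Promoted: *P is now read before the loop and written after it on every
// path. This is only sound if *P is dereferenceable on entry (p1) and a
// store may be inserted on paths that originally had none (p2), e.g. because
// the location is thread-local or a store dominates every exit.
void promoted(int *P, bool c, int n) {
  int tmp = *P;
  for (int i = 0; i < n; ++i)
    if (c)
      tmp += 1;
  *P = tmp;
}
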
2260
2261static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2262 function_ref<void(Instruction *)> Fn) {
2263 for (const BasicBlock *BB : L->blocks())
2264 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2265 for (const auto &Access : *Accesses)
2266 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2267 Fn(MUD->getMemoryInst());
2268}
2269
2270static SmallVector<SmallSetVector<Value *, 8>, 0>
2271collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2272 AliasSetTracker AST(*AA);
2273
2274 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2275 if (const auto *SI = dyn_cast<StoreInst>(I))
2276 return L->isLoopInvariant(SI->getPointerOperand());
2277 if (const auto *LI = dyn_cast<LoadInst>(I))
2278 return L->isLoopInvariant(LI->getPointerOperand());
2279 return false;
2280 };
2281
2282 // Populate AST with potentially promotable accesses and remove them from
2283 // MaybePromotable, so they will not be checked again on the next iteration.
2284 SmallPtrSet<Value *, 16> AttemptingPromotion;
2285 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2286 if (IsPotentiallyPromotable(I)) {
2287 AttemptingPromotion.insert(I);
2288 AST.add(I);
2289 }
2290 });
2291
2292 // We're only interested in must-alias sets that contain a mod.
2293 SmallVector<const AliasSet *, 8> Sets;
2294 for (AliasSet &AS : AST)
2295 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2296 Sets.push_back(&AS);
2297
2298 if (Sets.empty())
2299 return {}; // Nothing to promote...
2300
2301 // Discard any sets for which there is an aliasing non-promotable access.
2302 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2303 if (AttemptingPromotion.contains(I))
2304 return;
2305
2306 llvm::erase_if(Sets, [&](const AliasSet *AS) {
2307 return AS->aliasesUnknownInst(I, *AA);
2308 });
2309 });
2310
2311 SmallVector<SmallSetVector<Value *, 8>, 0> Result;
2312 for (const AliasSet *Set : Sets) {
2313 SmallSetVector<Value *, 8> PointerMustAliases;
2314 for (const auto &ASI : *Set)
2315 PointerMustAliases.insert(ASI.getValue());
2316 Result.push_back(std::move(PointerMustAliases));
2317 }
2318
2319 return Result;
2320}
2321
2322static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2323 AliasSetTracker *CurAST, Loop *CurLoop,
2324 AAResults *AA) {
2325 return CurAST->getAliasSetFor(MemLoc).isMod();
2326}
2327
2328bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2329 Loop *CurLoop, Instruction &I,
2330 SinkAndHoistLICMFlags &Flags) {
2331 // For hoisting, use the walker to determine safety
2332 if (!Flags.getIsSink()) {
2333 MemoryAccess *Source;
2334 // See declaration of SetLicmMssaOptCap for usage details.
2335 if (Flags.tooManyClobberingCalls())
2336 Source = MU->getDefiningAccess();
2337 else {
2338 Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2339 Flags.incrementClobberingCalls();
2340 }
2341 return !MSSA->isLiveOnEntryDef(Source) &&
2342 CurLoop->contains(Source->getBlock());
2343 }
2344
2345 // For sinking, we'd need to check all Defs below this use. The getClobbering
2346 // call will look on the backedge of the loop, but will check aliasing with
2347 // the instructions on the previous iteration.
2348 // For example:
2349 // for (i ... )
2350 // load a[i] ( Use (LoE)
2351 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2352 // i++;
2353 // The load sees no clobbering inside the loop, as the backedge alias check
2354 // does phi translation, and will check aliasing against store a[i-1].
2355 // However sinking the load outside the loop, below the store is incorrect.
2356
2357 // For now, only sink if there are no Defs in the loop, and the existing ones
2358 // precede the use and are in the same block.
2359 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2360 // needs PostDominatorTreeAnalysis.
2361 // FIXME: More precise: no Defs that alias this Use.
2362 if (Flags.tooManyMemoryAccesses())
2363 return true;
2364 for (auto *BB : CurLoop->getBlocks())
2365 if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2366 return true;
2367 // When sinking, the source block may not be part of the loop so check it.
2368 if (!CurLoop->contains(&I))
2369 return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2370
2371 return false;
2372}
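
The a[i] example in the comment above (lines 2349-2352) corresponds to the following standalone sketch (illustrative only, not LLVM code):

// Illustrative only: inside the loop, the load of a[i] always sees the value
// present before this iteration's store. Sinking the load below the loop
// would instead observe the value stored on the final iteration, changing
// the result.
int loadThenStore(int *a, int n) {
  int v = 0;
  for (int i = 0; i < n; ++i) {
    v = a[i]; // Use: not clobbered by this iteration's store
    a[i] = i; // Def inside the loop
  }
  return v;   // sinking "v = a[n-1]" here would read the freshly stored value
}
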
2373
2374bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2375 MemoryUse &MU) {
2376 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2377 for (const auto &MA : *Accesses)
2378 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2379 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2380 return true;
2381 return false;
2382}
2383
2384/// Little predicate that returns true if the specified basic block is in
2385/// a subloop of the current one, not the current one itself.
2386///
2387static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2388 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop")(static_cast<void> (0));
2389 return LI->getLoopFor(BB) != CurLoop;
2390}

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/InstrTypes.h

1//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines various meta classes of instructions that exist in the VM
10// representation. Specific concrete subclasses of these may be found in the
11// i*.h files...
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRTYPES_H
16#define LLVM_IR_INSTRTYPES_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/Optional.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringMap.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/Attributes.h"
27#include "llvm/IR/CallingConv.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/Function.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/OperandTraits.h"
34#include "llvm/IR/Type.h"
35#include "llvm/IR/User.h"
36#include "llvm/IR/Value.h"
37#include "llvm/Support/Casting.h"
38#include "llvm/Support/ErrorHandling.h"
39#include <algorithm>
40#include <cassert>
41#include <cstddef>
42#include <cstdint>
43#include <iterator>
44#include <string>
45#include <vector>
46
47namespace llvm {
48
49namespace Intrinsic {
50typedef unsigned ID;
51}
52
53//===----------------------------------------------------------------------===//
54// UnaryInstruction Class
55//===----------------------------------------------------------------------===//
56
57class UnaryInstruction : public Instruction {
58protected:
59 UnaryInstruction(Type *Ty, unsigned iType, Value *V,
60 Instruction *IB = nullptr)
61 : Instruction(Ty, iType, &Op<0>(), 1, IB) {
62 Op<0>() = V;
63 }
64 UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
65 : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
66 Op<0>() = V;
67 }
68
69public:
70 // allocate space for exactly one operand
71 void *operator new(size_t S) { return User::operator new(S, 1); }
72 void operator delete(void *Ptr) { User::operator delete(Ptr); }
73
74 /// Transparently provide more efficient getOperand methods.
75 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
76
77 // Methods for support type inquiry through isa, cast, and dyn_cast:
78 static bool classof(const Instruction *I) {
79 return I->isUnaryOp() ||
80 I->getOpcode() == Instruction::Alloca ||
81 I->getOpcode() == Instruction::Load ||
82 I->getOpcode() == Instruction::VAArg ||
83 I->getOpcode() == Instruction::ExtractValue ||
84 (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
85 }
86 static bool classof(const Value *V) {
87 return isa<Instruction>(V) && classof(cast<Instruction>(V));
88 }
89};
90
91template <>
92struct OperandTraits<UnaryInstruction> :
93 public FixedNumOperandTraits<UnaryInstruction, 1> {
94};
95
96DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)UnaryInstruction::op_iterator UnaryInstruction::op_begin() { return
OperandTraits<UnaryInstruction>::op_begin(this); } UnaryInstruction
::const_op_iterator UnaryInstruction::op_begin() const { return
OperandTraits<UnaryInstruction>::op_begin(const_cast<
UnaryInstruction*>(this)); } UnaryInstruction::op_iterator
UnaryInstruction::op_end() { return OperandTraits<UnaryInstruction
>::op_end(this); } UnaryInstruction::const_op_iterator UnaryInstruction
::op_end() const { return OperandTraits<UnaryInstruction>
::op_end(const_cast<UnaryInstruction*>(this)); } Value *
UnaryInstruction::getOperand(unsigned i_nocapture) const { (static_cast
<void> (0)); return cast_or_null<Value>( OperandTraits
<UnaryInstruction>::op_begin(const_cast<UnaryInstruction
*>(this))[i_nocapture].get()); } void UnaryInstruction::setOperand
(unsigned i_nocapture, Value *Val_nocapture) { (static_cast<
void> (0)); OperandTraits<UnaryInstruction>::op_begin
(this)[i_nocapture] = Val_nocapture; } unsigned UnaryInstruction
::getNumOperands() const { return OperandTraits<UnaryInstruction
>::operands(this); } template <int Idx_nocapture> Use
&UnaryInstruction::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
UnaryInstruction::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
97
98//===----------------------------------------------------------------------===//
99// UnaryOperator Class
100//===----------------------------------------------------------------------===//
101
102class UnaryOperator : public UnaryInstruction {
103 void AssertOK();
104
105protected:
106 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
107 const Twine &Name, Instruction *InsertBefore);
108 UnaryOperator(UnaryOps iType, Value *S, Type *Ty,
109 const Twine &Name, BasicBlock *InsertAtEnd);
110
111 // Note: Instruction needs to be a friend here to call cloneImpl.
112 friend class Instruction;
113
114 UnaryOperator *cloneImpl() const;
115
116public:
117
118 /// Construct a unary instruction, given the opcode and an operand.
119 /// Optionally (if InstBefore is specified) insert the instruction
120 /// into a BasicBlock right before the specified instruction. The specified
121 /// Instruction is allowed to be a dereferenced end iterator.
122 ///
123 static UnaryOperator *Create(UnaryOps Op, Value *S,
124 const Twine &Name = Twine(),
125 Instruction *InsertBefore = nullptr);
126
127 /// Construct a unary instruction, given the opcode and an operand.
128 /// Also automatically insert this instruction to the end of the
129 /// BasicBlock specified.
130 ///
131 static UnaryOperator *Create(UnaryOps Op, Value *S,
132 const Twine &Name,
133 BasicBlock *InsertAtEnd);
134
135 /// These methods just forward to Create, and are useful when you
136 /// statically know what type of instruction you're going to create. These
137 /// helpers just save some typing.
138#define HANDLE_UNARY_INST(N, OPC, CLASS) \
139 static UnaryOperator *Create##OPC(Value *V, const Twine &Name = "") {\
140 return Create(Instruction::OPC, V, Name);\
141 }
142#include "llvm/IR/Instruction.def"
143#define HANDLE_UNARY_INST(N, OPC, CLASS) \
144 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
145 BasicBlock *BB) {\
146 return Create(Instruction::OPC, V, Name, BB);\
147 }
148#include "llvm/IR/Instruction.def"
149#define HANDLE_UNARY_INST(N, OPC, CLASS) \
150 static UnaryOperator *Create##OPC(Value *V, const Twine &Name, \
151 Instruction *I) {\
152 return Create(Instruction::OPC, V, Name, I);\
153 }
154#include "llvm/IR/Instruction.def"
155
156 static UnaryOperator *
157 CreateWithCopiedFlags(UnaryOps Opc, Value *V, Instruction *CopyO,
158 const Twine &Name = "",
159 Instruction *InsertBefore = nullptr) {
160 UnaryOperator *UO = Create(Opc, V, Name, InsertBefore);
161 UO->copyIRFlags(CopyO);
162 return UO;
163 }
164
165 static UnaryOperator *CreateFNegFMF(Value *Op, Instruction *FMFSource,
166 const Twine &Name = "",
167 Instruction *InsertBefore = nullptr) {
168 return CreateWithCopiedFlags(Instruction::FNeg, Op, FMFSource, Name,
169 InsertBefore);
170 }
171
172 UnaryOps getOpcode() const {
173 return static_cast<UnaryOps>(Instruction::getOpcode());
174 }
175
176 // Methods for support type inquiry through isa, cast, and dyn_cast:
177 static bool classof(const Instruction *I) {
178 return I->isUnaryOp();
179 }
180 static bool classof(const Value *V) {
181 return isa<Instruction>(V) && classof(cast<Instruction>(V));
182 }
183};
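A brief sketch of how the factories above are typically used (illustrative only, not part of this header); X, FMFSource, and InsertPt are assumed to come from the surrounding transform:

    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: build an fneg of X that inherits fast-math flags
    // from an existing instruction, inserted before InsertPt. Equivalent to
    // Create(Instruction::FNeg, ...) followed by copyIRFlags(FMFSource).
    static Value *negateLike(Value *X, Instruction *FMFSource, Instruction *InsertPt) {
      return UnaryOperator::CreateFNegFMF(X, FMFSource, "neg", InsertPt);
    }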
184
185//===----------------------------------------------------------------------===//
186// BinaryOperator Class
187//===----------------------------------------------------------------------===//
188
189class BinaryOperator : public Instruction {
190 void AssertOK();
191
192protected:
193 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
194 const Twine &Name, Instruction *InsertBefore);
195 BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
196 const Twine &Name, BasicBlock *InsertAtEnd);
197
198 // Note: Instruction needs to be a friend here to call cloneImpl.
199 friend class Instruction;
200
201 BinaryOperator *cloneImpl() const;
202
203public:
204 // allocate space for exactly two operands
205 void *operator new(size_t S) { return User::operator new(S, 2); }
206 void operator delete(void *Ptr) { User::operator delete(Ptr); }
207
208 /// Transparently provide more efficient getOperand methods.
209   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
        public: inline Value *getOperand(unsigned) const;
                inline void setOperand(unsigned, Value*);
                inline op_iterator op_begin();
                inline const_op_iterator op_begin() const;
                inline op_iterator op_end();
                inline const_op_iterator op_end() const;
        protected: template <int> inline Use &Op();
                   template <int> inline const Use &Op() const;
        public: inline unsigned getNumOperands() const;
210
211 /// Construct a binary instruction, given the opcode and the two
212 /// operands. Optionally (if InstBefore is specified) insert the instruction
213 /// into a BasicBlock right before the specified instruction. The specified
214 /// Instruction is allowed to be a dereferenced end iterator.
215 ///
216 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
217 const Twine &Name = Twine(),
218 Instruction *InsertBefore = nullptr);
219
220 /// Construct a binary instruction, given the opcode and the two
221 /// operands. Also automatically insert this instruction to the end of the
222 /// BasicBlock specified.
223 ///
224 static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
225 const Twine &Name, BasicBlock *InsertAtEnd);
226
227 /// These methods just forward to Create, and are useful when you
228 /// statically know what type of instruction you're going to create. These
229 /// helpers just save some typing.
230#define HANDLE_BINARY_INST(N, OPC, CLASS) \
231 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
232 const Twine &Name = "") {\
233 return Create(Instruction::OPC, V1, V2, Name);\
234 }
235#include "llvm/IR/Instruction.def"
236#define HANDLE_BINARY_INST(N, OPC, CLASS) \
237 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
238 const Twine &Name, BasicBlock *BB) {\
239 return Create(Instruction::OPC, V1, V2, Name, BB);\
240 }
241#include "llvm/IR/Instruction.def"
242#define HANDLE_BINARY_INST(N, OPC, CLASS) \
243 static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
244 const Twine &Name, Instruction *I) {\
245 return Create(Instruction::OPC, V1, V2, Name, I);\
246 }
247#include "llvm/IR/Instruction.def"
248
249 static BinaryOperator *
250 CreateWithCopiedFlags(BinaryOps Opc, Value *V1, Value *V2, Instruction *CopyO,
251 const Twine &Name = "",
252 Instruction *InsertBefore = nullptr) {
253 BinaryOperator *BO = Create(Opc, V1, V2, Name, InsertBefore);
254 BO->copyIRFlags(CopyO);
255 return BO;
256 }
257
258 static BinaryOperator *CreateFAddFMF(Value *V1, Value *V2,
259 Instruction *FMFSource,
260 const Twine &Name = "") {
261 return CreateWithCopiedFlags(Instruction::FAdd, V1, V2, FMFSource, Name);
262 }
263 static BinaryOperator *CreateFSubFMF(Value *V1, Value *V2,
264 Instruction *FMFSource,
265 const Twine &Name = "") {
266 return CreateWithCopiedFlags(Instruction::FSub, V1, V2, FMFSource, Name);
267 }
268 static BinaryOperator *CreateFMulFMF(Value *V1, Value *V2,
269 Instruction *FMFSource,
270 const Twine &Name = "") {
271 return CreateWithCopiedFlags(Instruction::FMul, V1, V2, FMFSource, Name);
272 }
273 static BinaryOperator *CreateFDivFMF(Value *V1, Value *V2,
274 Instruction *FMFSource,
275 const Twine &Name = "") {
276 return CreateWithCopiedFlags(Instruction::FDiv, V1, V2, FMFSource, Name);
277 }
278 static BinaryOperator *CreateFRemFMF(Value *V1, Value *V2,
279 Instruction *FMFSource,
280 const Twine &Name = "") {
281 return CreateWithCopiedFlags(Instruction::FRem, V1, V2, FMFSource, Name);
282 }
283
284 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
285 const Twine &Name = "") {
286 BinaryOperator *BO = Create(Opc, V1, V2, Name);
287 BO->setHasNoSignedWrap(true);
288 return BO;
289 }
290 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
291 const Twine &Name, BasicBlock *BB) {
292 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
293 BO->setHasNoSignedWrap(true);
294 return BO;
295 }
296 static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
297 const Twine &Name, Instruction *I) {
298 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
299 BO->setHasNoSignedWrap(true);
300 return BO;
301 }
302
303 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
304 const Twine &Name = "") {
305 BinaryOperator *BO = Create(Opc, V1, V2, Name);
306 BO->setHasNoUnsignedWrap(true);
307 return BO;
308 }
309 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
310 const Twine &Name, BasicBlock *BB) {
311 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
312 BO->setHasNoUnsignedWrap(true);
313 return BO;
314 }
315 static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
316 const Twine &Name, Instruction *I) {
317 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
318 BO->setHasNoUnsignedWrap(true);
319 return BO;
320 }
321
322 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
323 const Twine &Name = "") {
324 BinaryOperator *BO = Create(Opc, V1, V2, Name);
325 BO->setIsExact(true);
326 return BO;
327 }
328 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
329 const Twine &Name, BasicBlock *BB) {
330 BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
331 BO->setIsExact(true);
332 return BO;
333 }
334 static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
335 const Twine &Name, Instruction *I) {
336 BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
337 BO->setIsExact(true);
338 return BO;
339 }
340
341#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
342 static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
343 const Twine &Name = "") { \
344 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
345 } \
346 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
347 Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
348 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
349 } \
350 static BinaryOperator *Create##NUWNSWEXACT##OPC( \
351 Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
352 return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
353 }
354
355 DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
356 DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
357 DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
358 DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
359 DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
360 DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
361 DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
362 DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
363
364 DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
365 DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
366 DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
367 DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
368
369#undef DEFINE_HELPERS
370
371 /// Helper functions to construct and inspect unary operations (NEG and NOT)
372 /// via binary operators SUB and XOR:
373 ///
374 /// Create the NEG and NOT instructions out of SUB and XOR instructions.
375 ///
376 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
377 Instruction *InsertBefore = nullptr);
378 static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
379 BasicBlock *InsertAtEnd);
380 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
381 Instruction *InsertBefore = nullptr);
382 static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
383 BasicBlock *InsertAtEnd);
384 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
385 Instruction *InsertBefore = nullptr);
386 static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
387 BasicBlock *InsertAtEnd);
388 static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
389 Instruction *InsertBefore = nullptr);
390 static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
391 BasicBlock *InsertAtEnd);
392
393 BinaryOps getOpcode() const {
394 return static_cast<BinaryOps>(Instruction::getOpcode());
395 }
396
397 /// Exchange the two operands to this instruction.
398 /// This instruction is safe to use on any binary instruction and
399 /// does not modify the semantics of the instruction. If the instruction
400 /// cannot be reversed (i.e., it's a Div), then return true.
401 ///
402 bool swapOperands();
403
404 // Methods for support type inquiry through isa, cast, and dyn_cast:
405 static bool classof(const Instruction *I) {
406 return I->isBinaryOp();
407 }
408 static bool classof(const Value *V) {
409 return isa<Instruction>(V) && classof(cast<Instruction>(V));
410 }
411};
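A short sketch of the wrapper factories generated by DEFINE_HELPERS above (CreateNSWAdd and friends) together with the NOT helper (illustrative only, not part of this header); A, B, and InsertPt are assumed to be provided by the caller:

    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: compute ~(A + B) with the add marked nsw, both
    // instructions inserted immediately before InsertPt.
    static Value *notOfNSWAdd(Value *A, Value *B, Instruction *InsertPt) {
      BinaryOperator *Sum = BinaryOperator::CreateNSWAdd(A, B, "sum", InsertPt);
      return BinaryOperator::CreateNot(Sum, "not", InsertPt);
    }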
412
413template <>
414struct OperandTraits<BinaryOperator> :
415 public FixedNumOperandTraits<BinaryOperator, 2> {
416};
417
418   DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
        BinaryOperator::op_iterator BinaryOperator::op_begin() {
          return OperandTraits<BinaryOperator>::op_begin(this); }
        BinaryOperator::const_op_iterator BinaryOperator::op_begin() const {
          return OperandTraits<BinaryOperator>::op_begin(const_cast<BinaryOperator*>(this)); }
        BinaryOperator::op_iterator BinaryOperator::op_end() {
          return OperandTraits<BinaryOperator>::op_end(this); }
        BinaryOperator::const_op_iterator BinaryOperator::op_end() const {
          return OperandTraits<BinaryOperator>::op_end(const_cast<BinaryOperator*>(this)); }
        Value *BinaryOperator::getOperand(unsigned i_nocapture) const {
          (static_cast<void>(0));
          return cast_or_null<Value>(
              OperandTraits<BinaryOperator>::op_begin(const_cast<BinaryOperator*>(this))[i_nocapture].get()); }
        void BinaryOperator::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
          (static_cast<void>(0));
          OperandTraits<BinaryOperator>::op_begin(this)[i_nocapture] = Val_nocapture; }
        unsigned BinaryOperator::getNumOperands() const {
          return OperandTraits<BinaryOperator>::operands(this); }
        template <int Idx_nocapture> Use &BinaryOperator::Op() {
          return this->OpFrom<Idx_nocapture>(this); }
        template <int Idx_nocapture> const Use &BinaryOperator::Op() const {
          return this->OpFrom<Idx_nocapture>(this); }
419
420//===----------------------------------------------------------------------===//
421// CastInst Class
422//===----------------------------------------------------------------------===//
423
424/// This is the base class for all instructions that perform data
425/// casts. It is simply provided so that instruction category testing
426/// can be performed with code like:
427///
428/// if (isa<CastInst>(Instr)) { ... }
429/// Base class of casting instructions.
430class CastInst : public UnaryInstruction {
431protected:
432 /// Constructor with insert-before-instruction semantics for subclasses
433 CastInst(Type *Ty, unsigned iType, Value *S,
434 const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
435 : UnaryInstruction(Ty, iType, S, InsertBefore) {
436 setName(NameStr);
437 }
438 /// Constructor with insert-at-end-of-block semantics for subclasses
439 CastInst(Type *Ty, unsigned iType, Value *S,
440 const Twine &NameStr, BasicBlock *InsertAtEnd)
441 : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
442 setName(NameStr);
443 }
444
445public:
446 /// Provides a way to construct any of the CastInst subclasses using an
447 /// opcode instead of the subclass's constructor. The opcode must be in the
448 /// CastOps category (Instruction::isCast(opcode) returns true). This
449 /// constructor has insert-before-instruction semantics to automatically
450 /// insert the new CastInst before InsertBefore (if it is non-null).
451 /// Construct any of the CastInst subclasses
452 static CastInst *Create(
453 Instruction::CastOps, ///< The opcode of the cast instruction
454 Value *S, ///< The value to be casted (operand 0)
455 Type *Ty, ///< The type to which cast should be made
456 const Twine &Name = "", ///< Name for the instruction
457 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
458 );
459 /// Provides a way to construct any of the CastInst subclasses using an
460 /// opcode instead of the subclass's constructor. The opcode must be in the
461 /// CastOps category. This constructor has insert-at-end-of-block semantics
462 /// to automatically insert the new CastInst at the end of InsertAtEnd (if
463 /// it's non-null).
464 /// Construct any of the CastInst subclasses
465 static CastInst *Create(
466 Instruction::CastOps, ///< The opcode for the cast instruction
467 Value *S, ///< The value to be casted (operand 0)
468 Type *Ty, ///< The type to which operand is casted
469 const Twine &Name, ///< The name for the instruction
470 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
471 );
472
473 /// Create a ZExt or BitCast cast instruction
474 static CastInst *CreateZExtOrBitCast(
475 Value *S, ///< The value to be casted (operand 0)
476 Type *Ty, ///< The type to which cast should be made
477 const Twine &Name = "", ///< Name for the instruction
478 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
479 );
480
481 /// Create a ZExt or BitCast cast instruction
482 static CastInst *CreateZExtOrBitCast(
483 Value *S, ///< The value to be casted (operand 0)
484 Type *Ty, ///< The type to which operand is casted
485 const Twine &Name, ///< The name for the instruction
486 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
487 );
488
489 /// Create a SExt or BitCast cast instruction
490 static CastInst *CreateSExtOrBitCast(
491 Value *S, ///< The value to be casted (operand 0)
492 Type *Ty, ///< The type to which cast should be made
493 const Twine &Name = "", ///< Name for the instruction
494 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
495 );
496
497 /// Create a SExt or BitCast cast instruction
498 static CastInst *CreateSExtOrBitCast(
499 Value *S, ///< The value to be casted (operand 0)
500 Type *Ty, ///< The type to which operand is casted
501 const Twine &Name, ///< The name for the instruction
502 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
503 );
504
505 /// Create a BitCast, AddrSpaceCast, or a PtrToInt cast instruction.
506 static CastInst *CreatePointerCast(
507 Value *S, ///< The pointer value to be casted (operand 0)
508 Type *Ty, ///< The type to which operand is casted
509 const Twine &Name, ///< The name for the instruction
510 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
511 );
512
513 /// Create a BitCast, AddrSpaceCast or a PtrToInt cast instruction.
514 static CastInst *CreatePointerCast(
515 Value *S, ///< The pointer value to be casted (operand 0)
516 Type *Ty, ///< The type to which cast should be made
517 const Twine &Name = "", ///< Name for the instruction
518 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
519 );
520
521 /// Create a BitCast or an AddrSpaceCast cast instruction.
522 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
523 Value *S, ///< The pointer value to be casted (operand 0)
524 Type *Ty, ///< The type to which operand is casted
525 const Twine &Name, ///< The name for the instruction
526 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
527 );
528
529 /// Create a BitCast or an AddrSpaceCast cast instruction.
530 static CastInst *CreatePointerBitCastOrAddrSpaceCast(
531 Value *S, ///< The pointer value to be casted (operand 0)
532 Type *Ty, ///< The type to which cast should be made
533 const Twine &Name = "", ///< Name for the instruction
534 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
535 );
536
537 /// Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
538 ///
539 /// If the value is a pointer type and the destination an integer type,
540 /// creates a PtrToInt cast. If the value is an integer type and the
541 /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
542 /// a bitcast.
543 static CastInst *CreateBitOrPointerCast(
544 Value *S, ///< The pointer value to be casted (operand 0)
545 Type *Ty, ///< The type to which cast should be made
546 const Twine &Name = "", ///< Name for the instruction
547 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
548 );
549
550 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
551 static CastInst *CreateIntegerCast(
552 Value *S, ///< The integer value to be casted (operand 0)
553 Type *Ty, ///< The type to which cast should be made
554 bool isSigned, ///< Whether to regard S as signed or not
555 const Twine &Name = "", ///< Name for the instruction
556 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
557 );
558
559 /// Create a ZExt, BitCast, or Trunc for int -> int casts.
560 static CastInst *CreateIntegerCast(
561 Value *S, ///< The integer value to be casted (operand 0)
562 Type *Ty, ///< The integer type to which operand is casted
563 bool isSigned, ///< Whether to regard S as signed or not
564 const Twine &Name, ///< The name for the instruction
565 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
566 );
567
568 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
569 static CastInst *CreateFPCast(
570 Value *S, ///< The floating point value to be casted
571 Type *Ty, ///< The floating point type to cast to
572 const Twine &Name = "", ///< Name for the instruction
573 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
574 );
575
576 /// Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
577 static CastInst *CreateFPCast(
578 Value *S, ///< The floating point value to be casted
579 Type *Ty, ///< The floating point type to cast to
580 const Twine &Name, ///< The name for the instruction
581 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
582 );
583
584 /// Create a Trunc or BitCast cast instruction
585 static CastInst *CreateTruncOrBitCast(
586 Value *S, ///< The value to be casted (operand 0)
587 Type *Ty, ///< The type to which cast should be made
588 const Twine &Name = "", ///< Name for the instruction
589 Instruction *InsertBefore = nullptr ///< Place to insert the instruction
590 );
591
592 /// Create a Trunc or BitCast cast instruction
593 static CastInst *CreateTruncOrBitCast(
594 Value *S, ///< The value to be casted (operand 0)
595 Type *Ty, ///< The type to which operand is casted
596 const Twine &Name, ///< The name for the instruction
597 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
598 );
599
600 /// Check whether a bitcast between these types is valid
601 static bool isBitCastable(
602 Type *SrcTy, ///< The Type from which the value should be cast.
603 Type *DestTy ///< The Type to which the value should be cast.
604 );
605
606 /// Check whether a bitcast, inttoptr, or ptrtoint cast between these
607 /// types is valid and a no-op.
608 ///
609 /// This ensures that any pointer<->integer cast has enough bits in the
610 /// integer and any other cast is a bitcast.
611 static bool isBitOrNoopPointerCastable(
612 Type *SrcTy, ///< The Type from which the value should be cast.
613 Type *DestTy, ///< The Type to which the value should be cast.
614 const DataLayout &DL);
615
616 /// Returns the opcode necessary to cast Val into Ty using usual casting
617 /// rules.
618 /// Infer the opcode for cast operand and type
619 static Instruction::CastOps getCastOpcode(
620 const Value *Val, ///< The value to cast
621 bool SrcIsSigned, ///< Whether to treat the source as signed
622 Type *Ty, ///< The Type to which the value should be casted
623 bool DstIsSigned ///< Whether to treat the dest. as signed
624 );
625
626 /// There are several places where we need to know if a cast instruction
627 /// only deals with integer source and destination types. To simplify that
628 /// logic, this method is provided.
629 /// @returns true iff the cast has an integer-typed operand and dest type.
630 /// Determine if this is an integer-only cast.
631 bool isIntegerCast() const;
632
633 /// A lossless cast is one that does not alter the basic value. It implies
634 /// a no-op cast but is more stringent, preventing things like int->float,
635 /// long->double, or int->ptr.
636 /// @returns true iff the cast is lossless.
637 /// Determine if this is a lossless cast.
638 bool isLosslessCast() const;
639
640 /// A no-op cast is one that can be effected without changing any bits.
641 /// It implies that the source and destination types are the same size. The
642 /// DataLayout argument is to determine the pointer size when examining casts
643 /// involving Integer and Pointer types. They are no-op casts if the integer
644 /// is the same size as the pointer. However, pointer size varies with
645 /// platform. Note that a precondition of this method is that the cast is
646 /// legal - i.e. the instruction formed with these operands would verify.
647 static bool isNoopCast(
648 Instruction::CastOps Opcode, ///< Opcode of cast
649 Type *SrcTy, ///< SrcTy of cast
650 Type *DstTy, ///< DstTy of cast
651 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
652 );
653
654 /// Determine if this cast is a no-op cast.
655 ///
656 /// \param DL is the DataLayout to determine pointer size.
657 bool isNoopCast(const DataLayout &DL) const;
658
659 /// Determine how a pair of casts can be eliminated, if they can be at all.
660 /// This is a helper function for both CastInst and ConstantExpr.
661 /// @returns 0 if the CastInst pair can't be eliminated, otherwise
662 /// returns Instruction::CastOps value for a cast that can replace
663 /// the pair, casting SrcTy to DstTy.
664 /// Determine if a cast pair is eliminable
665 static unsigned isEliminableCastPair(
666 Instruction::CastOps firstOpcode, ///< Opcode of first cast
667 Instruction::CastOps secondOpcode, ///< Opcode of second cast
668 Type *SrcTy, ///< SrcTy of 1st cast
669 Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
670 Type *DstTy, ///< DstTy of 2nd cast
671 Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
672 Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
673 Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
674 );
675
676 /// Return the opcode of this CastInst
677 Instruction::CastOps getOpcode() const {
678 return Instruction::CastOps(Instruction::getOpcode());
679 }
680
681 /// Return the source type, as a convenience
682 Type* getSrcTy() const { return getOperand(0)->getType(); }
683 /// Return the destination type, as a convenience
684 Type* getDestTy() const { return getType(); }
685
686 /// This method can be used to determine if a cast from SrcTy to DstTy using
687 /// Opcode op is valid or not.
688 /// @returns true iff the proposed cast is valid.
689 /// Determine if a cast is valid without creating one.
690 static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy);
691 static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
692 return castIsValid(op, S->getType(), DstTy);
693 }
694
695 /// Methods for support type inquiry through isa, cast, and dyn_cast:
696 static bool classof(const Instruction *I) {
697 return I->isCast();
698 }
699 static bool classof(const Value *V) {
700 return isa<Instruction>(V) && classof(cast<Instruction>(V));
701 }
702};
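A small sketch of the integer-cast factory described above; CreateIntegerCast chooses Trunc, BitCast, ZExt, or SExt from the operand and destination widths. Illustrative only (not part of this header), with V, DestTy, and InsertPt assumed from the surrounding code:

    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: coerce an integer value to DestTy, reusing V when
    // the types already match.
    static Value *coerceToIntType(Value *V, Type *DestTy, bool IsSigned,
                                  Instruction *InsertPt) {
      if (V->getType() == DestTy)
        return V;
      return CastInst::CreateIntegerCast(V, DestTy, IsSigned, "coerce", InsertPt);
    }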
703
704//===----------------------------------------------------------------------===//
705// CmpInst Class
706//===----------------------------------------------------------------------===//
707
708/// This class is the base class for the comparison instructions.
709/// Abstract base class of comparison instructions.
710class CmpInst : public Instruction {
711public:
712 /// This enumeration lists the possible predicates for CmpInst subclasses.
713 /// Values in the range 0-31 are reserved for FCmpInst, while values in the
714 /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
715 /// predicate values are not overlapping between the classes.
716 ///
717 /// Some passes (e.g. InstCombine) depend on the bit-wise characteristics of
718 /// FCMP_* values. Changing the bit patterns requires a potential change to
719 /// those passes.
720 enum Predicate : unsigned {
721 // Opcode U L G E Intuitive operation
722 FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
723 FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
724 FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
725 FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
726 FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
727 FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
728 FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
729 FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
730 FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
731 FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
732 FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
733 FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
734 FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
735 FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
736 FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
737 FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
738 FIRST_FCMP_PREDICATE = FCMP_FALSE,
739 LAST_FCMP_PREDICATE = FCMP_TRUE,
740 BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
741 ICMP_EQ = 32, ///< equal
742 ICMP_NE = 33, ///< not equal
743 ICMP_UGT = 34, ///< unsigned greater than
744 ICMP_UGE = 35, ///< unsigned greater or equal
745 ICMP_ULT = 36, ///< unsigned less than
746 ICMP_ULE = 37, ///< unsigned less or equal
747 ICMP_SGT = 38, ///< signed greater than
748 ICMP_SGE = 39, ///< signed greater or equal
749 ICMP_SLT = 40, ///< signed less than
750 ICMP_SLE = 41, ///< signed less or equal
751 FIRST_ICMP_PREDICATE = ICMP_EQ,
752 LAST_ICMP_PREDICATE = ICMP_SLE,
753 BAD_ICMP_PREDICATE = ICMP_SLE + 1
754 };
755 using PredicateField =
756 Bitfield::Element<Predicate, 0, 6, LAST_ICMP_PREDICATE>;
757
758protected:
759 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
760 Value *LHS, Value *RHS, const Twine &Name = "",
761 Instruction *InsertBefore = nullptr,
762 Instruction *FlagsSource = nullptr);
763
764 CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
765 Value *LHS, Value *RHS, const Twine &Name,
766 BasicBlock *InsertAtEnd);
767
768public:
769 // allocate space for exactly two operands
770 void *operator new(size_t S) { return User::operator new(S, 2); }
771 void operator delete(void *Ptr) { User::operator delete(Ptr); }
772
773 /// Construct a compare instruction, given the opcode, the predicate and
774 /// the two operands. Optionally (if InstBefore is specified) insert the
775 /// instruction into a BasicBlock right before the specified instruction.
776 /// The specified Instruction is allowed to be a dereferenced end iterator.
777 /// Create a CmpInst
778 static CmpInst *Create(OtherOps Op,
779 Predicate predicate, Value *S1,
780 Value *S2, const Twine &Name = "",
781 Instruction *InsertBefore = nullptr);
782
783 /// Construct a compare instruction, given the opcode, the predicate and the
784 /// two operands. Also automatically insert this instruction to the end of
785 /// the BasicBlock specified.
786 /// Create a CmpInst
787 static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
788 Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
789
790 /// Get the opcode casted to the right type
791 OtherOps getOpcode() const {
792 return static_cast<OtherOps>(Instruction::getOpcode());
793 }
794
795 /// Return the predicate for this instruction.
796 Predicate getPredicate() const { return getSubclassData<PredicateField>(); }
797
798 /// Set the predicate for this instruction to the specified value.
799 void setPredicate(Predicate P) { setSubclassData<PredicateField>(P); }
800
801 static bool isFPPredicate(Predicate P) {
802 static_assert(FIRST_FCMP_PREDICATE == 0,
803 "FIRST_FCMP_PREDICATE is required to be 0");
804 return P <= LAST_FCMP_PREDICATE;
805 }
806
807 static bool isIntPredicate(Predicate P) {
808 return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
809 }
810
811 static StringRef getPredicateName(Predicate P);
812
813 bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
814 bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
815
816 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
817 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
818 /// @returns the inverse predicate for the instruction's current predicate.
819 /// Return the inverse of the instruction's predicate.
820 Predicate getInversePredicate() const {
821 return getInversePredicate(getPredicate());
822 }
823
824 /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
825 /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
826 /// @returns the inverse predicate for predicate provided in \p pred.
827 /// Return the inverse of a given predicate
828 static Predicate getInversePredicate(Predicate pred);
829
830 /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
831 /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
832 /// @returns the predicate that would be the result of exchanging the two
833 /// operands of the CmpInst instruction without changing the result
834 /// produced.
835 /// Return the predicate as if the operands were swapped
836 Predicate getSwappedPredicate() const {
837 return getSwappedPredicate(getPredicate());
838 }
839
840 /// This is a static version that you can use without an instruction
841 /// available.
842 /// Return the predicate as if the operands were swapped.
843 static Predicate getSwappedPredicate(Predicate pred);
844
845 /// This is a static version that you can use without an instruction
846 /// available.
847 /// @returns true if the comparison predicate is strict, false otherwise.
848 static bool isStrictPredicate(Predicate predicate);
849
850 /// @returns true if the comparison predicate is strict, false otherwise.
851 /// Determine if this instruction is using a strict comparison predicate.
852 bool isStrictPredicate() const { return isStrictPredicate(getPredicate()); }
853
854 /// This is a static version that you can use without an instruction
855 /// available.
856 /// @returns true if the comparison predicate is non-strict, false otherwise.
857 static bool isNonStrictPredicate(Predicate predicate);
858
859 /// @returns true if the comparison predicate is non-strict, false otherwise.
860 /// Determine if this instruction is using a non-strict comparison predicate.
861 bool isNonStrictPredicate() const {
862 return isNonStrictPredicate(getPredicate());
863 }
864
865 /// For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
866 /// Returns the strict version of non-strict comparisons.
867 Predicate getStrictPredicate() const {
868 return getStrictPredicate(getPredicate());
869 }
870
871 /// This is a static version that you can use without an instruction
872 /// available.
873 /// @returns the strict version of comparison provided in \p pred.
874 /// If \p pred is not a strict comparison predicate, returns \p pred.
875 /// Returns the strict version of non-strict comparisons.
876 static Predicate getStrictPredicate(Predicate pred);
877
878 /// For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
879 /// Returns the non-strict version of strict comparisons.
880 Predicate getNonStrictPredicate() const {
881 return getNonStrictPredicate(getPredicate());
882 }
883
884 /// This is a static version that you can use without an instruction
885 /// available.
886 /// @returns the non-strict version of comparison provided in \p pred.
887 /// If \p pred is not a strict comparison predicate, returns \p pred.
888 /// Returns the non-strict version of strict comparisons.
889 static Predicate getNonStrictPredicate(Predicate pred);
890
891 /// This is a static version that you can use without an instruction
892 /// available.
893 /// Return the flipped strictness of predicate
894 static Predicate getFlippedStrictnessPredicate(Predicate pred);
895
896 /// For predicate of kind "is X or equal to 0" returns the predicate "is X".
897 /// For predicate of kind "is X" returns the predicate "is X or equal to 0".
898 /// Does not support other kinds of predicates.
899 /// @returns the predicate with the "or equal to 0" part removed if it was
900 /// present, and added if it was not.
901 /// Return the flipped strictness of predicate
902 Predicate getFlippedStrictnessPredicate() const {
903 return getFlippedStrictnessPredicate(getPredicate());
904 }
905
906 /// Provide more efficient getOperand methods.
907   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
        public: inline Value *getOperand(unsigned) const;
                inline void setOperand(unsigned, Value*);
                inline op_iterator op_begin();
                inline const_op_iterator op_begin() const;
                inline op_iterator op_end();
                inline const_op_iterator op_end() const;
        protected: template <int> inline Use &Op();
                   template <int> inline const Use &Op() const;
        public: inline unsigned getNumOperands() const;
908
909 /// This is just a convenience that dispatches to the subclasses.
910 /// Swap the operands and adjust predicate accordingly to retain
911 /// the same comparison.
912 void swapOperands();
913
914 /// This is just a convenience that dispatches to the subclasses.
915 /// Determine if this CmpInst is commutative.
916 bool isCommutative() const;
917
918 /// Determine if this is an equals/not equals predicate.
919 /// This is a static version that you can use without an instruction
920 /// available.
921 static bool isEquality(Predicate pred);
922
923 /// Determine if this is an equals/not equals predicate.
924 bool isEquality() const { return isEquality(getPredicate()); }
925
926 /// Return true if the predicate is relational (not EQ or NE).
927 static bool isRelational(Predicate P) { return !isEquality(P); }
928
929 /// Return true if the predicate is relational (not EQ or NE).
930 bool isRelational() const { return !isEquality(); }
931
932 /// @returns true if the comparison is signed, false otherwise.
933 /// Determine if this instruction is using a signed comparison.
934 bool isSigned() const {
935 return isSigned(getPredicate());
936 }
937
938 /// @returns true if the comparison is unsigned, false otherwise.
939 /// Determine if this instruction is using an unsigned comparison.
940 bool isUnsigned() const {
941 return isUnsigned(getPredicate());
942 }
943
944 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
945 /// @returns the signed version of the unsigned predicate pred.
946 /// return the signed version of a predicate
947 static Predicate getSignedPredicate(Predicate pred);
948
949 /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
950 /// @returns the signed version of the predicate for this instruction (which
951 /// has to be an unsigned predicate).
952 /// return the signed version of a predicate
953 Predicate getSignedPredicate() {
954 return getSignedPredicate(getPredicate());
955 }
956
957 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
958 /// @returns the unsigned version of the signed predicate pred.
959 static Predicate getUnsignedPredicate(Predicate pred);
960
961 /// For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert
962 /// @returns the unsigned version of the predicate for this instruction (which
963 /// has to be a signed predicate).
964 /// return the unsigned version of a predicate
965 Predicate getUnsignedPredicate() {
966 return getUnsignedPredicate(getPredicate());
967 }
968
969 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
970 /// @returns the unsigned version of the signed predicate pred or
971 /// the signed version of the unsigned predicate pred.
972 static Predicate getFlippedSignednessPredicate(Predicate pred);
973
974 /// For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert
975 /// @returns the unsigned version of the signed predicate pred or
976 /// the signed version of the unsigned predicate pred.
977 Predicate getFlippedSignednessPredicate() {
978 return getFlippedSignednessPredicate(getPredicate());
979 }
980
981 /// This is just a convenience.
982 /// Determine if this is true when both operands are the same.
983 bool isTrueWhenEqual() const {
984 return isTrueWhenEqual(getPredicate());
985 }
986
987 /// This is just a convenience.
988 /// Determine if this is false when both operands are the same.
989 bool isFalseWhenEqual() const {
990 return isFalseWhenEqual(getPredicate());
991 }
992
993 /// @returns true if the predicate is unsigned, false otherwise.
994 /// Determine if the predicate is an unsigned operation.
995 static bool isUnsigned(Predicate predicate);
996
997 /// @returns true if the predicate is signed, false otherwise.
998 /// Determine if the predicate is a signed operation.
999 static bool isSigned(Predicate predicate);
1000
1001 /// Determine if the predicate is an ordered operation.
1002 static bool isOrdered(Predicate predicate);
1003
1004 /// Determine if the predicate is an unordered operation.
1005 static bool isUnordered(Predicate predicate);
1006
1007 /// Determine if the predicate is true when comparing a value with itself.
1008 static bool isTrueWhenEqual(Predicate predicate);
1009
1010 /// Determine if the predicate is false when comparing a value with itself.
1011 static bool isFalseWhenEqual(Predicate predicate);
1012
1013 /// Determine if Pred1 implies Pred2 is true when two compares have matching
1014 /// operands.
1015 static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2);
1016
1017 /// Determine if Pred1 implies Pred2 is false when two compares have matching
1018 /// operands.
1019 static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2);
1020
1021 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1022 static bool classof(const Instruction *I) {
1023 return I->getOpcode() == Instruction::ICmp ||
1024 I->getOpcode() == Instruction::FCmp;
1025 }
1026 static bool classof(const Value *V) {
1027 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1028 }
1029
1030 /// Create a result type for fcmp/icmp
1031 static Type* makeCmpResultType(Type* opnd_type) {
1032 if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
1033 return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
1034 vt->getElementCount());
1035 }
1036 return Type::getInt1Ty(opnd_type->getContext());
1037 }
1038
1039private:
1040 // Shadow Value::setValueSubclassData with a private forwarding method so that
1041 // subclasses cannot accidentally use it.
1042 void setValueSubclassData(unsigned short D) {
1043 Value::setValueSubclassData(D);
1044 }
1045};
1046
1047// FIXME: these are redundant if CmpInst < BinaryOperator
1048template <>
1049struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
1050};
1051
1052   DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
         CmpInst::op_iterator CmpInst::op_begin() {
           return OperandTraits<CmpInst>::op_begin(this); }
         CmpInst::const_op_iterator CmpInst::op_begin() const {
           return OperandTraits<CmpInst>::op_begin(const_cast<CmpInst*>(this)); }
         CmpInst::op_iterator CmpInst::op_end() {
           return OperandTraits<CmpInst>::op_end(this); }
         CmpInst::const_op_iterator CmpInst::op_end() const {
           return OperandTraits<CmpInst>::op_end(const_cast<CmpInst*>(this)); }
         Value *CmpInst::getOperand(unsigned i_nocapture) const {
           (static_cast<void>(0));
           return cast_or_null<Value>(
               OperandTraits<CmpInst>::op_begin(const_cast<CmpInst*>(this))[i_nocapture].get()); }
         void CmpInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) {
           (static_cast<void>(0));
           OperandTraits<CmpInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
         unsigned CmpInst::getNumOperands() const {
           return OperandTraits<CmpInst>::operands(this); }
         template <int Idx_nocapture> Use &CmpInst::Op() {
           return this->OpFrom<Idx_nocapture>(this); }
         template <int Idx_nocapture> const Use &CmpInst::Op() const {
           return this->OpFrom<Idx_nocapture>(this); }
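A compact sketch of the CmpInst factory and the predicate queries declared above (illustrative only, not part of this header); A, B, and InsertPt are assumed to be integer operands and an insertion point from the caller:

    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: emit A <s B and note the related predicates.
    static CmpInst *signedLessThan(Value *A, Value *B, Instruction *InsertPt) {
      CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT, A, B,
                                     "cmp", InsertPt);
      CmpInst::Predicate Inverse = Cmp->getInversePredicate(); // ICMP_SGE
      CmpInst::Predicate Swapped = Cmp->getSwappedPredicate(); // ICMP_SGT
      (void)Inverse; (void)Swapped;
      return Cmp;
    }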
1053
1054/// A lightweight accessor for an operand bundle meant to be passed
1055/// around by value.
1056struct OperandBundleUse {
1057 ArrayRef<Use> Inputs;
1058
1059 OperandBundleUse() = default;
1060 explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
1061 : Inputs(Inputs), Tag(Tag) {}
1062
1063 /// Return true if the operand at index \p Idx in this operand bundle
1064 /// has the attribute A.
1065 bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
1066 if (isDeoptOperandBundle())
1067 if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
1068 return Inputs[Idx]->getType()->isPointerTy();
1069
1070 // Conservative answer: no operands have any attributes.
1071 return false;
1072 }
1073
1074 /// Return the tag of this operand bundle as a string.
1075 StringRef getTagName() const {
1076 return Tag->getKey();
1077 }
1078
1079 /// Return the tag of this operand bundle as an integer.
1080 ///
1081 /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
1082 /// and this function returns the unique integer that getOrInsertBundleTag
1083 /// associated with the tag of this operand bundle.
1084 uint32_t getTagID() const {
1085 return Tag->getValue();
1086 }
1087
1088 /// Return true if this is a "deopt" operand bundle.
1089 bool isDeoptOperandBundle() const {
1090 return getTagID() == LLVMContext::OB_deopt;
1091 }
1092
1093 /// Return true if this is a "funclet" operand bundle.
1094 bool isFuncletOperandBundle() const {
1095 return getTagID() == LLVMContext::OB_funclet;
1096 }
1097
1098 /// Return true if this is a "cfguardtarget" operand bundle.
1099 bool isCFGuardTargetOperandBundle() const {
1100 return getTagID() == LLVMContext::OB_cfguardtarget;
1101 }
1102
1103private:
1104 /// Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
1105 StringMapEntry<uint32_t> *Tag;
1106};
1107
1108/// A container for an operand bundle being viewed as a set of values
1109/// rather than a set of uses.
1110///
1111/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
1112/// so it is possible to create and pass around "self-contained" instances of
1113/// OperandBundleDef and ConstOperandBundleDef.
1114template <typename InputTy> class OperandBundleDefT {
1115 std::string Tag;
1116 std::vector<InputTy> Inputs;
1117
1118public:
1119 explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
1120 : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
1121 explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
1122 : Tag(std::move(Tag)), Inputs(Inputs) {}
1123
1124 explicit OperandBundleDefT(const OperandBundleUse &OBU) {
1125 Tag = std::string(OBU.getTagName());
1126 llvm::append_range(Inputs, OBU.Inputs);
1127 }
1128
1129 ArrayRef<InputTy> inputs() const { return Inputs; }
1130
1131 using input_iterator = typename std::vector<InputTy>::const_iterator;
1132
1133 size_t input_size() const { return Inputs.size(); }
1134 input_iterator input_begin() const { return Inputs.begin(); }
1135 input_iterator input_end() const { return Inputs.end(); }
1136
1137 StringRef getTag() const { return Tag; }
1138};
1139
1140using OperandBundleDef = OperandBundleDefT<Value *>;
1141using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
1142
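A minimal sketch of building a self-contained bundle definition from values, as the comment above describes (illustrative only, not part of this header); the live values are assumed to be collected by the caller:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: wrap a set of live values in a "deopt" bundle
    // definition that can later be handed to CallBase::Create.
    static OperandBundleDef makeDeoptBundle(ArrayRef<Value *> LiveValues) {
      return OperandBundleDef("deopt", LiveValues);
    }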
1143//===----------------------------------------------------------------------===//
1144// CallBase Class
1145//===----------------------------------------------------------------------===//
1146
1147/// Base class for all callable instructions (InvokeInst and CallInst)
1148/// Holds everything related to calling a function.
1149///
1150/// All call-like instructions are required to use a common operand layout:
1151/// - Zero or more arguments to the call,
1152/// - Zero or more operand bundles with zero or more operand inputs each
1153/// bundle,
1154/// - Zero or more subclass controlled operands
1155/// - The called function.
1156///
1157/// This allows this base class to easily access the called function and the
1158/// start of the arguments without knowing how many other operands a particular
1159/// subclass requires. Note that accessing the end of the argument list isn't
1160/// as cheap as most other operations on the base class.
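Before the class itself, a small sketch of how that layout is usually consumed through the argument iterators defined below (illustrative only, not part of this header); CB is assumed to be a non-null call site from the surrounding analysis:

    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    // Hypothetical helper: count pointer-typed call arguments. args() covers
    // the argument operands only; bundle operands and the called operand are
    // excluded by arg_end().
    static unsigned countPointerArgs(const CallBase *CB) {
      unsigned N = 0;
      for (const Value *Arg : CB->args())
        if (Arg->getType()->isPointerTy())
          ++N;
      return N;
    }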
1161class CallBase : public Instruction {
1162protected:
1163 // The first two bits are reserved by CallInst for fast retrieval,
1164 using CallInstReservedField = Bitfield::Element<unsigned, 0, 2>;
1165 using CallingConvField =
1166 Bitfield::Element<CallingConv::ID, CallInstReservedField::NextBit, 10,
1167 CallingConv::MaxID>;
1168 static_assert(
1169 Bitfield::areContiguous<CallInstReservedField, CallingConvField>(),
1170 "Bitfields must be contiguous");
1171
1172 /// The last operand is the called operand.
1173 static constexpr int CalledOperandOpEndIdx = -1;
1174
1175 AttributeList Attrs; ///< parameter attributes for callable
1176 FunctionType *FTy;
1177
1178 template <class... ArgsTy>
1179 CallBase(AttributeList const &A, FunctionType *FT, ArgsTy &&... Args)
1180 : Instruction(std::forward<ArgsTy>(Args)...), Attrs(A), FTy(FT) {}
1181
1182 using Instruction::Instruction;
1183
1184 bool hasDescriptor() const { return Value::HasDescriptor; }
1185
1186 unsigned getNumSubclassExtraOperands() const {
1187 switch (getOpcode()) {
1188 case Instruction::Call:
1189 return 0;
1190 case Instruction::Invoke:
1191 return 2;
1192 case Instruction::CallBr:
1193 return getNumSubclassExtraOperandsDynamic();
1194 }
1195 llvm_unreachable("Invalid opcode!")__builtin_unreachable();
1196 }
1197
1198 /// Get the number of extra operands for instructions that don't have a fixed
1199 /// number of extra operands.
1200 unsigned getNumSubclassExtraOperandsDynamic() const;
1201
1202public:
1203 using Instruction::getContext;
1204
1205 /// Create a clone of \p CB with a different set of operand bundles and
1206 /// insert it before \p InsertPt.
1207 ///
1208 /// The returned call instruction is identical to \p CB in every way except that
1209 /// the operand bundles for the new instruction are set to the operand bundles
1210 /// in \p Bundles.
1211 static CallBase *Create(CallBase *CB, ArrayRef<OperandBundleDef> Bundles,
1212 Instruction *InsertPt = nullptr);
1213
1214 /// Create a clone of \p CB with the operand bundle with the tag matching
1215 /// \p Bundle's tag replaced with Bundle, and insert it before \p InsertPt.
1216 ///
1217 /// The returned call instruction is identical to \p CB in every way except that
1218 /// the specified operand bundle has been replaced.
1219 static CallBase *Create(CallBase *CB,
1220 OperandBundleDef Bundle,
1221 Instruction *InsertPt = nullptr);
1222
1223 /// Create a clone of \p CB with operand bundle \p OB added.
1224 static CallBase *addOperandBundle(CallBase *CB, uint32_t ID,
1225 OperandBundleDef OB,
1226 Instruction *InsertPt = nullptr);
1227
1228 /// Create a clone of \p CB with operand bundle \p ID removed.
1229 static CallBase *removeOperandBundle(CallBase *CB, uint32_t ID,
1230 Instruction *InsertPt = nullptr);
1231
1232 static bool classof(const Instruction *I) {
1233 return I->getOpcode() == Instruction::Call ||
1234 I->getOpcode() == Instruction::Invoke ||
1235 I->getOpcode() == Instruction::CallBr;
1236 }
1237 static bool classof(const Value *V) {
1238 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1239 }
1240
1241 FunctionType *getFunctionType() const { return FTy; }
1242
1243 void mutateFunctionType(FunctionType *FTy) {
1244 Value::mutateType(FTy->getReturnType());
1245 this->FTy = FTy;
1246 }
1247
1248   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
         public: inline Value *getOperand(unsigned) const;
                 inline void setOperand(unsigned, Value*);
                 inline op_iterator op_begin();
                 inline const_op_iterator op_begin() const;
                 inline op_iterator op_end();
                 inline const_op_iterator op_end() const;
         protected: template <int> inline Use &Op();
                    template <int> inline const Use &Op() const;
         public: inline unsigned getNumOperands() const;
1249
1250 /// data_operands_begin/data_operands_end - Return iterators iterating over
1251 /// the call / invoke argument list and bundle operands. For invokes, this is
1252 /// the set of instruction operands except the invoke target and the two
1253 /// successor blocks; and for calls this is the set of instruction operands
1254 /// except the call target.
1255 User::op_iterator data_operands_begin() { return op_begin(); }
1256 User::const_op_iterator data_operands_begin() const {
1257 return const_cast<CallBase *>(this)->data_operands_begin();
1258 }
1259 User::op_iterator data_operands_end() {
1260 // Walk from the end of the operands over the called operand and any
1261 // subclass operands.
1262 return op_end() - getNumSubclassExtraOperands() - 1;
1263 }
1264 User::const_op_iterator data_operands_end() const {
1265 return const_cast<CallBase *>(this)->data_operands_end();
1266 }
1267 iterator_range<User::op_iterator> data_ops() {
1268 return make_range(data_operands_begin(), data_operands_end());
1269 }
1270 iterator_range<User::const_op_iterator> data_ops() const {
1271 return make_range(data_operands_begin(), data_operands_end());
1272 }
1273 bool data_operands_empty() const {
1274 return data_operands_end() == data_operands_begin();
1275 }
1276 unsigned data_operands_size() const {
1277 return std::distance(data_operands_begin(), data_operands_end());
1278 }
1279
1280 bool isDataOperand(const Use *U) const {
1281 assert(this == U->getUser() &&(static_cast<void> (0))
1282 "Only valid to query with a use of this instruction!")(static_cast<void> (0));
1283 return data_operands_begin() <= U && U < data_operands_end();
1284 }
1285 bool isDataOperand(Value::const_user_iterator UI) const {
1286 return isDataOperand(&UI.getUse());
1287 }
1288
1289 /// Given a value use iterator, return the data operand corresponding to it.
1290 /// Iterator must actually correspond to a data operand.
1291 unsigned getDataOperandNo(Value::const_user_iterator UI) const {
1292 return getDataOperandNo(&UI.getUse());
1293 }
1294
1295 /// Given a use for a data operand, get the data operand number that
1296 /// corresponds to it.
1297 unsigned getDataOperandNo(const Use *U) const {
1298 assert(isDataOperand(U) && "Data operand # out of range!")(static_cast<void> (0));
1299 return U - data_operands_begin();
1300 }
1301
1302 /// Return the iterator pointing to the beginning of the argument list.
1303 User::op_iterator arg_begin() { return op_begin(); }
1304 User::const_op_iterator arg_begin() const {
1305 return const_cast<CallBase *>(this)->arg_begin();
1306 }
1307
1308 /// Return the iterator pointing to the end of the argument list.
1309 User::op_iterator arg_end() {
1310 // From the end of the data operands, walk backwards past the bundle
1311 // operands.
1312 return data_operands_end() - getNumTotalBundleOperands();
1313 }
1314 User::const_op_iterator arg_end() const {
1315 return const_cast<CallBase *>(this)->arg_end();
1316 }
1317
1318 /// Iteration adapter for range-for loops.
1319 iterator_range<User::op_iterator> args() {
1320 return make_range(arg_begin(), arg_end());
1321 }
1322 iterator_range<User::const_op_iterator> args() const {
1323 return make_range(arg_begin(), arg_end());
1324 }
1325 bool arg_empty() const { return arg_end() == arg_begin(); }
1326 unsigned arg_size() const { return arg_end() - arg_begin(); }
1327
1328 // Legacy API names that duplicate the above and will be removed once users
1329 // are migrated.
1330 iterator_range<User::op_iterator> arg_operands() {
1331 return make_range(arg_begin(), arg_end());
1332 }
1333 iterator_range<User::const_op_iterator> arg_operands() const {
1334 return make_range(arg_begin(), arg_end());
1335 }
1336 unsigned getNumArgOperands() const { return arg_size(); }
1337
1338 Value *getArgOperand(unsigned i) const {
1339 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1340 return getOperand(i);
1341 }
1342
1343 void setArgOperand(unsigned i, Value *v) {
1344 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1345 setOperand(i, v);
1346 }
1347
1348 /// Wrappers for getting the \c Use of a call argument.
1349 const Use &getArgOperandUse(unsigned i) const {
1350 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1351 return User::getOperandUse(i);
1352 }
1353 Use &getArgOperandUse(unsigned i) {
1354 assert(i < getNumArgOperands() && "Out of bounds!")(static_cast<void> (0));
1355 return User::getOperandUse(i);
1356 }
1357
1358 bool isArgOperand(const Use *U) const {
1359 assert(this == U->getUser() &&(static_cast<void> (0))
1360 "Only valid to query with a use of this instruction!")(static_cast<void> (0));
1361 return arg_begin() <= U && U < arg_end();
1362 }
1363 bool isArgOperand(Value::const_user_iterator UI) const {
1364 return isArgOperand(&UI.getUse());
1365 }
1366
1367 /// Given a use for an arg operand, get the arg operand number that
1368 /// corresponds to it.
1369 unsigned getArgOperandNo(const Use *U) const {
1370 assert(isArgOperand(U) && "Arg operand # out of range!")(static_cast<void> (0));
1371 return U - arg_begin();
1372 }
1373
1374 /// Given a value use iterator, return the arg operand number corresponding to
1375 /// it. Iterator must actually correspond to an argument operand.
1376 unsigned getArgOperandNo(Value::const_user_iterator UI) const {
1377 return getArgOperandNo(&UI.getUse());
1378 }
1379
1380 /// Returns true if this CallSite passes the given Value* as an argument to
1381 /// the called function.
1382 bool hasArgument(const Value *V) const {
1383 return llvm::is_contained(args(), V);
1384 }
1385
1386 Value *getCalledOperand() const { return Op<CalledOperandOpEndIdx>(); }
1387
1388 const Use &getCalledOperandUse() const { return Op<CalledOperandOpEndIdx>(); }
1389 Use &getCalledOperandUse() { return Op<CalledOperandOpEndIdx>(); }
1390
1391 /// Returns the function called, or null if this is an
1392 /// indirect function invocation.
1393 Function *getCalledFunction() const {
1394 return dyn_cast_or_null<Function>(getCalledOperand());
1395 }
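Illustrative only (hypothetical helper, not part of this header): the usual null-check idiom around getCalledFunction(), which returns nullptr for indirect calls, so the result must always be tested before use.

#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"

// Returns true if CB is a direct call to a function with the given name.
// For indirect calls getCalledFunction() is null.
static bool isDirectCallTo(const llvm::CallBase &CB, llvm::StringRef Name) {
  if (const llvm::Function *Callee = CB.getCalledFunction())
    return Callee->getName() == Name;
  return false;
}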
1396
1397 /// Return true if the callsite is an indirect call.
1398 bool isIndirectCall() const;
1399
1400 /// Determine whether the passed iterator points to the callee operand's Use.
1401 bool isCallee(Value::const_user_iterator UI) const {
1402 return isCallee(&UI.getUse());
1403 }
1404
1405 /// Determine whether this Use is the callee operand's Use.
1406 bool isCallee(const Use *U) const { return &getCalledOperandUse() == U; }
1407
1408 /// Helper to get the caller (the parent function).
1409 Function *getCaller();
1410 const Function *getCaller() const {
1411 return const_cast<CallBase *>(this)->getCaller();
1412 }
1413
1414 /// Tests if this call site must be tail call optimized. Only a CallInst can
1415 /// be tail call optimized.
1416 bool isMustTailCall() const;
1417
1418 /// Tests if this call site is marked as a tail call.
1419 bool isTailCall() const;
1420
1421 /// Returns the intrinsic ID of the intrinsic called or
1422 /// Intrinsic::not_intrinsic if the called function is not an intrinsic, or if
1423 /// this is an indirect call.
1424 Intrinsic::ID getIntrinsicID() const;
1425
1426 void setCalledOperand(Value *V) { Op<CalledOperandOpEndIdx>() = V; }
1427
1428 /// Sets the function called, including updating the function type.
1429 void setCalledFunction(Function *Fn) {
1430 setCalledFunction(Fn->getFunctionType(), Fn);
1431 }
1432
1433 /// Sets the function called, including updating the function type.
1434 void setCalledFunction(FunctionCallee Fn) {
1435 setCalledFunction(Fn.getFunctionType(), Fn.getCallee());
1436 }
1437
1438 /// Sets the function called, including updating to the specified function
1439 /// type.
1440 void setCalledFunction(FunctionType *FTy, Value *Fn) {
1441 this->FTy = FTy;
1442 assert(cast<PointerType>(Fn->getType())->isOpaqueOrPointeeTypeMatches(FTy))(static_cast<void> (0));
1443 // This function doesn't mutate the return type, only the function
1444 // type. Seems broken, but I'm just gonna stick an assert in for now.
1445 assert(getType() == FTy->getReturnType())(static_cast<void> (0));
1446 setCalledOperand(Fn);
1447 }
1448
1449 CallingConv::ID getCallingConv() const {
1450 return getSubclassData<CallingConvField>();
1451 }
1452
1453 void setCallingConv(CallingConv::ID CC) {
1454 setSubclassData<CallingConvField>(CC);
1455 }
1456
1457 /// Check if this call is an inline asm statement.
1458 bool isInlineAsm() const { return isa<InlineAsm>(getCalledOperand()); }
1459
1460 /// \name Attribute API
1461 ///
1462 /// These methods access and modify attributes on this call (including
1463 /// looking through to the attributes on the called function when necessary).
1464 ///@{
1465
1466 /// Return the parameter attributes for this call.
1467 ///
1468 AttributeList getAttributes() const { return Attrs; }
1469
1470 /// Set the parameter attributes for this call.
1471 ///
1472 void setAttributes(AttributeList A) { Attrs = A; }
1473
1474 /// Determine whether this call has the given attribute. If it does not
1475 /// then determine if the called function has the attribute, but only if
1476 /// the attribute is allowed for the call.
1477 bool hasFnAttr(Attribute::AttrKind Kind) const {
1478 assert(Kind != Attribute::NoBuiltin &&(static_cast<void> (0))
1479 "Use CallBase::isNoBuiltin() to check for Attribute::NoBuiltin")(static_cast<void> (0));
1480 return hasFnAttrImpl(Kind);
19. Calling 'CallBase::hasFnAttrImpl'
28. Returning from 'CallBase::hasFnAttrImpl'
29. Returning value, which participates in a condition later
1481 }
1482
1483 /// Determine whether this call has the given attribute. If it does not
1484 /// then determine if the called function has the attribute, but only if
1485 /// the attribute is allowed for the call.
1486 bool hasFnAttr(StringRef Kind) const { return hasFnAttrImpl(Kind); }
1487
1488 // TODO: remove non-AtIndex versions of these methods.
1489 /// adds the attribute to the list of attributes.
1490 void addAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
1491 Attrs = Attrs.addAttributeAtIndex(getContext(), i, Kind);
1492 }
1493
1494 /// adds the attribute to the list of attributes.
1495 void addAttributeAtIndex(unsigned i, Attribute Attr) {
1496 Attrs = Attrs.addAttributeAtIndex(getContext(), i, Attr);
1497 }
1498
1499 /// Adds the attribute to the function.
1500 void addFnAttr(Attribute::AttrKind Kind) {
1501 Attrs = Attrs.addFnAttribute(getContext(), Kind);
1502 }
1503
1504 /// Adds the attribute to the function.
1505 void addFnAttr(Attribute Attr) {
1506 Attrs = Attrs.addFnAttribute(getContext(), Attr);
1507 }
1508
1509 /// Adds the attribute to the return value.
1510 void addRetAttr(Attribute::AttrKind Kind) {
1511 Attrs = Attrs.addRetAttribute(getContext(), Kind);
1512 }
1513
1514 /// Adds the attribute to the return value.
1515 void addRetAttr(Attribute Attr) {
1516 Attrs = Attrs.addRetAttribute(getContext(), Attr);
1517 }
1518
1519 /// Adds the attribute to the indicated argument
1520 void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1521 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1522 Attrs = Attrs.addParamAttribute(getContext(), ArgNo, Kind);
1523 }
1524
1525 /// Adds the attribute to the indicated argument
1526 void addParamAttr(unsigned ArgNo, Attribute Attr) {
1527 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1528 Attrs = Attrs.addParamAttribute(getContext(), ArgNo, Attr);
1529 }
1530
1531 /// removes the attribute from the list of attributes.
1532 void removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) {
1533 Attrs = Attrs.removeAttributeAtIndex(getContext(), i, Kind);
1534 }
1535
1536 /// removes the attribute from the list of attributes.
1537 void removeAttributeAtIndex(unsigned i, StringRef Kind) {
1538 Attrs = Attrs.removeAttributeAtIndex(getContext(), i, Kind);
1539 }
1540
1541 /// Removes the attributes from the function
1542 void removeFnAttrs(const AttrBuilder &AttrsToRemove) {
1543 Attrs = Attrs.removeFnAttributes(getContext(), AttrsToRemove);
1544 }
1545
1546 /// Removes the attribute from the function
1547 void removeFnAttr(Attribute::AttrKind Kind) {
1548 Attrs = Attrs.removeFnAttribute(getContext(), Kind);
1549 }
1550
1551 /// Removes the attribute from the return value
1552 void removeRetAttr(Attribute::AttrKind Kind) {
1553 Attrs = Attrs.removeRetAttribute(getContext(), Kind);
1554 }
1555
1556 /// Removes the attributes from the return value
1557 void removeRetAttrs(const AttrBuilder &AttrsToRemove) {
1558 Attrs = Attrs.removeRetAttributes(getContext(), AttrsToRemove);
1559 }
1560
1561 /// Removes the attribute from the given argument
1562 void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
1563 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1564 Attrs = Attrs.removeParamAttribute(getContext(), ArgNo, Kind);
1565 }
1566
1567 /// Removes the attribute from the given argument
1568 void removeParamAttr(unsigned ArgNo, StringRef Kind) {
1569 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1570 Attrs = Attrs.removeParamAttribute(getContext(), ArgNo, Kind);
1571 }
1572
1573 /// Removes the attributes from the given argument
1574 void removeParamAttrs(unsigned ArgNo, const AttrBuilder &AttrsToRemove) {
1575 Attrs = Attrs.removeParamAttributes(getContext(), ArgNo, AttrsToRemove);
1576 }
1577
1578 /// adds the dereferenceable attribute to the list of attributes.
1579 void addDereferenceableParamAttr(unsigned i, uint64_t Bytes) {
1580 Attrs = Attrs.addDereferenceableParamAttr(getContext(), i, Bytes);
1581 }
1582
1583 /// adds the dereferenceable attribute to the list of attributes.
1584 void addDereferenceableRetAttr(uint64_t Bytes) {
1585 Attrs = Attrs.addDereferenceableRetAttr(getContext(), Bytes);
1586 }
1587
1588 /// Determine whether the return value has the given attribute.
1589 bool hasRetAttr(Attribute::AttrKind Kind) const {
1590 return hasRetAttrImpl(Kind);
1591 }
1592 /// Determine whether the return value has the given attribute.
1593 bool hasRetAttr(StringRef Kind) const { return hasRetAttrImpl(Kind); }
1594
1595 /// Determine whether the argument or parameter has the given attribute.
1596 bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const;
1597
1598 /// Get the attribute of a given kind at a position.
1599 Attribute getAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) const {
1600 return getAttributes().getAttributeAtIndex(i, Kind);
1601 }
1602
1603 /// Get the attribute of a given kind at a position.
1604 Attribute getAttributeAtIndex(unsigned i, StringRef Kind) const {
1605 return getAttributes().getAttributeAtIndex(i, Kind);
1606 }
1607
1608 /// Get the attribute of a given kind for the function.
1609 Attribute getFnAttr(StringRef Kind) const {
1610 return getAttributes().getFnAttr(Kind);
1611 }
1612
1613 /// Get the attribute of a given kind for the function.
1614 Attribute getFnAttr(Attribute::AttrKind Kind) const {
1615 return getAttributes().getFnAttr(Kind);
1616 }
1617
1618 /// Get the attribute of a given kind from a given arg
1619 Attribute getParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
1620 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1621 return getAttributes().getParamAttr(ArgNo, Kind);
1622 }
1623
1624 /// Get the attribute of a given kind from a given arg
1625 Attribute getParamAttr(unsigned ArgNo, StringRef Kind) const {
1626 assert(ArgNo < getNumArgOperands() && "Out of bounds")(static_cast<void> (0));
1627 return getAttributes().getParamAttr(ArgNo, Kind);
1628 }
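A small usage sketch for the attribute accessors above (the helper name is hypothetical): query a parameter attribute before adding it.

#include "llvm/IR/InstrTypes.h"

// Mark the first call argument 'nonnull' unless it is already known to be
// nonnull, either on the call site or on the callee.
static void markFirstArgNonNull(llvm::CallBase &CB) {
  if (CB.arg_empty())
    return;
  if (!CB.paramHasAttr(0, llvm::Attribute::NonNull))
    CB.addParamAttr(0, llvm::Attribute::NonNull);
}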
1629
1630 /// Return true if the data operand at index \p i has the attribute \p
1631 /// A.
1632 ///
1633 /// Data operands include call arguments and values used in operand bundles,
1634 /// but do not include the callee operand. This routine dispatches to the
1635 /// underlying AttributeList or the OperandBundleUser as appropriate.
1636 ///
1637 /// The index \p i is interpreted as
1638 ///
1639 /// \p i == Attribute::ReturnIndex -> the return value
1640 /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
1641 /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
1642 /// (\p i - 1) in the operand list.
1643 bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const {
1644 // Note that we have to add one because `i` isn't zero-indexed.
1645 assert(i < (getNumArgOperands() + getNumTotalBundleOperands() + 1) &&(static_cast<void> (0))
1646 "Data operand index out of bounds!")(static_cast<void> (0));
1647
1648 // The attribute A can either be directly specified, if the operand in
1649 // question is a call argument; or be indirectly implied by the kind of its
1650 // containing operand bundle, if the operand is a bundle operand.
1651
1652 if (i == AttributeList::ReturnIndex)
1653 return hasRetAttr(Kind);
1654
1655 // FIXME: Avoid these i - 1 calculations and update the API to use
1656 // zero-based indices.
1657 if (i < (getNumArgOperands() + 1))
1658 return paramHasAttr(i - 1, Kind);
1659
1660 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&(static_cast<void> (0))
1661 "Must be either a call argument or an operand bundle!")(static_cast<void> (0));
1662 return bundleOperandHasAttr(i - 1, Kind);
1663 }
1664
1665 /// Determine whether this data operand is not captured.
1666 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1667 // better indicate that this may return a conservative answer.
1668 bool doesNotCapture(unsigned OpNo) const {
1669 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
1670 }
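For illustration (hypothetical helper, not part of the header): doesNotCapture() takes a 0-based data operand number and adds one internally to form the 1-based index that dataOperandHasImpliedAttr expects.

#include "llvm/IR/InstrTypes.h"

// Returns true if every call argument is known not to be captured,
// going through the 0-based doesNotCapture() wrapper shown above.
static bool noArgumentIsCaptured(const llvm::CallBase &CB) {
  for (unsigned I = 0, E = CB.arg_size(); I != E; ++I)
    if (!CB.doesNotCapture(I))
      return false;
  return true;
}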
1671
1672 /// Determine whether this argument is passed by value.
1673 bool isByValArgument(unsigned ArgNo) const {
1674 return paramHasAttr(ArgNo, Attribute::ByVal);
1675 }
1676
1677 /// Determine whether this argument is passed in an alloca.
1678 bool isInAllocaArgument(unsigned ArgNo) const {
1679 return paramHasAttr(ArgNo, Attribute::InAlloca);
1680 }
1681
1682 /// Determine whether this argument is passed by value, in an alloca, or is
1683 /// preallocated.
1684 bool isPassPointeeByValueArgument(unsigned ArgNo) const {
1685 return paramHasAttr(ArgNo, Attribute::ByVal) ||
1686 paramHasAttr(ArgNo, Attribute::InAlloca) ||
1687 paramHasAttr(ArgNo, Attribute::Preallocated);
1688 }
1689
1690 /// Determine whether passing undef to this argument is undefined behavior.
1691 /// If passing undef to this argument is UB, passing poison is UB as well
1692 /// because poison is more undefined than undef.
1693 bool isPassingUndefUB(unsigned ArgNo) const {
1694 return paramHasAttr(ArgNo, Attribute::NoUndef) ||
1695 // dereferenceable implies noundef.
1696 paramHasAttr(ArgNo, Attribute::Dereferenceable) ||
1697 // dereferenceable implies noundef, and null is a well-defined value.
1698 paramHasAttr(ArgNo, Attribute::DereferenceableOrNull);
1699 }
1700
1701 /// Determine if there is an inalloca argument. Only the last argument can
1702 /// have the inalloca attribute.
1703 bool hasInAllocaArgument() const {
1704 return !arg_empty() && paramHasAttr(arg_size() - 1, Attribute::InAlloca);
1705 }
1706
1707 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1708 // better indicate that this may return a conservative answer.
1709 bool doesNotAccessMemory(unsigned OpNo) const {
1710 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1711 }
1712
1713 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1714 // better indicate that this may return a conservative answer.
1715 bool onlyReadsMemory(unsigned OpNo) const {
1716 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
1717 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1718 }
1719
1720 // FIXME: Once this API is no longer duplicated in `CallSite`, rename this to
1721 // better indicate that this may return a conservative answer.
1722 bool doesNotReadMemory(unsigned OpNo) const {
1723 return dataOperandHasImpliedAttr(OpNo + 1, Attribute::WriteOnly) ||
1724 dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
1725 }
1726
1727 /// Extract the alignment of the return value.
1728 MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
1729
1730 /// Extract the alignment for a call or parameter (0=unknown).
1731 MaybeAlign getParamAlign(unsigned ArgNo) const {
1732 return Attrs.getParamAlignment(ArgNo);
1733 }
1734
1735 MaybeAlign getParamStackAlign(unsigned ArgNo) const {
1736 return Attrs.getParamStackAlignment(ArgNo);
1737 }
1738
1739 /// Extract the byval type for a call or parameter.
1740 Type *getParamByValType(unsigned ArgNo) const {
1741 if (auto *Ty = Attrs.getParamByValType(ArgNo))
1742 return Ty;
1743 if (const Function *F = getCalledFunction())
1744 return F->getAttributes().getParamByValType(ArgNo);
1745 return nullptr;
1746 }
1747
1748 /// Extract the preallocated type for a call or parameter.
1749 Type *getParamPreallocatedType(unsigned ArgNo) const {
1750 if (auto *Ty = Attrs.getParamPreallocatedType(ArgNo))
1751 return Ty;
1752 if (const Function *F = getCalledFunction())
1753 return F->getAttributes().getParamPreallocatedType(ArgNo);
1754 return nullptr;
1755 }
1756
1757 /// Extract the preallocated type for a call or parameter.
1758 Type *getParamInAllocaType(unsigned ArgNo) const {
1759 if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
1760 return Ty;
1761 if (const Function *F = getCalledFunction())
1762 return F->getAttributes().getParamInAllocaType(ArgNo);
1763 return nullptr;
1764 }
1765
1766 /// Extract the number of dereferenceable bytes for a call or
1767 /// parameter (0=unknown).
1768 uint64_t getRetDereferenceableBytes() const {
1769 return Attrs.getRetDereferenceableBytes();
1770 }
1771
1772 /// Extract the number of dereferenceable bytes for a call or
1773 /// parameter (0=unknown).
1774 uint64_t getParamDereferenceableBytes(unsigned i) const {
1775 return Attrs.getParamDereferenceableBytes(i);
1776 }
1777
1778 /// Extract the number of dereferenceable_or_null bytes for a call
1779 /// (0=unknown).
1780 uint64_t getRetDereferenceableOrNullBytes() const {
1781 return Attrs.getRetDereferenceableOrNullBytes();
1782 }
1783
1784 /// Extract the number of dereferenceable_or_null bytes for a
1785 /// parameter (0=unknown).
1786 uint64_t getParamDereferenceableOrNullBytes(unsigned i) const {
1787 return Attrs.getParamDereferenceableOrNullBytes(i);
1788 }
1789
1790 /// Return true if the return value is known to be not null.
1791 /// This may be because it has the nonnull attribute, or because at least
1792 /// one byte is dereferenceable and the pointer is in addrspace(0).
1793 bool isReturnNonNull() const;
1794
1795 /// Determine if the return value is marked with NoAlias attribute.
1796 bool returnDoesNotAlias() const {
1797 return Attrs.hasRetAttr(Attribute::NoAlias);
1798 }
1799
1800 /// If one of the arguments has the 'returned' attribute, returns its
1801 /// operand value. Otherwise, return nullptr.
1802 Value *getReturnedArgOperand() const;
1803
1804 /// Return true if the call should not be treated as a call to a
1805 /// builtin.
1806 bool isNoBuiltin() const {
1807 return hasFnAttrImpl(Attribute::NoBuiltin) &&
1808 !hasFnAttrImpl(Attribute::Builtin);
1809 }
1810
1811 /// Determine if the call requires strict floating point semantics.
1812 bool isStrictFP() const { return hasFnAttr(Attribute::StrictFP); }
1813
1814 /// Return true if the call should not be inlined.
1815 bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
1816 void setIsNoInline() { addFnAttr(Attribute::NoInline); }
1817 /// Determine if the call does not access memory.
1818 bool doesNotAccessMemory() const { return hasFnAttr(Attribute::ReadNone); }
1819 void setDoesNotAccessMemory() { addFnAttr(Attribute::ReadNone); }
1820
1821 /// Determine if the call does not access or only reads memory.
1822 bool onlyReadsMemory() const {
1823 return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
1824 }
1825
1826 void setOnlyReadsMemory() { addFnAttr(Attribute::ReadOnly); }
1827
1828 /// Determine if the call does not access or only writes memory.
1829 bool doesNotReadMemory() const {
1830 return doesNotAccessMemory() || hasFnAttr(Attribute::WriteOnly);
1831 }
1832 void setDoesNotReadMemory() { addFnAttr(Attribute::WriteOnly); }
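A simplified illustration of why these predicates matter to transformations such as LICM (the pass this report was generated for); this is a sketch, not the pass's actual legality check.

#include "llvm/IR/InstrTypes.h"

// A call that at most reads memory, cannot unwind and is not convergent
// is a far easier candidate for hoisting or sinking out of a loop.
static bool isEasyToMoveCall(const llvm::CallBase &CB) {
  return CB.onlyReadsMemory() && CB.doesNotThrow() && !CB.isConvergent();
}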
1833
1834 /// Determine if the call can access memory only using pointers based
1835 /// on its arguments.
1836 bool onlyAccessesArgMemory() const {
1837 return hasFnAttr(Attribute::ArgMemOnly);
1838 }
1839 void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }
1840
1841 /// Determine if the function may only access memory that is
1842 /// inaccessible from the IR.
1843 bool onlyAccessesInaccessibleMemory() const {
1844 return hasFnAttr(Attribute::InaccessibleMemOnly);
1845 }
1846 void setOnlyAccessesInaccessibleMemory() {
1847 addFnAttr(Attribute::InaccessibleMemOnly);
1848 }
1849
1850 /// Determine if the function may only access memory that is
1851 /// either inaccessible from the IR or pointed to by its arguments.
1852 bool onlyAccessesInaccessibleMemOrArgMem() const {
1853 return hasFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1854 }
1855 void setOnlyAccessesInaccessibleMemOrArgMem() {
1856 addFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
1857 }
1858 /// Determine if the call cannot return.
1859 bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
1860 void setDoesNotReturn() { addFnAttr(Attribute::NoReturn); }
1861
1862 /// Determine if the call should not perform indirect branch tracking.
1863 bool doesNoCfCheck() const { return hasFnAttr(Attribute::NoCfCheck); }
1864
1865 /// Determine if the call cannot unwind.
1866 bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
1867 void setDoesNotThrow() { addFnAttr(Attribute::NoUnwind); }
1868
1869 /// Determine if the invoke cannot be duplicated.
1870 bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
1871 void setCannotDuplicate() { addFnAttr(Attribute::NoDuplicate); }
1872
1873 /// Determine if the call cannot be tail merged.
1874 bool cannotMerge() const { return hasFnAttr(Attribute::NoMerge); }
1875 void setCannotMerge() { addFnAttr(Attribute::NoMerge); }
1876
1877 /// Determine if the invoke is convergent
1878 bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
18. Calling 'CallBase::hasFnAttr'
30. Returning from 'CallBase::hasFnAttr'
31. Returning value, which participates in a condition later
1879 void setConvergent() { addFnAttr(Attribute::Convergent); }
1880 void setNotConvergent() { removeFnAttr(Attribute::Convergent); }
1881
1882 /// Determine if the call returns a structure through first
1883 /// pointer argument.
1884 bool hasStructRetAttr() const {
1885 if (getNumArgOperands() == 0)
1886 return false;
1887
1888 // Be friendly and also check the callee.
1889 return paramHasAttr(0, Attribute::StructRet);
1890 }
1891
1892 /// Determine if any call argument is an aggregate passed by value.
1893 bool hasByValArgument() const {
1894 return Attrs.hasAttrSomewhere(Attribute::ByVal);
1895 }
1896
1897 ///@{
1898 // End of attribute API.
1899
1900 /// \name Operand Bundle API
1901 ///
1902 /// This group of methods provides the API to access and manipulate operand
1903 /// bundles on this call.
1904 /// @{
1905
1906 /// Return the number of operand bundles associated with this User.
1907 unsigned getNumOperandBundles() const {
1908 return std::distance(bundle_op_info_begin(), bundle_op_info_end());
1909 }
1910
1911 /// Return true if this User has any operand bundles.
1912 bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
1913
1914 /// Return the index of the first bundle operand in the Use array.
1915 unsigned getBundleOperandsStartIndex() const {
1916 assert(hasOperandBundles() && "Don't call otherwise!")(static_cast<void> (0));
1917 return bundle_op_info_begin()->Begin;
1918 }
1919
1920 /// Return the index of the last bundle operand in the Use array.
1921 unsigned getBundleOperandsEndIndex() const {
1922 assert(hasOperandBundles() && "Don't call otherwise!")(static_cast<void> (0));
1923 return bundle_op_info_end()[-1].End;
1924 }
1925
1926 /// Return true if the operand at index \p Idx is a bundle operand.
1927 bool isBundleOperand(unsigned Idx) const {
1928 return hasOperandBundles() && Idx >= getBundleOperandsStartIndex() &&
1929 Idx < getBundleOperandsEndIndex();
1930 }
1931
1932 /// Returns true if the use is a bundle operand.
1933 bool isBundleOperand(const Use *U) const {
1934 assert(this == U->getUser() &&(static_cast<void> (0))
1935 "Only valid to query with a use of this instruction!")(static_cast<void> (0));
1936 return hasOperandBundles() && isBundleOperand(U - op_begin());
1937 }
1938 bool isBundleOperand(Value::const_user_iterator UI) const {
1939 return isBundleOperand(&UI.getUse());
1940 }
1941
1942 /// Return the total number of operands (not operand bundles) used by
1943 /// every operand bundle in this OperandBundleUser.
1944 unsigned getNumTotalBundleOperands() const {
1945 if (!hasOperandBundles())
1946 return 0;
1947
1948 unsigned Begin = getBundleOperandsStartIndex();
1949 unsigned End = getBundleOperandsEndIndex();
1950
1951 assert(Begin <= End && "Should be!")(static_cast<void> (0));
1952 return End - Begin;
1953 }
1954
1955 /// Return the operand bundle at a specific index.
1956 OperandBundleUse getOperandBundleAt(unsigned Index) const {
1957 assert(Index < getNumOperandBundles() && "Index out of bounds!")(static_cast<void> (0));
1958 return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
1959 }
1960
1961 /// Return the number of operand bundles with the tag Name attached to
1962 /// this instruction.
1963 unsigned countOperandBundlesOfType(StringRef Name) const {
1964 unsigned Count = 0;
1965 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1966 if (getOperandBundleAt(i).getTagName() == Name)
1967 Count++;
1968
1969 return Count;
1970 }
1971
1972 /// Return the number of operand bundles with the tag ID attached to
1973 /// this instruction.
1974 unsigned countOperandBundlesOfType(uint32_t ID) const {
1975 unsigned Count = 0;
1976 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
1977 if (getOperandBundleAt(i).getTagID() == ID)
1978 Count++;
1979
1980 return Count;
1981 }
1982
1983 /// Return an operand bundle by name, if present.
1984 ///
1985 /// It is an error to call this for operand bundle types that may have
1986 /// multiple instances of them on the same instruction.
1987 Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
1988 assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!")(static_cast<void> (0));
1989
1990 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
1991 OperandBundleUse U = getOperandBundleAt(i);
1992 if (U.getTagName() == Name)
1993 return U;
1994 }
1995
1996 return None;
1997 }
1998
1999 /// Return an operand bundle by tag ID, if present.
2000 ///
2001 /// It is an error to call this for operand bundle types that may have
2002 /// multiple instances of them on the same instruction.
2003 Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
2004 assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!")(static_cast<void> (0));
2005
2006 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2007 OperandBundleUse U = getOperandBundleAt(i);
2008 if (U.getTagID() == ID)
2009 return U;
2010 }
2011
2012 return None;
2013 }
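Illustrative usage of the bundle query above (the helper is hypothetical): fetch the at-most-one "deopt" bundle on a call and count its inputs.

#include "llvm/ADT/Optional.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/LLVMContext.h"

// Returns the number of values carried by the call's "deopt" operand
// bundle, or 0 if the call has no such bundle.
static size_t numDeoptInputs(const llvm::CallBase &CB) {
  if (llvm::Optional<llvm::OperandBundleUse> OB =
          CB.getOperandBundle(llvm::LLVMContext::OB_deopt))
    return OB->Inputs.size();
  return 0;
}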
2014
2015 /// Return the list of operand bundles attached to this instruction as
2016 /// a vector of OperandBundleDefs.
2017 ///
2018 /// This function copies the OperandBundleUse instances associated with this
2019 /// OperandBundleUser to a vector of OperandBundleDefs. Note:
2020 /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
2021 /// representations of operand bundles (see documentation above).
2022 void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const;
2023
2024 /// Return the operand bundle for the operand at index OpIdx.
2025 ///
2026 /// It is an error to call this with an OpIdx that does not correspond to a
2027 /// bundle operand.
2028 OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
2029 return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
2030 }
2031
2032 /// Return true if this operand bundle user has operand bundles that
2033 /// may read from the heap.
2034 bool hasReadingOperandBundles() const;
2035
2036 /// Return true if this operand bundle user has operand bundles that
2037 /// may write to the heap.
2038 bool hasClobberingOperandBundles() const {
2039 for (auto &BOI : bundle_op_infos()) {
2040 if (BOI.Tag->second == LLVMContext::OB_deopt ||
2041 BOI.Tag->second == LLVMContext::OB_funclet)
2042 continue;
2043
2044 // This instruction has an operand bundle that is not known to us.
2045 // Assume the worst.
2046 return true;
2047 }
2048
2049 return false;
2050 }
2051
2052 /// Return true if the bundle operand at index \p OpIdx has the
2053 /// attribute \p A.
2054 bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
2055 auto &BOI = getBundleOpInfoForOperand(OpIdx);
2056 auto OBU = operandBundleFromBundleOpInfo(BOI);
2057 return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
2058 }
2059
2060 /// Return true if \p Other has the same sequence of operand bundle
2061 /// tags with the same number of operands on each one of them as this
2062 /// OperandBundleUser.
2063 bool hasIdenticalOperandBundleSchema(const CallBase &Other) const {
2064 if (getNumOperandBundles() != Other.getNumOperandBundles())
2065 return false;
2066
2067 return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
2068 Other.bundle_op_info_begin());
2069 }
2070
2071 /// Return true if this operand bundle user contains operand bundles
2072 /// with tags other than those specified in \p IDs.
2073 bool hasOperandBundlesOtherThan(ArrayRef<uint32_t> IDs) const {
2074 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
2075 uint32_t ID = getOperandBundleAt(i).getTagID();
2076 if (!is_contained(IDs, ID))
2077 return true;
2078 }
2079 return false;
2080 }
2081
2082 /// Is the function attribute S disallowed by some operand bundle on
2083 /// this operand bundle user?
2084 bool isFnAttrDisallowedByOpBundle(StringRef S) const {
2085 // Operand bundles only possibly disallow readnone, readonly and argmemonly
2086 // attributes. All String attributes are fine.
2087 return false;
2088 }
2089
2090 /// Is the function attribute A disallowed by some operand bundle on
2091 /// this operand bundle user?
2092 bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
2093 switch (A) {
23. Control jumps to the 'default' case at line 2094
2094 default:
2095 return false;
24. Returning zero, which participates in a condition later
2096
2097 case Attribute::InaccessibleMemOrArgMemOnly:
2098 return hasReadingOperandBundles();
2099
2100 case Attribute::InaccessibleMemOnly:
2101 return hasReadingOperandBundles();
2102
2103 case Attribute::ArgMemOnly:
2104 return hasReadingOperandBundles();
2105
2106 case Attribute::ReadNone:
2107 return hasReadingOperandBundles();
2108
2109 case Attribute::ReadOnly:
2110 return hasClobberingOperandBundles();
2111 }
2112
2113 llvm_unreachable("switch has a default case!")__builtin_unreachable();
2114 }
2115
2116 /// Used to keep track of an operand bundle. See the main comment on
2117 /// OperandBundleUser above.
2118 struct BundleOpInfo {
2119 /// The operand bundle tag, interned by
2120 /// LLVMContextImpl::getOrInsertBundleTag.
2121 StringMapEntry<uint32_t> *Tag;
2122
2123 /// The index in the Use& vector where operands for this operand
2124 /// bundle starts.
2125 uint32_t Begin;
2126
2127 /// The index in the Use& vector where operands for this operand
2128 /// bundle ends.
2129 uint32_t End;
2130
2131 bool operator==(const BundleOpInfo &Other) const {
2132 return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
2133 }
2134 };
2135
2136 /// Simple helper function to map a BundleOpInfo to an
2137 /// OperandBundleUse.
2138 OperandBundleUse
2139 operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
2140 auto begin = op_begin();
2141 ArrayRef<Use> Inputs(begin + BOI.Begin, begin + BOI.End);
2142 return OperandBundleUse(BOI.Tag, Inputs);
2143 }
2144
2145 using bundle_op_iterator = BundleOpInfo *;
2146 using const_bundle_op_iterator = const BundleOpInfo *;
2147
2148 /// Return the start of the list of BundleOpInfo instances associated
2149 /// with this OperandBundleUser.
2150 ///
2151 /// OperandBundleUser uses the descriptor area co-allocated with the host User
2152 /// to store some meta information about which operands are "normal" operands,
2153 /// and which ones belong to some operand bundle.
2154 ///
2155 /// The layout of an operand bundle user is
2156 ///
2157 /// +-----------uint32_t End-------------------------------------+
2158 /// | |
2159 /// | +--------uint32_t Begin--------------------+ |
2160 /// | | | |
2161 /// ^ ^ v v
2162 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2163 /// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
2164 /// |------|------|----|----|----|----|----|---------|----|---------|----|-----
2165 /// v v ^ ^
2166 /// | | | |
2167 /// | +--------uint32_t Begin------------+ |
2168 /// | |
2169 /// +-----------uint32_t End-----------------------------+
2170 ///
2171 ///
2172 /// BOI0, BOI1 ... are descriptions of operand bundles in this User's use
2173 /// list. These descriptions are installed and managed by this class, and
2174 /// they're all instances of OperandBundleUser<T>::BundleOpInfo.
2175 ///
2176 /// DU is an additional descriptor installed by User's 'operator new' to keep
2177 /// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
2178 /// access or modify DU in any way, it's an implementation detail private to
2179 /// User.
2180 ///
2181 /// The regular Use& vector for the User starts at U0. The operand bundle
2182 /// uses are part of the Use& vector, just like normal uses. In the diagram
2183 /// above, the operand bundle uses start at BOI0_U0. Each instance of
2184 /// BundleOpInfo has information about a contiguous set of uses constituting
2185 /// an operand bundle, and the total set of operand bundle uses themselves
2186 /// form a contiguous set of uses (i.e. there are no gaps between uses
2187 /// corresponding to individual operand bundles).
2188 ///
2189 /// This class does not know the location of the set of operand bundle uses
2190 /// within the use list -- that is decided by the User using this class via
2191 /// the BeginIdx argument in populateBundleOperandInfos.
2192 ///
2193 /// Currently operand bundle users with hung-off operands are not supported.
2194 bundle_op_iterator bundle_op_info_begin() {
2195 if (!hasDescriptor())
2196 return nullptr;
2197
2198 uint8_t *BytesBegin = getDescriptor().begin();
2199 return reinterpret_cast<bundle_op_iterator>(BytesBegin);
2200 }
2201
2202 /// Return the start of the list of BundleOpInfo instances associated
2203 /// with this OperandBundleUser.
2204 const_bundle_op_iterator bundle_op_info_begin() const {
2205 auto *NonConstThis = const_cast<CallBase *>(this);
2206 return NonConstThis->bundle_op_info_begin();
2207 }
2208
2209 /// Return the end of the list of BundleOpInfo instances associated
2210 /// with this OperandBundleUser.
2211 bundle_op_iterator bundle_op_info_end() {
2212 if (!hasDescriptor())
2213 return nullptr;
2214
2215 uint8_t *BytesEnd = getDescriptor().end();
2216 return reinterpret_cast<bundle_op_iterator>(BytesEnd);
2217 }
2218
2219 /// Return the end of the list of BundleOpInfo instances associated
2220 /// with this OperandBundleUser.
2221 const_bundle_op_iterator bundle_op_info_end() const {
2222 auto *NonConstThis = const_cast<CallBase *>(this);
2223 return NonConstThis->bundle_op_info_end();
2224 }
2225
2226 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2227 iterator_range<bundle_op_iterator> bundle_op_infos() {
2228 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2229 }
2230
2231 /// Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
2232 iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
2233 return make_range(bundle_op_info_begin(), bundle_op_info_end());
2234 }
2235
2236 /// Populate the BundleOpInfo instances and the Use& vector from \p
2237 /// Bundles. Return the op_iterator pointing to the Use& one past the last
2238 /// bundle operand use.
2239 ///
2240 /// Each \p OperandBundleDef instance is tracked by an OperandBundleInfo
2241 /// instance allocated in this User's descriptor.
2242 op_iterator populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
2243 const unsigned BeginIndex);
2244
2245public:
2246 /// Return the BundleOpInfo for the operand at index OpIdx.
2247 ///
2248 /// It is an error to call this with an OpIdx that does not correspond to a
2249 /// bundle operand.
2250 BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx);
2251 const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
2252 return const_cast<CallBase *>(this)->getBundleOpInfoForOperand(OpIdx);
2253 }
2254
2255protected:
2256 /// Return the total number of values used in \p Bundles.
2257 static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
2258 unsigned Total = 0;
2259 for (auto &B : Bundles)
2260 Total += B.input_size();
2261 return Total;
2262 }
2263
2264 /// @}
2265 // End of operand bundle API.
2266
2267private:
2268 bool hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
2269 bool hasFnAttrOnCalledFunction(StringRef Kind) const;
2270
2271 template <typename AttrKind> bool hasFnAttrImpl(AttrKind Kind) const {
2272 if (Attrs.hasFnAttr(Kind))
20. Assuming the condition is false
21. Taking false branch
2273 return true;
2274
2275 // Operand bundles override attributes on the called function, but don't
2276 // override attributes directly present on the call instruction.
2277 if (isFnAttrDisallowedByOpBundle(Kind))
22. Calling 'CallBase::isFnAttrDisallowedByOpBundle'
25. Returning from 'CallBase::isFnAttrDisallowedByOpBundle'
26. Taking false branch
2278 return false;
2279
2280 return hasFnAttrOnCalledFunction(Kind);
27. Returning value, which participates in a condition later
2281 }
2282
2283 /// Determine whether the return value has the given attribute. Supports
2284 /// Attribute::AttrKind and StringRef as \p AttrKind types.
2285 template <typename AttrKind> bool hasRetAttrImpl(AttrKind Kind) const {
2286 if (Attrs.hasRetAttr(Kind))
2287 return true;
2288
2289 // Look at the callee, if available.
2290 if (const Function *F = getCalledFunction())
2291 return F->getAttributes().hasRetAttr(Kind);
2292 return false;
2293 }
2294};
2295
2296template <>
2297struct OperandTraits<CallBase> : public VariadicOperandTraits<CallBase, 1> {};
2298
2299DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallBase, Value)
2300
2301//===----------------------------------------------------------------------===//
2302// FuncletPadInst Class
2303//===----------------------------------------------------------------------===//
2304class FuncletPadInst : public Instruction {
2305private:
2306 FuncletPadInst(const FuncletPadInst &CPI);
2307
2308 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2309 ArrayRef<Value *> Args, unsigned Values,
2310 const Twine &NameStr, Instruction *InsertBefore);
2311 explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
2312 ArrayRef<Value *> Args, unsigned Values,
2313 const Twine &NameStr, BasicBlock *InsertAtEnd);
2314
2315 void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
2316
2317protected:
2318 // Note: Instruction needs to be a friend here to call cloneImpl.
2319 friend class Instruction;
2320 friend class CatchPadInst;
2321 friend class CleanupPadInst;
2322
2323 FuncletPadInst *cloneImpl() const;
2324
2325public:
2326 /// Provide fast operand accessors
2327 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2328
2329 /// getNumArgOperands - Return the number of funcletpad arguments.
2330 ///
2331 unsigned getNumArgOperands() const { return getNumOperands() - 1; }
2332
2333 /// Convenience accessors
2334
2335 /// Return the outer EH-pad this funclet is nested within.
2336 ///
2337 /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
2338 /// is a CatchPadInst.
2339 Value *getParentPad() const { return Op<-1>(); }
2340 void setParentPad(Value *ParentPad) {
2341 assert(ParentPad)(static_cast<void> (0));
2342 Op<-1>() = ParentPad;
2343 }
2344
2345 /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
2346 ///
2347 Value *getArgOperand(unsigned i) const { return getOperand(i); }
2348 void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
2349
2350 /// arg_operands - iteration adapter for range-for loops.
2351 op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
2352
2353 /// arg_operands - iteration adapter for range-for loops.
2354 const_op_range arg_operands() const {
2355 return const_op_range(op_begin(), op_end() - 1);
2356 }
2357
2358 // Methods for support type inquiry through isa, cast, and dyn_cast:
2359 static bool classof(const Instruction *I) { return I->isFuncletPad(); }
2360 static bool classof(const Value *V) {
2361 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2362 }
2363};
2364
2365template <>
2366struct OperandTraits<FuncletPadInst>
2367 : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
2368
2369DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
2370
2371} // end namespace llvm
2372
2373#endif // LLVM_IR_INSTRTYPES_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/IR/PatternMatch.h

1//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a simple and efficient mechanism for performing general
10// tree-based pattern matches on the LLVM IR. The power of these routines is
11// that it allows you to write concise patterns that are expressive and easy to
12// understand. The other major advantage of this is that it allows you to
13// trivially capture/bind elements in the pattern to variables. For example,
14// you can do something like this:
15//
16// Value *Exp = ...
17// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
18// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
19// m_And(m_Value(Y), m_ConstantInt(C2))))) {
20// ... Pattern is matched and variables are bound ...
21// }
22//
23// This is primarily useful to things like the instruction combiner, but can
24// also be useful for static analysis tools or code generators.
25//
26//===----------------------------------------------------------------------===//
27
28#ifndef LLVM_IR_PATTERNMATCH_H
29#define LLVM_IR_PATTERNMATCH_H
30
31#include "llvm/ADT/APFloat.h"
32#include "llvm/ADT/APInt.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/InstrTypes.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/IntrinsicInst.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Value.h"
43#include "llvm/Support/Casting.h"
44#include <cstdint>
45
46namespace llvm {
47namespace PatternMatch {
48
49template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
50 return const_cast<Pattern &>(P).match(V);
36. Calling 'IntrinsicID_match::match'
40. Returning from 'IntrinsicID_match::match'
41. Returning zero, which participates in a condition later
45. Calling 'IntrinsicID_match::match'
49. Returning from 'IntrinsicID_match::match'
50. Returning zero, which participates in a condition later
51}
52
53template <typename Pattern> bool match(ArrayRef<int> Mask, const Pattern &P) {
54 return const_cast<Pattern &>(P).match(Mask);
55}
56
57template <typename SubPattern_t> struct OneUse_match {
58 SubPattern_t SubPattern;
59
60 OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
61
62 template <typename OpTy> bool match(OpTy *V) {
63 return V->hasOneUse() && SubPattern.match(V);
64 }
65};
66
67template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
68 return SubPattern;
69}
70
71template <typename Class> struct class_match {
72 template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
73};
74
75/// Match an arbitrary value and ignore it.
76inline class_match<Value> m_Value() { return class_match<Value>(); }
77
78/// Match an arbitrary unary operation and ignore it.
79inline class_match<UnaryOperator> m_UnOp() {
80 return class_match<UnaryOperator>();
81}
82
83/// Match an arbitrary binary operation and ignore it.
84inline class_match<BinaryOperator> m_BinOp() {
85 return class_match<BinaryOperator>();
86}
87
88/// Match any compare instruction and ignore it.
89inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
90
91struct undef_match {
92 static bool check(const Value *V) {
93 if (isa<UndefValue>(V))
94 return true;
95
96 const auto *CA = dyn_cast<ConstantAggregate>(V);
97 if (!CA)
98 return false;
99
100 SmallPtrSet<const ConstantAggregate *, 8> Seen;
101 SmallVector<const ConstantAggregate *, 8> Worklist;
102
103 // Either UndefValue, PoisonValue, or an aggregate that only contains
104 // these is accepted by the matcher.
105 // CheckValue returns false if CA cannot satisfy this constraint.
106 auto CheckValue = [&](const ConstantAggregate *CA) {
107 for (const Value *Op : CA->operand_values()) {
108 if (isa<UndefValue>(Op))
109 continue;
110
111 const auto *CA = dyn_cast<ConstantAggregate>(Op);
112 if (!CA)
113 return false;
114 if (Seen.insert(CA).second)
115 Worklist.emplace_back(CA);
116 }
117
118 return true;
119 };
120
121 if (!CheckValue(CA))
122 return false;
123
124 while (!Worklist.empty()) {
125 if (!CheckValue(Worklist.pop_back_val()))
126 return false;
127 }
128 return true;
129 }
130 template <typename ITy> bool match(ITy *V) { return check(V); }
131};
132
133/// Match an arbitrary undef constant. This matches poison as well.
134/// If this is an aggregate and contains a non-aggregate element that is
135/// neither undef nor poison, the aggregate is not matched.
136inline auto m_Undef() { return undef_match(); }
137
138/// Match an arbitrary poison constant.
139inline class_match<PoisonValue> m_Poison() { return class_match<PoisonValue>(); }
140
141/// Match an arbitrary Constant and ignore it.
142inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
143
144/// Match an arbitrary ConstantInt and ignore it.
145inline class_match<ConstantInt> m_ConstantInt() {
146 return class_match<ConstantInt>();
147}
148
149/// Match an arbitrary ConstantFP and ignore it.
150inline class_match<ConstantFP> m_ConstantFP() {
151 return class_match<ConstantFP>();
152}
153
154/// Match an arbitrary ConstantExpr and ignore it.
155inline class_match<ConstantExpr> m_ConstantExpr() {
156 return class_match<ConstantExpr>();
157}
158
159/// Match an arbitrary basic block value and ignore it.
160inline class_match<BasicBlock> m_BasicBlock() {
161 return class_match<BasicBlock>();
162}
163
164/// Inverting matcher
165template <typename Ty> struct match_unless {
166 Ty M;
167
168 match_unless(const Ty &Matcher) : M(Matcher) {}
169
170 template <typename ITy> bool match(ITy *V) { return !M.match(V); }
171};
172
173/// Match if the inner matcher does *NOT* match.
174template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) {
175 return match_unless<Ty>(M);
176}
177
178/// Matching combinators
179template <typename LTy, typename RTy> struct match_combine_or {
180 LTy L;
181 RTy R;
182
183 match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
184
185 template <typename ITy> bool match(ITy *V) {
186 if (L.match(V))
187 return true;
188 if (R.match(V))
189 return true;
190 return false;
191 }
192};
193
194template <typename LTy, typename RTy> struct match_combine_and {
195 LTy L;
196 RTy R;
197
198 match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
199
200 template <typename ITy> bool match(ITy *V) {
201 if (L.match(V))
202 if (R.match(V))
203 return true;
204 return false;
205 }
206};
207
208/// Combine two pattern matchers matching L || R
209template <typename LTy, typename RTy>
210inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
211 return match_combine_or<LTy, RTy>(L, R);
212}
213
214/// Combine two pattern matchers matching L && R
215template <typename LTy, typename RTy>
216inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
217 return match_combine_and<LTy, RTy>(L, R);
218}
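A minimal usage sketch for the combinators (the function name is illustrative): accept either kind of scalar literal by OR-ing two of the matchers defined above.

#include "llvm/IR/PatternMatch.h"

// Returns true if V is either a ConstantInt or a ConstantFP.
static bool isIntOrFPLiteral(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  return match(V, m_CombineOr(m_ConstantInt(), m_ConstantFP()));
}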
219
220struct apint_match {
221 const APInt *&Res;
222 bool AllowUndef;
223
224 apint_match(const APInt *&Res, bool AllowUndef)
225 : Res(Res), AllowUndef(AllowUndef) {}
226
227 template <typename ITy> bool match(ITy *V) {
228 if (auto *CI = dyn_cast<ConstantInt>(V)) {
229 Res = &CI->getValue();
230 return true;
231 }
232 if (V->getType()->isVectorTy())
233 if (const auto *C = dyn_cast<Constant>(V))
234 if (auto *CI = dyn_cast_or_null<ConstantInt>(
235 C->getSplatValue(AllowUndef))) {
236 Res = &CI->getValue();
237 return true;
238 }
239 return false;
240 }
241};
242// Either constexpr if or renaming ConstantFP::getValueAPF to
243// ConstantFP::getValue is needed to do it via single template
244// function for both apint/apfloat.
245struct apfloat_match {
246 const APFloat *&Res;
247 bool AllowUndef;
248
249 apfloat_match(const APFloat *&Res, bool AllowUndef)
250 : Res(Res), AllowUndef(AllowUndef) {}
251
252 template <typename ITy> bool match(ITy *V) {
253 if (auto *CI = dyn_cast<ConstantFP>(V)) {
254 Res = &CI->getValueAPF();
255 return true;
256 }
257 if (V->getType()->isVectorTy())
258 if (const auto *C = dyn_cast<Constant>(V))
259 if (auto *CI = dyn_cast_or_null<ConstantFP>(
260 C->getSplatValue(AllowUndef))) {
261 Res = &CI->getValueAPF();
262 return true;
263 }
264 return false;
265 }
266};
267
268/// Match a ConstantInt or splatted ConstantVector, binding the
269/// specified pointer to the contained APInt.
270inline apint_match m_APInt(const APInt *&Res) {
271 // Forbid undefs by default to maintain previous behavior.
272 return apint_match(Res, /* AllowUndef */ false);
273}
274
275/// Match APInt while allowing undefs in splat vector constants.
276inline apint_match m_APIntAllowUndef(const APInt *&Res) {
277 return apint_match(Res, /* AllowUndef */ true);
278}
279
280/// Match APInt while forbidding undefs in splat vector constants.
281inline apint_match m_APIntForbidUndef(const APInt *&Res) {
282 return apint_match(Res, /* AllowUndef */ false);
283}
284
285/// Match a ConstantFP or splatted ConstantVector, binding the
286/// specified pointer to the contained APFloat.
287inline apfloat_match m_APFloat(const APFloat *&Res) {
288 // Forbid undefs by default to maintain previous behavior.
289 return apfloat_match(Res, /* AllowUndef */ false);
290}
291
292/// Match APFloat while allowing undefs in splat vector constants.
293inline apfloat_match m_APFloatAllowUndef(const APFloat *&Res) {
294 return apfloat_match(Res, /* AllowUndef */ true);
295}
296
297/// Match APFloat while forbidding undefs in splat vector constants.
298inline apfloat_match m_APFloatForbidUndef(const APFloat *&Res) {
299 return apfloat_match(Res, /* AllowUndef */ false);
300}
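A typical use of m_APInt together with match(), as a sketch: the helper name is illustrative, and the m_Add matcher it uses is defined further down in this header rather than in the excerpt above.

#include "llvm/IR/PatternMatch.h"

// Bind X and C such that V == add X, C, where C is a ConstantInt or a
// splatted constant vector (undefs in the splat are rejected by m_APInt).
static bool matchAddOfConstant(llvm::Value *V, llvm::Value *&X,
                               const llvm::APInt *&C) {
  using namespace llvm::PatternMatch;
  return match(V, m_Add(m_Value(X), m_APInt(C)));
}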
301
302template <int64_t Val> struct constantint_match {
303 template <typename ITy> bool match(ITy *V) {
304 if (const auto *CI = dyn_cast<ConstantInt>(V)) {
305 const APInt &CIV = CI->getValue();
306 if (Val >= 0)
307 return CIV == static_cast<uint64_t>(Val);
308 // If Val is negative, and CI is shorter than it, truncate to the right
309 // number of bits. If it is larger, then we have to sign extend. Just
310 // compare their negated values.
311 return -CIV == -Val;
312 }
313 return false;
314 }
315};
316
317/// Match a ConstantInt with a specific value.
318template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
319 return constantint_match<Val>();
320}
321
322/// This helper class is used to match constant scalars, vector splats,
323/// and fixed width vectors that satisfy a specified predicate.
324/// For fixed width vector constants, undefined elements are ignored.
325template <typename Predicate, typename ConstantVal>
326struct cstval_pred_ty : public Predicate {
327 template <typename ITy> bool match(ITy *V) {
328 if (const auto *CV = dyn_cast<ConstantVal>(V))
329 return this->isValue(CV->getValue());
330 if (const auto *VTy = dyn_cast<VectorType>(V->getType())) {
331 if (const auto *C = dyn_cast<Constant>(V)) {
332 if (const auto *CV = dyn_cast_or_null<ConstantVal>(C->getSplatValue()))
333 return this->isValue(CV->getValue());
334
335 // The number of elements of a scalable vector is unknown at compile time.
336 auto *FVTy = dyn_cast<FixedVectorType>(VTy);
337 if (!FVTy)
338 return false;
339
340 // Non-splat vector constant: check each element for a match.
341 unsigned NumElts = FVTy->getNumElements();
342 assert(NumElts != 0 && "Constant vector with no elements?")(static_cast<void> (0));
343 bool HasNonUndefElements = false;
344 for (unsigned i = 0; i != NumElts; ++i) {
345 Constant *Elt = C->getAggregateElement(i);
346 if (!Elt)
347 return false;
348 if (isa<UndefValue>(Elt))
349 continue;
350 auto *CV = dyn_cast<ConstantVal>(Elt);
351 if (!CV || !this->isValue(CV->getValue()))
352 return false;
353 HasNonUndefElements = true;
354 }
355 return HasNonUndefElements;
356 }
357 }
358 return false;
359 }
360};
361
362/// specialization of cstval_pred_ty for ConstantInt
363template <typename Predicate>
364using cst_pred_ty = cstval_pred_ty<Predicate, ConstantInt>;
365
366/// specialization of cstval_pred_ty for ConstantFP
367template <typename Predicate>
368using cstfp_pred_ty = cstval_pred_ty<Predicate, ConstantFP>;
369
370/// This helper class is used to match scalar and vector constants that
371/// satisfy a specified predicate, and bind them to an APInt.
372template <typename Predicate> struct api_pred_ty : public Predicate {
373 const APInt *&Res;
374
375 api_pred_ty(const APInt *&R) : Res(R) {}
376
377 template <typename ITy> bool match(ITy *V) {
378 if (const auto *CI = dyn_cast<ConstantInt>(V))
379 if (this->isValue(CI->getValue())) {
380 Res = &CI->getValue();
381 return true;
382 }
383 if (V->getType()->isVectorTy())
384 if (const auto *C = dyn_cast<Constant>(V))
385 if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
386 if (this->isValue(CI->getValue())) {
387 Res = &CI->getValue();
388 return true;
389 }
390
391 return false;
392 }
393};
394
395/// This helper class is used to match scalar and vector constants that
396/// satisfy a specified predicate, and bind them to an APFloat.
397/// Undefs are allowed in splat vector constants.
398template <typename Predicate> struct apf_pred_ty : public Predicate {
399 const APFloat *&Res;
400
401 apf_pred_ty(const APFloat *&R) : Res(R) {}
402
403 template <typename ITy> bool match(ITy *V) {
404 if (const auto *CI = dyn_cast<ConstantFP>(V))
405 if (this->isValue(CI->getValue())) {
406 Res = &CI->getValue();
407 return true;
408 }
409 if (V->getType()->isVectorTy())
410 if (const auto *C = dyn_cast<Constant>(V))
411 if (auto *CI = dyn_cast_or_null<ConstantFP>(
412 C->getSplatValue(/* AllowUndef */ true)))
413 if (this->isValue(CI->getValue())) {
414 Res = &CI->getValue();
415 return true;
416 }
417
418 return false;
419 }
420};
421
422///////////////////////////////////////////////////////////////////////////////
423//
424// Encapsulate constant value queries for use in templated predicate matchers.
425// This allows checking if constants match using compound predicates and works
426// with vector constants, possibly with relaxed constraints (for example,
427// undef values may be ignored).
428//
429///////////////////////////////////////////////////////////////////////////////
430
431struct is_any_apint {
432 bool isValue(const APInt &C) { return true; }
433};
434/// Match an integer or vector with any integral constant.
435/// For vectors, this includes constants with undefined elements.
436inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() {
437 return cst_pred_ty<is_any_apint>();
438}
439
440struct is_all_ones {
441 bool isValue(const APInt &C) { return C.isAllOnesValue(); }
442};
443/// Match an integer or vector with all bits set.
444/// For vectors, this includes constants with undefined elements.
445inline cst_pred_ty<is_all_ones> m_AllOnes() {
446 return cst_pred_ty<is_all_ones>();
447}
448
449struct is_maxsignedvalue {
450 bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
451};
452/// Match an integer or vector with values having all bits except for the high
453/// bit set (0x7f...).
454/// For vectors, this includes constants with undefined elements.
455inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
456 return cst_pred_ty<is_maxsignedvalue>();
457}
458inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
459 return V;
460}
461
462struct is_negative {
463 bool isValue(const APInt &C) { return C.isNegative(); }
464};
465/// Match an integer or vector of negative values.
466/// For vectors, this includes constants with undefined elements.
467inline cst_pred_ty<is_negative> m_Negative() {
468 return cst_pred_ty<is_negative>();
469}
470inline api_pred_ty<is_negative> m_Negative(const APInt *&V) {
471 return V;
472}
473
474struct is_nonnegative {
475 bool isValue(const APInt &C) { return C.isNonNegative(); }
476};
477/// Match an integer or vector of non-negative values.
478/// For vectors, this includes constants with undefined elements.
479inline cst_pred_ty<is_nonnegative> m_NonNegative() {
480 return cst_pred_ty<is_nonnegative>();
481}
482inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) {
483 return V;
484}
485
486struct is_strictlypositive {
487 bool isValue(const APInt &C) { return C.isStrictlyPositive(); }
488};
489/// Match an integer or vector of strictly positive values.
490/// For vectors, this includes constants with undefined elements.
491inline cst_pred_ty<is_strictlypositive> m_StrictlyPositive() {
492 return cst_pred_ty<is_strictlypositive>();
493}
494inline api_pred_ty<is_strictlypositive> m_StrictlyPositive(const APInt *&V) {
495 return V;
496}
497
498struct is_nonpositive {
499 bool isValue(const APInt &C) { return C.isNonPositive(); }
500};
501/// Match an integer or vector of non-positive values.
502/// For vectors, this includes constants with undefined elements.
503inline cst_pred_ty<is_nonpositive> m_NonPositive() {
504 return cst_pred_ty<is_nonpositive>();
505}
506inline api_pred_ty<is_nonpositive> m_NonPositive(const APInt *&V) { return V; }
507
508struct is_one {
509 bool isValue(const APInt &C) { return C.isOneValue(); }
510};
511/// Match an integer 1 or a vector with all elements equal to 1.
512/// For vectors, this includes constants with undefined elements.
513inline cst_pred_ty<is_one> m_One() {
514 return cst_pred_ty<is_one>();
515}
516
517struct is_zero_int {
518 bool isValue(const APInt &C) { return C.isNullValue(); }
519};
520/// Match an integer 0 or a vector with all elements equal to 0.
521/// For vectors, this includes constants with undefined elements.
522inline cst_pred_ty<is_zero_int> m_ZeroInt() {
523 return cst_pred_ty<is_zero_int>();
524}
525
526struct is_zero {
527 template <typename ITy> bool match(ITy *V) {
528 auto *C = dyn_cast<Constant>(V);
529 // FIXME: this should be able to do something for scalable vectors
530 return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C));
531 }
532};
533/// Match any null constant or a vector with all elements equal to 0.
534/// For vectors, this includes constants with undefined elements.
535inline is_zero m_Zero() {
536 return is_zero();
537}
538
539struct is_power2 {
540 bool isValue(const APInt &C) { return C.isPowerOf2(); }
541};
542/// Match an integer or vector power-of-2.
543/// For vectors, this includes constants with undefined elements.
544inline cst_pred_ty<is_power2> m_Power2() {
545 return cst_pred_ty<is_power2>();
546}
547inline api_pred_ty<is_power2> m_Power2(const APInt *&V) {
548 return V;
549}
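// A minimal usage sketch (illustrative, not part of the original header):
// assuming `V` is a Value* under inspection and `using namespace
// llvm::PatternMatch;` is in effect, the APInt-binding overload also captures
// the matched constant:
//   const APInt *Pow2C;
//   if (match(V, m_Power2(Pow2C)))
//     ; // V is a power-of-2 constant (or splat); *Pow2C holds its value.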
550
551struct is_negated_power2 {
552 bool isValue(const APInt &C) { return (-C).isPowerOf2(); }
553};
554/// Match an integer or vector negated power-of-2.
555/// For vectors, this includes constants with undefined elements.
556inline cst_pred_ty<is_negated_power2> m_NegatedPower2() {
557 return cst_pred_ty<is_negated_power2>();
558}
559inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) {
560 return V;
561}
562
563struct is_power2_or_zero {
564 bool isValue(const APInt &C) { return !C || C.isPowerOf2(); }
565};
566/// Match an integer or vector of 0 or power-of-2 values.
567/// For vectors, this includes constants with undefined elements.
568inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() {
569 return cst_pred_ty<is_power2_or_zero>();
570}
571inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) {
572 return V;
573}
574
575struct is_sign_mask {
576 bool isValue(const APInt &C) { return C.isSignMask(); }
577};
578/// Match an integer or vector with only the sign bit(s) set.
579/// For vectors, this includes constants with undefined elements.
580inline cst_pred_ty<is_sign_mask> m_SignMask() {
581 return cst_pred_ty<is_sign_mask>();
582}
583
584struct is_lowbit_mask {
585 bool isValue(const APInt &C) { return C.isMask(); }
586};
587/// Match an integer or vector with only the low bit(s) set.
588/// For vectors, this includes constants with undefined elements.
589inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() {
590 return cst_pred_ty<is_lowbit_mask>();
591}
592
593struct icmp_pred_with_threshold {
594 ICmpInst::Predicate Pred;
595 const APInt *Thr;
596 bool isValue(const APInt &C) {
597 switch (Pred) {
598 case ICmpInst::Predicate::ICMP_EQ:
599 return C.eq(*Thr);
600 case ICmpInst::Predicate::ICMP_NE:
601 return C.ne(*Thr);
602 case ICmpInst::Predicate::ICMP_UGT:
603 return C.ugt(*Thr);
604 case ICmpInst::Predicate::ICMP_UGE:
605 return C.uge(*Thr);
606 case ICmpInst::Predicate::ICMP_ULT:
607 return C.ult(*Thr);
608 case ICmpInst::Predicate::ICMP_ULE:
609 return C.ule(*Thr);
610 case ICmpInst::Predicate::ICMP_SGT:
611 return C.sgt(*Thr);
612 case ICmpInst::Predicate::ICMP_SGE:
613 return C.sge(*Thr);
614 case ICmpInst::Predicate::ICMP_SLT:
615 return C.slt(*Thr);
616 case ICmpInst::Predicate::ICMP_SLE:
617 return C.sle(*Thr);
618 default:
619 llvm_unreachable("Unhandled ICmp predicate");
620 }
621 }
622};
623/// Match an integer or vector with every element comparing 'pred' (eq/ne/...)
624/// to Threshold. For vectors, this includes constants with undefined elements.
625inline cst_pred_ty<icmp_pred_with_threshold>
626m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold) {
627 cst_pred_ty<icmp_pred_with_threshold> P;
628 P.Pred = Predicate;
629 P.Thr = &Threshold;
630 return P;
631}
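// A minimal usage sketch (illustrative): check that a constant (or constant
// splat) is unsigned-less-than 8, assuming `V` is an i32 constant Value* (the
// widths of the constant and the threshold must agree) and the llvm and
// PatternMatch namespaces are in scope:
//   if (match(V, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(32, 8))))
//     ; // every element of V compares ult 8.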
632
633struct is_nan {
634 bool isValue(const APFloat &C) { return C.isNaN(); }
635};
636/// Match an arbitrary NaN constant. This includes quiet and signaling NaNs.
637/// For vectors, this includes constants with undefined elements.
638inline cstfp_pred_ty<is_nan> m_NaN() {
639 return cstfp_pred_ty<is_nan>();
640}
641
642struct is_nonnan {
643 bool isValue(const APFloat &C) { return !C.isNaN(); }
644};
645/// Match a non-NaN FP constant.
646/// For vectors, this includes constants with undefined elements.
647inline cstfp_pred_ty<is_nonnan> m_NonNaN() {
648 return cstfp_pred_ty<is_nonnan>();
649}
650
651struct is_inf {
652 bool isValue(const APFloat &C) { return C.isInfinity(); }
653};
654/// Match a positive or negative infinity FP constant.
655/// For vectors, this includes constants with undefined elements.
656inline cstfp_pred_ty<is_inf> m_Inf() {
657 return cstfp_pred_ty<is_inf>();
658}
659
660struct is_noninf {
661 bool isValue(const APFloat &C) { return !C.isInfinity(); }
662};
663/// Match a non-infinity FP constant, i.e. finite or NaN.
664/// For vectors, this includes constants with undefined elements.
665inline cstfp_pred_ty<is_noninf> m_NonInf() {
666 return cstfp_pred_ty<is_noninf>();
667}
668
669struct is_finite {
670 bool isValue(const APFloat &C) { return C.isFinite(); }
671};
672/// Match a finite FP constant, i.e. not infinity or NaN.
673/// For vectors, this includes constants with undefined elements.
674inline cstfp_pred_ty<is_finite> m_Finite() {
675 return cstfp_pred_ty<is_finite>();
676}
677inline apf_pred_ty<is_finite> m_Finite(const APFloat *&V) { return V; }
678
679struct is_finitenonzero {
680 bool isValue(const APFloat &C) { return C.isFiniteNonZero(); }
681};
682/// Match a finite non-zero FP constant.
683/// For vectors, this includes constants with undefined elements.
684inline cstfp_pred_ty<is_finitenonzero> m_FiniteNonZero() {
685 return cstfp_pred_ty<is_finitenonzero>();
686}
687inline apf_pred_ty<is_finitenonzero> m_FiniteNonZero(const APFloat *&V) {
688 return V;
689}
690
691struct is_any_zero_fp {
692 bool isValue(const APFloat &C) { return C.isZero(); }
693};
694/// Match a floating-point negative zero or positive zero.
695/// For vectors, this includes constants with undefined elements.
696inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() {
697 return cstfp_pred_ty<is_any_zero_fp>();
698}
699
700struct is_pos_zero_fp {
701 bool isValue(const APFloat &C) { return C.isPosZero(); }
702};
703/// Match a floating-point positive zero.
704/// For vectors, this includes constants with undefined elements.
705inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() {
706 return cstfp_pred_ty<is_pos_zero_fp>();
707}
708
709struct is_neg_zero_fp {
710 bool isValue(const APFloat &C) { return C.isNegZero(); }
711};
712/// Match a floating-point negative zero.
713/// For vectors, this includes constants with undefined elements.
714inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() {
715 return cstfp_pred_ty<is_neg_zero_fp>();
716}
717
718struct is_non_zero_fp {
719 bool isValue(const APFloat &C) { return C.isNonZero(); }
720};
721/// Match a floating-point non-zero.
722/// For vectors, this includes constants with undefined elements.
723inline cstfp_pred_ty<is_non_zero_fp> m_NonZeroFP() {
724 return cstfp_pred_ty<is_non_zero_fp>();
725}
726
727///////////////////////////////////////////////////////////////////////////////
728
729template <typename Class> struct bind_ty {
730 Class *&VR;
731
732 bind_ty(Class *&V) : VR(V) {}
733
734 template <typename ITy> bool match(ITy *V) {
735 if (auto *CV = dyn_cast<Class>(V)) {
736 VR = CV;
737 return true;
738 }
739 return false;
740 }
741};
742
743/// Match a value, capturing it if we match.
744inline bind_ty<Value> m_Value(Value *&V) { return V; }
745inline bind_ty<const Value> m_Value(const Value *&V) { return V; }
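// A minimal usage sketch (illustrative): bind both operands of an add, assuming
// `V` is a Value* and `using namespace llvm::PatternMatch;` is in effect:
//   Value *X, *Y;
//   if (match(V, m_Add(m_Value(X), m_Value(Y))))
//     ; // X and Y now point at the add's operands.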
746
747/// Match an instruction, capturing it if we match.
748inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
749/// Match a unary operator, capturing it if we match.
750inline bind_ty<UnaryOperator> m_UnOp(UnaryOperator *&I) { return I; }
751/// Match a binary operator, capturing it if we match.
752inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
753/// Match a with overflow intrinsic, capturing it if we match.
754inline bind_ty<WithOverflowInst> m_WithOverflowInst(WithOverflowInst *&I) { return I; }
755inline bind_ty<const WithOverflowInst>
756m_WithOverflowInst(const WithOverflowInst *&I) {
757 return I;
758}
759
760/// Match a Constant, capturing the value if we match.
761inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
762
763/// Match a ConstantInt, capturing the value if we match.
764inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
765
766/// Match a ConstantFP, capturing the value if we match.
767inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
768
769/// Match a ConstantExpr, capturing the value if we match.
770inline bind_ty<ConstantExpr> m_ConstantExpr(ConstantExpr *&C) { return C; }
771
772/// Match a basic block value, capturing it if we match.
773inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; }
774inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) {
775 return V;
776}
777
778/// Match an arbitrary immediate Constant and ignore it.
779inline match_combine_and<class_match<Constant>,
780 match_unless<class_match<ConstantExpr>>>
781m_ImmConstant() {
782 return m_CombineAnd(m_Constant(), m_Unless(m_ConstantExpr()));
783}
784
785/// Match an immediate Constant, capturing the value if we match.
786inline match_combine_and<bind_ty<Constant>,
787 match_unless<class_match<ConstantExpr>>>
788m_ImmConstant(Constant *&C) {
789 return m_CombineAnd(m_Constant(C), m_Unless(m_ConstantExpr()));
790}
791
792/// Match a specified Value*.
793struct specificval_ty {
794 const Value *Val;
795
796 specificval_ty(const Value *V) : Val(V) {}
797
798 template <typename ITy> bool match(ITy *V) { return V == Val; }
799};
800
801/// Match if we have a specific specified value.
802inline specificval_ty m_Specific(const Value *V) { return V; }
803
804/// Stores a reference to the Value *, not the Value * itself,
805/// thus can be used in commutative matchers.
806template <typename Class> struct deferredval_ty {
807 Class *const &Val;
808
809 deferredval_ty(Class *const &V) : Val(V) {}
810
811 template <typename ITy> bool match(ITy *const V) { return V == Val; }
812};
813
814/// Like m_Specific(), but works if the specific value to match is determined
815/// as part of the same match() expression. For example:
816/// m_Add(m_Value(X), m_Specific(X)) is incorrect, because m_Specific() will
817/// bind X before the pattern match starts.
818/// m_Add(m_Value(X), m_Deferred(X)) is correct, and will check against
819/// whichever value m_Value(X) populated.
820inline deferredval_ty<Value> m_Deferred(Value *const &V) { return V; }
821inline deferredval_ty<const Value> m_Deferred(const Value *const &V) {
822 return V;
823}
824
825/// Match a specified floating point value or vector of all elements of
826/// that value.
827struct specific_fpval {
828 double Val;
829
830 specific_fpval(double V) : Val(V) {}
831
832 template <typename ITy> bool match(ITy *V) {
833 if (const auto *CFP = dyn_cast<ConstantFP>(V))
834 return CFP->isExactlyValue(Val);
835 if (V->getType()->isVectorTy())
836 if (const auto *C = dyn_cast<Constant>(V))
837 if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
838 return CFP->isExactlyValue(Val);
839 return false;
840 }
841};
842
843/// Match a specific floating point value or vector with all elements
844/// equal to the value.
845inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
846
847/// Match a float 1.0 or vector with all elements equal to 1.0.
848inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
849
850struct bind_const_intval_ty {
851 uint64_t &VR;
852
853 bind_const_intval_ty(uint64_t &V) : VR(V) {}
854
855 template <typename ITy> bool match(ITy *V) {
856 if (const auto *CV = dyn_cast<ConstantInt>(V))
857 if (CV->getValue().ule(UINT64_MAX)) {
858 VR = CV->getZExtValue();
859 return true;
860 }
861 return false;
862 }
863};
864
865/// Match a specified integer value or vector of all elements of that
866/// value.
867template <bool AllowUndefs>
868struct specific_intval {
869 APInt Val;
870
871 specific_intval(APInt V) : Val(std::move(V)) {}
872
873 template <typename ITy> bool match(ITy *V) {
874 const auto *CI = dyn_cast<ConstantInt>(V);
875 if (!CI && V->getType()->isVectorTy())
876 if (const auto *C = dyn_cast<Constant>(V))
877 CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndefs));
878
879 return CI && APInt::isSameValue(CI->getValue(), Val);
880 }
881};
882
883/// Match a specific integer value or vector with all elements equal to
884/// the value.
885inline specific_intval<false> m_SpecificInt(APInt V) {
886 return specific_intval<false>(std::move(V));
887}
888
889inline specific_intval<false> m_SpecificInt(uint64_t V) {
890 return m_SpecificInt(APInt(64, V));
891}
892
893inline specific_intval<true> m_SpecificIntAllowUndef(APInt V) {
894 return specific_intval<true>(std::move(V));
895}
896
897inline specific_intval<true> m_SpecificIntAllowUndef(uint64_t V) {
898 return m_SpecificIntAllowUndef(APInt(64, V));
899}
900
901/// Match a ConstantInt and bind to its value. This does not match
902/// ConstantInts wider than 64-bits.
903inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
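// A minimal usage sketch (illustrative): capture a small integer constant,
// assuming `V` is a Value* and the PatternMatch namespace is in scope:
//   uint64_t ShAmt;
//   if (match(V, m_Shl(m_Value(), m_ConstantInt(ShAmt))))
//     ; // V is a shl by a constant; ShAmt holds the (<= 64-bit) shift amount.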
904
905/// Match a specified basic block value.
906struct specific_bbval {
907 BasicBlock *Val;
908
909 specific_bbval(BasicBlock *Val) : Val(Val) {}
910
911 template <typename ITy> bool match(ITy *V) {
912 const auto *BB = dyn_cast<BasicBlock>(V);
913 return BB && BB == Val;
914 }
915};
916
917/// Match a specific basic block value.
918inline specific_bbval m_SpecificBB(BasicBlock *BB) {
919 return specific_bbval(BB);
920}
921
922/// A commutative-friendly version of m_Specific().
923inline deferredval_ty<BasicBlock> m_Deferred(BasicBlock *const &BB) {
924 return BB;
925}
926inline deferredval_ty<const BasicBlock>
927m_Deferred(const BasicBlock *const &BB) {
928 return BB;
929}
930
931//===----------------------------------------------------------------------===//
932// Matcher for any binary operator.
933//
934template <typename LHS_t, typename RHS_t, bool Commutable = false>
935struct AnyBinaryOp_match {
936 LHS_t L;
937 RHS_t R;
938
939 // The evaluation order is always stable, regardless of Commutability.
940 // The LHS is always matched first.
941 AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
942
943 template <typename OpTy> bool match(OpTy *V) {
944 if (auto *I = dyn_cast<BinaryOperator>(V))
945 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
946 (Commutable && L.match(I->getOperand(1)) &&
947 R.match(I->getOperand(0)));
948 return false;
949 }
950};
951
952template <typename LHS, typename RHS>
953inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
954 return AnyBinaryOp_match<LHS, RHS>(L, R);
955}
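// A minimal usage sketch (illustrative): match any binary operator and bind its
// operands, assuming `V` is a Value* and PatternMatch is in scope:
//   Value *L, *R;
//   if (match(V, m_BinOp(m_Value(L), m_Value(R))))
//     ; // V is some BinaryOperator; L and R are its operands.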
956
957//===----------------------------------------------------------------------===//
958// Matcher for any unary operator.
959// TODO: fuse unary and binary matchers into an n-ary matcher
960//
961template <typename OP_t> struct AnyUnaryOp_match {
962 OP_t X;
963
964 AnyUnaryOp_match(const OP_t &X) : X(X) {}
965
966 template <typename OpTy> bool match(OpTy *V) {
967 if (auto *I = dyn_cast<UnaryOperator>(V))
968 return X.match(I->getOperand(0));
969 return false;
970 }
971};
972
973template <typename OP_t> inline AnyUnaryOp_match<OP_t> m_UnOp(const OP_t &X) {
974 return AnyUnaryOp_match<OP_t>(X);
975}
976
977//===----------------------------------------------------------------------===//
978// Matchers for specific binary operators.
979//
980
981template <typename LHS_t, typename RHS_t, unsigned Opcode,
982 bool Commutable = false>
983struct BinaryOp_match {
984 LHS_t L;
985 RHS_t R;
986
987 // The evaluation order is always stable, regardless of Commutability.
988 // The LHS is always matched first.
989 BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
990
991 template <typename OpTy> bool match(OpTy *V) {
992 if (V->getValueID() == Value::InstructionVal + Opcode) {
993 auto *I = cast<BinaryOperator>(V);
994 return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) ||
995 (Commutable && L.match(I->getOperand(1)) &&
996 R.match(I->getOperand(0)));
997 }
998 if (auto *CE = dyn_cast<ConstantExpr>(V))
999 return CE->getOpcode() == Opcode &&
1000 ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) ||
1001 (Commutable && L.match(CE->getOperand(1)) &&
1002 R.match(CE->getOperand(0))));
1003 return false;
1004 }
1005};
1006
1007template <typename LHS, typename RHS>
1008inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
1009 const RHS &R) {
1010 return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
1011}
1012
1013template <typename LHS, typename RHS>
1014inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
1015 const RHS &R) {
1016 return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
1017}
1018
1019template <typename LHS, typename RHS>
1020inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
1021 const RHS &R) {
1022 return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
1023}
1024
1025template <typename LHS, typename RHS>
1026inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
1027 const RHS &R) {
1028 return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
1029}
1030
1031template <typename Op_t> struct FNeg_match {
1032 Op_t X;
1033
1034 FNeg_match(const Op_t &Op) : X(Op) {}
1035 template <typename OpTy> bool match(OpTy *V) {
1036 auto *FPMO = dyn_cast<FPMathOperator>(V);
1037 if (!FPMO) return false;
1038
1039 if (FPMO->getOpcode() == Instruction::FNeg)
1040 return X.match(FPMO->getOperand(0));
1041
1042 if (FPMO->getOpcode() == Instruction::FSub) {
1043 if (FPMO->hasNoSignedZeros()) {
1044 // With 'nsz', any zero goes.
1045 if (!cstfp_pred_ty<is_any_zero_fp>().match(FPMO->getOperand(0)))
1046 return false;
1047 } else {
1048 // Without 'nsz', we need fsub -0.0, X exactly.
1049 if (!cstfp_pred_ty<is_neg_zero_fp>().match(FPMO->getOperand(0)))
1050 return false;
1051 }
1052
1053 return X.match(FPMO->getOperand(1));
1054 }
1055
1056 return false;
1057 }
1058};
1059
1060/// Match 'fneg X' as 'fsub -0.0, X'.
1061template <typename OpTy>
1062inline FNeg_match<OpTy>
1063m_FNeg(const OpTy &X) {
1064 return FNeg_match<OpTy>(X);
1065}
1066
1067/// Match 'fneg X' as 'fsub +/-0.0, X'.
1068template <typename RHS>
1069inline BinaryOp_match<cstfp_pred_ty<is_any_zero_fp>, RHS, Instruction::FSub>
1070m_FNegNSZ(const RHS &X) {
1071 return m_FSub(m_AnyZeroFP(), X);
1072}
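// A minimal usage sketch (illustrative): m_FNeg matches both the fneg
// instruction and the "fsub -0.0, X" idiom, assuming `V` is a Value* and
// PatternMatch is in scope:
//   Value *X;
//   if (match(V, m_FNeg(m_Value(X))))
//     ; // V computes the negation of X.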
1073
1074template <typename LHS, typename RHS>
1075inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
1076 const RHS &R) {
1077 return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
1078}
1079
1080template <typename LHS, typename RHS>
1081inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
1082 const RHS &R) {
1083 return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
1084}
1085
1086template <typename LHS, typename RHS>
1087inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
1088 const RHS &R) {
1089 return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
1090}
1091
1092template <typename LHS, typename RHS>
1093inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
1094 const RHS &R) {
1095 return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
1096}
1097
1098template <typename LHS, typename RHS>
1099inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
1100 const RHS &R) {
1101 return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
1102}
1103
1104template <typename LHS, typename RHS>
1105inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
1106 const RHS &R) {
1107 return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
1108}
1109
1110template <typename LHS, typename RHS>
1111inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
1112 const RHS &R) {
1113 return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
1114}
1115
1116template <typename LHS, typename RHS>
1117inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
1118 const RHS &R) {
1119 return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
1120}
1121
1122template <typename LHS, typename RHS>
1123inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
1124 const RHS &R) {
1125 return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
1126}
1127
1128template <typename LHS, typename RHS>
1129inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
1130 const RHS &R) {
1131 return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
1132}
1133
1134template <typename LHS, typename RHS>
1135inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
1136 const RHS &R) {
1137 return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
1138}
1139
1140template <typename LHS, typename RHS>
1141inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
1142 const RHS &R) {
1143 return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
1144}
1145
1146template <typename LHS, typename RHS>
1147inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
1148 const RHS &R) {
1149 return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
1150}
1151
1152template <typename LHS, typename RHS>
1153inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
1154 const RHS &R) {
1155 return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
1156}
1157
1158template <typename LHS_t, typename RHS_t, unsigned Opcode,
1159 unsigned WrapFlags = 0>
1160struct OverflowingBinaryOp_match {
1161 LHS_t L;
1162 RHS_t R;
1163
1164 OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
1165 : L(LHS), R(RHS) {}
1166
1167 template <typename OpTy> bool match(OpTy *V) {
1168 if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
1169 if (Op->getOpcode() != Opcode)
1170 return false;
1171 if ((WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap) &&
1172 !Op->hasNoUnsignedWrap())
1173 return false;
1174 if ((WrapFlags & OverflowingBinaryOperator::NoSignedWrap) &&
1175 !Op->hasNoSignedWrap())
1176 return false;
1177 return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
1178 }
1179 return false;
1180 }
1181};
1182
1183template <typename LHS, typename RHS>
1184inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1185 OverflowingBinaryOperator::NoSignedWrap>
1186m_NSWAdd(const LHS &L, const RHS &R) {
1187 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1188 OverflowingBinaryOperator::NoSignedWrap>(
1189 L, R);
1190}
1191template <typename LHS, typename RHS>
1192inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1193 OverflowingBinaryOperator::NoSignedWrap>
1194m_NSWSub(const LHS &L, const RHS &R) {
1195 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1196 OverflowingBinaryOperator::NoSignedWrap>(
1197 L, R);
1198}
1199template <typename LHS, typename RHS>
1200inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1201 OverflowingBinaryOperator::NoSignedWrap>
1202m_NSWMul(const LHS &L, const RHS &R) {
1203 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1204 OverflowingBinaryOperator::NoSignedWrap>(
1205 L, R);
1206}
1207template <typename LHS, typename RHS>
1208inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1209 OverflowingBinaryOperator::NoSignedWrap>
1210m_NSWShl(const LHS &L, const RHS &R) {
1211 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1212 OverflowingBinaryOperator::NoSignedWrap>(
1213 L, R);
1214}
1215
1216template <typename LHS, typename RHS>
1217inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1218 OverflowingBinaryOperator::NoUnsignedWrap>
1219m_NUWAdd(const LHS &L, const RHS &R) {
1220 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
1221 OverflowingBinaryOperator::NoUnsignedWrap>(
1222 L, R);
1223}
1224template <typename LHS, typename RHS>
1225inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1226 OverflowingBinaryOperator::NoUnsignedWrap>
1227m_NUWSub(const LHS &L, const RHS &R) {
1228 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
1229 OverflowingBinaryOperator::NoUnsignedWrap>(
1230 L, R);
1231}
1232template <typename LHS, typename RHS>
1233inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1234 OverflowingBinaryOperator::NoUnsignedWrap>
1235m_NUWMul(const LHS &L, const RHS &R) {
1236 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
1237 OverflowingBinaryOperator::NoUnsignedWrap>(
1238 L, R);
1239}
1240template <typename LHS, typename RHS>
1241inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1242 OverflowingBinaryOperator::NoUnsignedWrap>
1243m_NUWShl(const LHS &L, const RHS &R) {
1244 return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
1245 OverflowingBinaryOperator::NoUnsignedWrap>(
1246 L, R);
1247}
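// A minimal usage sketch (illustrative): only an add carrying the nsw flag is
// accepted here, assuming `V` is a Value* and PatternMatch is in scope:
//   Value *A, *B;
//   if (match(V, m_NSWAdd(m_Value(A), m_Value(B))))
//     ; // V is "add nsw A, B".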
1248
1249//===----------------------------------------------------------------------===//
1250// Class that matches a group of binary opcodes.
1251//
1252template <typename LHS_t, typename RHS_t, typename Predicate>
1253struct BinOpPred_match : Predicate {
1254 LHS_t L;
1255 RHS_t R;
1256
1257 BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1258
1259 template <typename OpTy> bool match(OpTy *V) {
1260 if (auto *I = dyn_cast<Instruction>(V))
1261 return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) &&
1262 R.match(I->getOperand(1));
1263 if (auto *CE = dyn_cast<ConstantExpr>(V))
1264 return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) &&
1265 R.match(CE->getOperand(1));
1266 return false;
1267 }
1268};
1269
1270struct is_shift_op {
1271 bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); }
1272};
1273
1274struct is_right_shift_op {
1275 bool isOpType(unsigned Opcode) {
1276 return Opcode == Instruction::LShr || Opcode == Instruction::AShr;
1277 }
1278};
1279
1280struct is_logical_shift_op {
1281 bool isOpType(unsigned Opcode) {
1282 return Opcode == Instruction::LShr || Opcode == Instruction::Shl;
1283 }
1284};
1285
1286struct is_bitwiselogic_op {
1287 bool isOpType(unsigned Opcode) {
1288 return Instruction::isBitwiseLogicOp(Opcode);
1289 }
1290};
1291
1292struct is_idiv_op {
1293 bool isOpType(unsigned Opcode) {
1294 return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv;
1295 }
1296};
1297
1298struct is_irem_op {
1299 bool isOpType(unsigned Opcode) {
1300 return Opcode == Instruction::SRem || Opcode == Instruction::URem;
1301 }
1302};
1303
1304/// Matches shift operations.
1305template <typename LHS, typename RHS>
1306inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L,
1307 const RHS &R) {
1308 return BinOpPred_match<LHS, RHS, is_shift_op>(L, R);
1309}
1310
1311/// Matches right shift operations (logical or arithmetic).
1312template <typename LHS, typename RHS>
1313inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L,
1314 const RHS &R) {
1315 return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R);
1316}
1317
1318/// Matches logical shift operations.
1319template <typename LHS, typename RHS>
1320inline BinOpPred_match<LHS, RHS, is_logical_shift_op>
1321m_LogicalShift(const LHS &L, const RHS &R) {
1322 return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R);
1323}
1324
1325/// Matches bitwise logic operations.
1326template <typename LHS, typename RHS>
1327inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op>
1328m_BitwiseLogic(const LHS &L, const RHS &R) {
1329 return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R);
1330}
1331
1332/// Matches integer division operations.
1333template <typename LHS, typename RHS>
1334inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L,
1335 const RHS &R) {
1336 return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R);
1337}
1338
1339/// Matches integer remainder operations.
1340template <typename LHS, typename RHS>
1341inline BinOpPred_match<LHS, RHS, is_irem_op> m_IRem(const LHS &L,
1342 const RHS &R) {
1343 return BinOpPred_match<LHS, RHS, is_irem_op>(L, R);
1344}
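// A minimal usage sketch (illustrative): m_Shift accepts shl, lshr and ashr,
// assuming `V` is a Value* and PatternMatch is in scope:
//   Value *X, *Amt;
//   if (match(V, m_Shift(m_Value(X), m_Value(Amt))))
//     ; // V is shl/lshr/ashr of X by Amt.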
1345
1346//===----------------------------------------------------------------------===//
1347// Class that matches exact binary ops.
1348//
1349template <typename SubPattern_t> struct Exact_match {
1350 SubPattern_t SubPattern;
1351
1352 Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
1353
1354 template <typename OpTy> bool match(OpTy *V) {
1355 if (auto *PEO = dyn_cast<PossiblyExactOperator>(V))
1356 return PEO->isExact() && SubPattern.match(V);
1357 return false;
1358 }
1359};
1360
1361template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
1362 return SubPattern;
1363}
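// A minimal usage sketch (illustrative): require the exact flag on a division,
// assuming `V` is a Value* and PatternMatch is in scope:
//   Value *X, *Y;
//   if (match(V, m_Exact(m_SDiv(m_Value(X), m_Value(Y)))))
//     ; // V is "sdiv exact X, Y".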
1364
1365//===----------------------------------------------------------------------===//
1366// Matchers for CmpInst classes
1367//
1368
1369template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy,
1370 bool Commutable = false>
1371struct CmpClass_match {
1372 PredicateTy &Predicate;
1373 LHS_t L;
1374 RHS_t R;
1375
1376 // The evaluation order is always stable, regardless of Commutability.
1377 // The LHS is always matched first.
1378 CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
1379 : Predicate(Pred), L(LHS), R(RHS) {}
1380
1381 template <typename OpTy> bool match(OpTy *V) {
1382 if (auto *I = dyn_cast<Class>(V)) {
1383 if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
1384 Predicate = I->getPredicate();
1385 return true;
1386 } else if (Commutable && L.match(I->getOperand(1)) &&
1387 R.match(I->getOperand(0))) {
1388 Predicate = I->getSwappedPredicate();
1389 return true;
1390 }
1391 }
1392 return false;
1393 }
1394};
1395
1396template <typename LHS, typename RHS>
1397inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
1398m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1399 return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
1400}
1401
1402template <typename LHS, typename RHS>
1403inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
1404m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1405 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
1406}
1407
1408template <typename LHS, typename RHS>
1409inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
1410m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
1411 return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
1412}
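// A minimal usage sketch (illustrative): bind the predicate and operands of an
// integer compare, assuming `V` is a Value* and PatternMatch is in scope:
//   ICmpInst::Predicate Pred;
//   Value *L, *R;
//   if (match(V, m_ICmp(Pred, m_Value(L), m_Value(R))))
//     ; // Pred holds the instruction's predicate; L and R its operands.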
1413
1414//===----------------------------------------------------------------------===//
1415// Matchers for instructions with a given opcode and number of operands.
1416//
1417
1418/// Matches instructions with Opcode and one operand.
1419template <typename T0, unsigned Opcode> struct OneOps_match {
1420 T0 Op1;
1421
1422 OneOps_match(const T0 &Op1) : Op1(Op1) {}
1423
1424 template <typename OpTy> bool match(OpTy *V) {
1425 if (V->getValueID() == Value::InstructionVal + Opcode) {
1426 auto *I = cast<Instruction>(V);
1427 return Op1.match(I->getOperand(0));
1428 }
1429 return false;
1430 }
1431};
1432
1433/// Matches instructions with Opcode and two operands.
1434template <typename T0, typename T1, unsigned Opcode> struct TwoOps_match {
1435 T0 Op1;
1436 T1 Op2;
1437
1438 TwoOps_match(const T0 &Op1, const T1 &Op2) : Op1(Op1), Op2(Op2) {}
1439
1440 template <typename OpTy> bool match(OpTy *V) {
1441 if (V->getValueID() == Value::InstructionVal + Opcode) {
1442 auto *I = cast<Instruction>(V);
1443 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1));
1444 }
1445 return false;
1446 }
1447};
1448
1449/// Matches instructions with Opcode and three operands.
1450template <typename T0, typename T1, typename T2, unsigned Opcode>
1451struct ThreeOps_match {
1452 T0 Op1;
1453 T1 Op2;
1454 T2 Op3;
1455
1456 ThreeOps_match(const T0 &Op1, const T1 &Op2, const T2 &Op3)
1457 : Op1(Op1), Op2(Op2), Op3(Op3) {}
1458
1459 template <typename OpTy> bool match(OpTy *V) {
1460 if (V->getValueID() == Value::InstructionVal + Opcode) {
1461 auto *I = cast<Instruction>(V);
1462 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
1463 Op3.match(I->getOperand(2));
1464 }
1465 return false;
1466 }
1467};
1468
1469/// Matches SelectInst.
1470template <typename Cond, typename LHS, typename RHS>
1471inline ThreeOps_match<Cond, LHS, RHS, Instruction::Select>
1472m_Select(const Cond &C, const LHS &L, const RHS &R) {
1473 return ThreeOps_match<Cond, LHS, RHS, Instruction::Select>(C, L, R);
1474}
1475
1476/// This matches a select of two constants, e.g.:
1477/// m_SelectCst<-1, 0>(m_Value(V))
1478template <int64_t L, int64_t R, typename Cond>
1479inline ThreeOps_match<Cond, constantint_match<L>, constantint_match<R>,
1480 Instruction::Select>
1481m_SelectCst(const Cond &C) {
1482 return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
1483}
1484
1485/// Matches FreezeInst.
1486template <typename OpTy>
1487inline OneOps_match<OpTy, Instruction::Freeze> m_Freeze(const OpTy &Op) {
1488 return OneOps_match<OpTy, Instruction::Freeze>(Op);
1489}
1490
1491/// Matches InsertElementInst.
1492template <typename Val_t, typename Elt_t, typename Idx_t>
1493inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>
1494m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) {
1495 return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>(
1496 Val, Elt, Idx);
1497}
1498
1499/// Matches ExtractElementInst.
1500template <typename Val_t, typename Idx_t>
1501inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>
1502m_ExtractElt(const Val_t &Val, const Idx_t &Idx) {
1503 return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx);
1504}
1505
1506/// Matches shuffle.
1507template <typename T0, typename T1, typename T2> struct Shuffle_match {
1508 T0 Op1;
1509 T1 Op2;
1510 T2 Mask;
1511
1512 Shuffle_match(const T0 &Op1, const T1 &Op2, const T2 &Mask)
1513 : Op1(Op1), Op2(Op2), Mask(Mask) {}
1514
1515 template <typename OpTy> bool match(OpTy *V) {
1516 if (auto *I = dyn_cast<ShuffleVectorInst>(V)) {
1517 return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) &&
1518 Mask.match(I->getShuffleMask());
1519 }
1520 return false;
1521 }
1522};
1523
1524struct m_Mask {
1525 ArrayRef<int> &MaskRef;
1526 m_Mask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
1527 bool match(ArrayRef<int> Mask) {
1528 MaskRef = Mask;
1529 return true;
1530 }
1531};
1532
1533struct m_ZeroMask {
1534 bool match(ArrayRef<int> Mask) {
1535 return all_of(Mask, [](int Elem) { return Elem == 0 || Elem == -1; });
1536 }
1537};
1538
1539struct m_SpecificMask {
1540 ArrayRef<int> &MaskRef;
1541 m_SpecificMask(ArrayRef<int> &MaskRef) : MaskRef(MaskRef) {}
1542 bool match(ArrayRef<int> Mask) { return MaskRef == Mask; }
1543};
1544
1545struct m_SplatOrUndefMask {
1546 int &SplatIndex;
1547 m_SplatOrUndefMask(int &SplatIndex) : SplatIndex(SplatIndex) {}
1548 bool match(ArrayRef<int> Mask) {
1549 auto First = find_if(Mask, [](int Elem) { return Elem != -1; });
1550 if (First == Mask.end())
1551 return false;
1552 SplatIndex = *First;
1553 return all_of(Mask,
1554 [First](int Elem) { return Elem == *First || Elem == -1; });
1555 }
1556};
1557
1558/// Matches ShuffleVectorInst independently of mask value.
1559template <typename V1_t, typename V2_t>
1560inline TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>
1561m_Shuffle(const V1_t &v1, const V2_t &v2) {
1562 return TwoOps_match<V1_t, V2_t, Instruction::ShuffleVector>(v1, v2);
1563}
1564
1565template <typename V1_t, typename V2_t, typename Mask_t>
1566inline Shuffle_match<V1_t, V2_t, Mask_t>
1567m_Shuffle(const V1_t &v1, const V2_t &v2, const Mask_t &mask) {
1568 return Shuffle_match<V1_t, V2_t, Mask_t>(v1, v2, mask);
1569}
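// A minimal usage sketch (illustrative): recognize a shuffle that splats
// element 0 (undef lanes allowed), assuming `V` is a Value* and PatternMatch
// is in scope:
//   Value *X, *Y;
//   if (match(V, m_Shuffle(m_Value(X), m_Value(Y), m_ZeroMask())))
//     ; // every mask element is 0 or -1 (undef).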
1570
1571/// Matches LoadInst.
1572template <typename OpTy>
1573inline OneOps_match<OpTy, Instruction::Load> m_Load(const OpTy &Op) {
1574 return OneOps_match<OpTy, Instruction::Load>(Op);
1575}
1576
1577/// Matches StoreInst.
1578template <typename ValueOpTy, typename PointerOpTy>
1579inline TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>
1580m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) {
1581 return TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>(ValueOp,
1582 PointerOp);
1583}
1584
1585//===----------------------------------------------------------------------===//
1586// Matchers for CastInst classes
1587//
1588
1589template <typename Op_t, unsigned Opcode> struct CastClass_match {
1590 Op_t Op;
1591
1592 CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
1593
1594 template <typename OpTy> bool match(OpTy *V) {
1595 if (auto *O = dyn_cast<Operator>(V))
1596 return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
1597 return false;
1598 }
1599};
1600
1601/// Matches BitCast.
1602template <typename OpTy>
1603inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
1604 return CastClass_match<OpTy, Instruction::BitCast>(Op);
1605}
1606
1607/// Matches PtrToInt.
1608template <typename OpTy>
1609inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
1610 return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
1611}
1612
1613/// Matches IntToPtr.
1614template <typename OpTy>
1615inline CastClass_match<OpTy, Instruction::IntToPtr> m_IntToPtr(const OpTy &Op) {
1616 return CastClass_match<OpTy, Instruction::IntToPtr>(Op);
1617}
1618
1619/// Matches Trunc.
1620template <typename OpTy>
1621inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
1622 return CastClass_match<OpTy, Instruction::Trunc>(Op);
1623}
1624
1625template <typename OpTy>
1626inline match_combine_or<CastClass_match<OpTy, Instruction::Trunc>, OpTy>
1627m_TruncOrSelf(const OpTy &Op) {
1628 return m_CombineOr(m_Trunc(Op), Op);
1629}
1630
1631/// Matches SExt.
1632template <typename OpTy>
1633inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
1634 return CastClass_match<OpTy, Instruction::SExt>(Op);
1635}
1636
1637/// Matches ZExt.
1638template <typename OpTy>
1639inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
1640 return CastClass_match<OpTy, Instruction::ZExt>(Op);
1641}
1642
1643template <typename OpTy>
1644inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, OpTy>
1645m_ZExtOrSelf(const OpTy &Op) {
1646 return m_CombineOr(m_ZExt(Op), Op);
1647}
1648
1649template <typename OpTy>
1650inline match_combine_or<CastClass_match<OpTy, Instruction::SExt>, OpTy>
1651m_SExtOrSelf(const OpTy &Op) {
1652 return m_CombineOr(m_SExt(Op), Op);
1653}
1654
1655template <typename OpTy>
1656inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
1657 CastClass_match<OpTy, Instruction::SExt>>
1658m_ZExtOrSExt(const OpTy &Op) {
1659 return m_CombineOr(m_ZExt(Op), m_SExt(Op));
1660}
1661
1662template <typename OpTy>
1663inline match_combine_or<
1664 match_combine_or<CastClass_match<OpTy, Instruction::ZExt>,
1665 CastClass_match<OpTy, Instruction::SExt>>,
1666 OpTy>
1667m_ZExtOrSExtOrSelf(const OpTy &Op) {
1668 return m_CombineOr(m_ZExtOrSExt(Op), Op);
1669}
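// A minimal usage sketch (illustrative): look through a zext or sext if one is
// present, assuming `V` is a Value* and PatternMatch is in scope:
//   Value *Src;
//   if (match(V, m_ZExtOrSExtOrSelf(m_Value(Src))))
//     ; // Src is V's pre-extension operand, or V itself if there is no ext.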
1670
1671template <typename OpTy>
1672inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
1673 return CastClass_match<OpTy, Instruction::UIToFP>(Op);
1674}
1675
1676template <typename OpTy>
1677inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
1678 return CastClass_match<OpTy, Instruction::SIToFP>(Op);
1679}
1680
1681template <typename OpTy>
1682inline CastClass_match<OpTy, Instruction::FPToUI> m_FPToUI(const OpTy &Op) {
1683 return CastClass_match<OpTy, Instruction::FPToUI>(Op);
1684}
1685
1686template <typename OpTy>
1687inline CastClass_match<OpTy, Instruction::FPToSI> m_FPToSI(const OpTy &Op) {
1688 return CastClass_match<OpTy, Instruction::FPToSI>(Op);
1689}
1690
1691template <typename OpTy>
1692inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) {
1693 return CastClass_match<OpTy, Instruction::FPTrunc>(Op);
1694}
1695
1696template <typename OpTy>
1697inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) {
1698 return CastClass_match<OpTy, Instruction::FPExt>(Op);
1699}
1700
1701//===----------------------------------------------------------------------===//
1702// Matchers for control flow.
1703//
1704
1705struct br_match {
1706 BasicBlock *&Succ;
1707
1708 br_match(BasicBlock *&Succ) : Succ(Succ) {}
1709
1710 template <typename OpTy> bool match(OpTy *V) {
1711 if (auto *BI = dyn_cast<BranchInst>(V))
1712 if (BI->isUnconditional()) {
1713 Succ = BI->getSuccessor(0);
1714 return true;
1715 }
1716 return false;
1717 }
1718};
1719
1720inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
1721
1722template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
1723struct brc_match {
1724 Cond_t Cond;
1725 TrueBlock_t T;
1726 FalseBlock_t F;
1727
1728 brc_match(const Cond_t &C, const TrueBlock_t &t, const FalseBlock_t &f)
1729 : Cond(C), T(t), F(f) {}
1730
1731 template <typename OpTy> bool match(OpTy *V) {
1732 if (auto *BI = dyn_cast<BranchInst>(V))
1733 if (BI->isConditional() && Cond.match(BI->getCondition()))
1734 return T.match(BI->getSuccessor(0)) && F.match(BI->getSuccessor(1));
1735 return false;
1736 }
1737};
1738
1739template <typename Cond_t>
1740inline brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>
1741m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
1742 return brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>(
1743 C, m_BasicBlock(T), m_BasicBlock(F));
1744}
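// A minimal usage sketch (illustrative): take apart a conditional branch on an
// integer compare, assuming `BI` is a block's terminator and PatternMatch is
// in scope:
//   ICmpInst::Predicate Pred;
//   Value *L, *R;
//   BasicBlock *TrueBB, *FalseBB;
//   if (match(BI, m_Br(m_ICmp(Pred, m_Value(L), m_Value(R)), TrueBB, FalseBB)))
//     ; // the branch condition is "icmp Pred L, R".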
1745
1746template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t>
1747inline brc_match<Cond_t, TrueBlock_t, FalseBlock_t>
1748m_Br(const Cond_t &C, const TrueBlock_t &T, const FalseBlock_t &F) {
1749 return brc_match<Cond_t, TrueBlock_t, FalseBlock_t>(C, T, F);
1750}
1751
1752//===----------------------------------------------------------------------===//
1753// Matchers for max/min idioms, e.g. "select (sgt x, y), x, y" -> smax(x,y).
1754//
1755
1756template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t,
1757 bool Commutable = false>
1758struct MaxMin_match {
1759 using PredType = Pred_t;
1760 LHS_t L;
1761 RHS_t R;
1762
1763 // The evaluation order is always stable, regardless of Commutability.
1764 // The LHS is always matched first.
1765 MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
1766
1767 template <typename OpTy> bool match(OpTy *V) {
1768 if (auto *II = dyn_cast<IntrinsicInst>(V)) {
1769 Intrinsic::ID IID = II->getIntrinsicID();
1770 if ((IID == Intrinsic::smax && Pred_t::match(ICmpInst::ICMP_SGT)) ||
1771 (IID == Intrinsic::smin && Pred_t::match(ICmpInst::ICMP_SLT)) ||
1772 (IID == Intrinsic::umax && Pred_t::match(ICmpInst::ICMP_UGT)) ||
1773 (IID == Intrinsic::umin && Pred_t::match(ICmpInst::ICMP_ULT))) {
1774 Value *LHS = II->getOperand(0), *RHS = II->getOperand(1);
1775 return (L.match(LHS) && R.match(RHS)) ||
1776 (Commutable && L.match(RHS) && R.match(LHS));
1777 }
1778 }
1779 // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
1780 auto *SI = dyn_cast<SelectInst>(V);
1781 if (!SI)
1782 return false;
1783 auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
1784 if (!Cmp)
1785 return false;
1786 // At this point we have a select conditioned on a comparison. Check that
1787 // it is the values returned by the select that are being compared.
1788 auto *TrueVal = SI->getTrueValue();
1789 auto *FalseVal = SI->getFalseValue();
1790 auto *LHS = Cmp->getOperand(0);
1791 auto *RHS = Cmp->getOperand(1);
1792 if ((TrueVal != LHS || FalseVal != RHS) &&
1793 (TrueVal != RHS || FalseVal != LHS))
1794 return false;
1795 typename CmpInst_t::Predicate Pred =
1796 LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
1797 // Does "(x pred y) ? x : y" represent the desired max/min operation?
1798 if (!Pred_t::match(Pred))
1799 return false;
1800 // It does! Bind the operands.
1801 return (L.match(LHS) && R.match(RHS)) ||
1802 (Commutable && L.match(RHS) && R.match(LHS));
1803 }
1804};
1805
1806/// Helper class for identifying signed max predicates.
1807struct smax_pred_ty {
1808 static bool match(ICmpInst::Predicate Pred) {
1809 return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
1810 }
1811};
1812
1813/// Helper class for identifying signed min predicates.
1814struct smin_pred_ty {
1815 static bool match(ICmpInst::Predicate Pred) {
1816 return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
1817 }
1818};
1819
1820/// Helper class for identifying unsigned max predicates.
1821struct umax_pred_ty {
1822 static bool match(ICmpInst::Predicate Pred) {
1823 return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
1824 }
1825};
1826
1827/// Helper class for identifying unsigned min predicates.
1828struct umin_pred_ty {
1829 static bool match(ICmpInst::Predicate Pred) {
1830 return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
1831 }
1832};
1833
1834/// Helper class for identifying ordered max predicates.
1835struct ofmax_pred_ty {
1836 static bool match(FCmpInst::Predicate Pred) {
1837 return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
1838 }
1839};
1840
1841/// Helper class for identifying ordered min predicates.
1842struct ofmin_pred_ty {
1843 static bool match(FCmpInst::Predicate Pred) {
1844 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
1845 }
1846};
1847
1848/// Helper class for identifying unordered max predicates.
1849struct ufmax_pred_ty {
1850 static bool match(FCmpInst::Predicate Pred) {
1851 return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
1852 }
1853};
1854
1855/// Helper class for identifying unordered min predicates.
1856struct ufmin_pred_ty {
1857 static bool match(FCmpInst::Predicate Pred) {
1858 return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
1859 }
1860};
1861
1862template <typename LHS, typename RHS>
1863inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
1864 const RHS &R) {
1865 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
1866}
1867
1868template <typename LHS, typename RHS>
1869inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
1870 const RHS &R) {
1871 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
1872}
1873
1874template <typename LHS, typename RHS>
1875inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
1876 const RHS &R) {
1877 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
1878}
1879
1880template <typename LHS, typename RHS>
1881inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
1882 const RHS &R) {
1883 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
1884}
1885
1886template <typename LHS, typename RHS>
1887inline match_combine_or<
1888 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>,
1889 MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>>,
1890 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>,
1891 MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>>>
1892m_MaxOrMin(const LHS &L, const RHS &R) {
1893 return m_CombineOr(m_CombineOr(m_SMax(L, R), m_SMin(L, R)),
1894 m_CombineOr(m_UMax(L, R), m_UMin(L, R)));
1895}
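// A minimal usage sketch (illustrative): both the smax intrinsic and the
// "select (icmp sgt X, Y), X, Y" idiom are matched, assuming `V` is a Value*
// and PatternMatch is in scope:
//   Value *X, *Y;
//   if (match(V, m_SMax(m_Value(X), m_Value(Y))))
//     ; // V computes the signed maximum of X and Y.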
1896
1897/// Match an 'ordered' floating point maximum function.
1898/// Floating point has one special value 'NaN'. Therefore, there is no total
1899/// order. However, if we can ignore the 'NaN' value (for example, because of a
1900/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
1901/// semantics. In the presence of 'NaN' we have to preserve the original
1902/// select(fcmp(ogt/oge, L, R), L, R) semantics matched by this predicate.
1903///
1904/// m_OrdFMax(L, R) = max(L, R) iff L and R are not NaN
1905/// m_OrdFMax(L, R) = R iff L or R are NaN
1906template <typename LHS, typename RHS>
1907inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
1908 const RHS &R) {
1909 return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
1910}
1911
1912/// Match an 'ordered' floating point minimum function.
1913/// Floating point has one special value 'NaN'. Therefore, there is no total
1914/// order. However, if we can ignore the 'NaN' value (for example, because of a
1915/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
1916/// semantics. In the presence of 'NaN' we have to preserve the original
1917/// select(fcmp(olt/ole, L, R), L, R) semantics matched by this predicate.
1918///
1919/// m_OrdFMin(L, R) = min(L, R) iff L and R are not NaN
1920/// m_OrdFMin(L, R) = R iff L or R are NaN
1921template <typename LHS, typename RHS>
1922inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
1923 const RHS &R) {
1924 return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
1925}
1926
1927/// Match an 'unordered' floating point maximum function.
1928/// Floating point has one special value 'NaN'. Therefore, there is no total
1929/// order. However, if we can ignore the 'NaN' value (for example, because of a
1930/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum'
1931/// semantics. In the presence of 'NaN' we have to preserve the original
1932/// select(fcmp(ugt/uge, L, R), L, R) semantics matched by this predicate.
1933///
1934/// m_UnordFMax(L, R) = max(L, R) iff L and R are not NaN
1935/// m_UnordFMax(L, R) = L iff L or R are NaN
1936template <typename LHS, typename RHS>
1937inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
1938m_UnordFMax(const LHS &L, const RHS &R) {
1939 return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
1940}
1941
1942/// Match an 'unordered' floating point minimum function.
1943/// Floating point has one special value 'NaN'. Therefore, there is no total
1944/// order. However, if we can ignore the 'NaN' value (for example, because of a
1945/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
1946/// semantics. In the presence of 'NaN' we have to preserve the original
1947/// select(fcmp(ult/ule, L, R), L, R) semantics matched by this predicate.
1948///
1949/// m_UnordFMin(L, R) = min(L, R) iff L and R are not NaN
1950/// m_UnordFMin(L, R) = L iff L or R are NaN
1951template <typename LHS, typename RHS>
1952inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
1953m_UnordFMin(const LHS &L, const RHS &R) {
1954 return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
1955}
1956
1957//===----------------------------------------------------------------------===//
1958// Matchers for overflow check patterns: e.g. (a + b) u< a, (a ^ -1) <u b
1959// Note that S might be matched to instructions other than AddInst.
1960//
1961
1962template <typename LHS_t, typename RHS_t, typename Sum_t>
1963struct UAddWithOverflow_match {
1964 LHS_t L;
1965 RHS_t R;
1966 Sum_t S;
1967
1968 UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
1969 : L(L), R(R), S(S) {}
1970
1971 template <typename OpTy> bool match(OpTy *V) {
1972 Value *ICmpLHS, *ICmpRHS;
1973 ICmpInst::Predicate Pred;
1974 if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
1975 return false;
1976
1977 Value *AddLHS, *AddRHS;
1978 auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));
1979
1980 // (a + b) u< a, (a + b) u< b
1981 if (Pred == ICmpInst::ICMP_ULT)
1982 if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
1983 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
1984
1985 // a >u (a + b), b >u (a + b)
1986 if (Pred == ICmpInst::ICMP_UGT)
1987 if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
1988 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
1989
1990 Value *Op1;
1991 auto XorExpr = m_OneUse(m_Xor(m_Value(Op1), m_AllOnes()));
1992 // (a ^ -1) <u b
1993 if (Pred == ICmpInst::ICMP_ULT) {
1994 if (XorExpr.match(ICmpLHS))
1995 return L.match(Op1) && R.match(ICmpRHS) && S.match(ICmpLHS);
1996 }
1997 // b >u (a ^ -1)
1998 if (Pred == ICmpInst::ICMP_UGT) {
1999 if (XorExpr.match(ICmpRHS))
2000 return L.match(Op1) && R.match(ICmpLHS) && S.match(ICmpRHS);
2001 }
2002
2003 // Match special-case for increment-by-1.
2004 if (Pred == ICmpInst::ICMP_EQ) {
2005 // (a + 1) == 0
2006 // (1 + a) == 0
2007 if (AddExpr.match(ICmpLHS) && m_ZeroInt().match(ICmpRHS) &&
2008 (m_One().match(AddLHS) || m_One().match(AddRHS)))
2009 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
2010 // 0 == (a + 1)
2011 // 0 == (1 + a)
2012 if (m_ZeroInt().match(ICmpLHS) && AddExpr.match(ICmpRHS) &&
2013 (m_One().match(AddLHS) || m_One().match(AddRHS)))
2014 return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
2015 }
2016
2017 return false;
2018 }
2019};
2020
2021/// Match an icmp instruction checking for unsigned overflow on addition.
2022///
2023/// S is matched to the addition whose result is being checked for overflow, and
2024/// L and R are matched to the LHS and RHS of S.
2025template <typename LHS_t, typename RHS_t, typename Sum_t>
2026UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
2027m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
2028 return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
2029}
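A minimal usage sketch (illustrative only, not part of this header); the helper name bindUAddOverflowCheck and its signature are assumptions.

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool bindUAddOverflowCheck(Value *Cond, Value *&A, Value *&B,
                                  Value *&Sum) {
  // Succeeds on forms such as (a + b) u< a, (a ^ -1) <u b, or (a + 1) == 0,
  // binding A and B to the operands and Sum to the expression being checked.
  return match(Cond, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)));
}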
2030
2031template <typename Opnd_t> struct Argument_match {
2032 unsigned OpI;
2033 Opnd_t Val;
2034
2035 Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
2036
2037 template <typename OpTy> bool match(OpTy *V) {
2038 // FIXME: Should likely be switched to use `CallBase`.
2039 if (const auto *CI = dyn_cast<CallInst>(V))
2040 return Val.match(CI->getArgOperand(OpI));
2041 return false;
2042 }
2043};
2044
2045/// Match an argument.
2046template <unsigned OpI, typename Opnd_t>
2047inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
2048 return Argument_match<Opnd_t>(OpI, Op);
2049}
2050
2051/// Intrinsic matchers.
2052struct IntrinsicID_match {
2053 unsigned ID;
2054
2055 IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
2056
2057 template <typename OpTy> bool match(OpTy *V) {
2058 if (const auto *CI = dyn_cast<CallInst>(V))
37.1
'CI' is null
46.1
'CI' is null
37
Assuming 'V' is not a 'CallInst'
38
Taking false branch
46
'V' is not a 'CallInst'
47
Taking false branch
2059 if (const auto *F = CI->getCalledFunction())
2060 return F->getIntrinsicID() == ID;
2061 return false;
39
Returning zero, which participates in a condition later
48
Returning zero, which participates in a condition later
2062 }
2063};
2064
2065/// Intrinsic matches are combinations of ID matchers and argument
2066/// matchers. Higher arity matchers are defined recursively in terms of and-ing
2067/// them with lower arity matchers. Here are some convenient typedefs for up to
2068/// several arguments; more can be added as needed.
2069template <typename T0 = void, typename T1 = void, typename T2 = void,
2070 typename T3 = void, typename T4 = void, typename T5 = void,
2071 typename T6 = void, typename T7 = void, typename T8 = void,
2072 typename T9 = void, typename T10 = void>
2073struct m_Intrinsic_Ty;
2074template <typename T0> struct m_Intrinsic_Ty<T0> {
2075 using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>;
2076};
2077template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
2078 using Ty =
2079 match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>;
2080};
2081template <typename T0, typename T1, typename T2>
2082struct m_Intrinsic_Ty<T0, T1, T2> {
2083 using Ty =
2084 match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
2085 Argument_match<T2>>;
2086};
2087template <typename T0, typename T1, typename T2, typename T3>
2088struct m_Intrinsic_Ty<T0, T1, T2, T3> {
2089 using Ty =
2090 match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
2091 Argument_match<T3>>;
2092};
2093
2094template <typename T0, typename T1, typename T2, typename T3, typename T4>
2095struct m_Intrinsic_Ty<T0, T1, T2, T3, T4> {
2096 using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty,
2097 Argument_match<T4>>;
2098};
2099
2100template <typename T0, typename T1, typename T2, typename T3, typename T4, typename T5>
2101struct m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5> {
2102 using Ty = match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty,
2103 Argument_match<T5>>;
2104};
2105
2106/// Match intrinsic calls like this:
2107/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
2108template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
2109 return IntrinsicID_match(IntrID);
2110}
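To illustrate the pattern named in the comment above, a small sketch (not part of this header; the function name isFAbsCall is an assumption) that matches a fabs intrinsic call and captures its operand.

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isFAbsCall(Value *V, Value *&X) {
  // Matches "call @llvm.fabs(X)" and binds X; the m_FAbs helper below is
  // shorthand for the same pattern.
  return match(V, m_Intrinsic<Intrinsic::fabs>(m_Value(X)));
}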
2111
2112/// Matches MaskedLoad Intrinsic.
2113template <typename Opnd0, typename Opnd1, typename Opnd2, typename Opnd3>
2114inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2, Opnd3>::Ty
2115m_MaskedLoad(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2,
2116 const Opnd3 &Op3) {
2117 return m_Intrinsic<Intrinsic::masked_load>(Op0, Op1, Op2, Op3);
2118}
2119
2120template <Intrinsic::ID IntrID, typename T0>
2121inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
2122 return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
2123}
2124
2125template <Intrinsic::ID IntrID, typename T0, typename T1>
2126inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
2127 const T1 &Op1) {
2128 return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
2129}
2130
2131template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
2132inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
2133m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
2134 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
2135}
2136
2137template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
2138 typename T3>
2139inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
2140m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
2141 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
2142}
2143
2144template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
2145 typename T3, typename T4>
2146inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4>::Ty
2147m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
2148 const T4 &Op4) {
2149 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3),
2150 m_Argument<4>(Op4));
2151}
2152
2153template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
2154 typename T3, typename T4, typename T5>
2155inline typename m_Intrinsic_Ty<T0, T1, T2, T3, T4, T5>::Ty
2156m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3,
2157 const T4 &Op4, const T5 &Op5) {
2158 return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2, Op3, Op4),
2159 m_Argument<5>(Op5));
2160}
2161
2162// Helper intrinsic matching specializations.
2163template <typename Opnd0>
2164inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) {
2165 return m_Intrinsic<Intrinsic::bitreverse>(Op0);
2166}
2167
2168template <typename Opnd0>
2169inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
2170 return m_Intrinsic<Intrinsic::bswap>(Op0);
2171}
2172
2173template <typename Opnd0>
2174inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FAbs(const Opnd0 &Op0) {
2175 return m_Intrinsic<Intrinsic::fabs>(Op0);
2176}
2177
2178template <typename Opnd0>
2179inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FCanonicalize(const Opnd0 &Op0) {
2180 return m_Intrinsic<Intrinsic::canonicalize>(Op0);
2181}
2182
2183template <typename Opnd0, typename Opnd1>
2184inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
2185 const Opnd1 &Op1) {
2186 return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
2187}
2188
2189template <typename Opnd0, typename Opnd1>
2190inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
2191 const Opnd1 &Op1) {
2192 return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
2193}
2194
2195template <typename Opnd0, typename Opnd1, typename Opnd2>
2196inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2>::Ty
2197m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2) {
2198 return m_Intrinsic<Intrinsic::fshl>(Op0, Op1, Op2);
2199}
2200
2201template <typename Opnd0, typename Opnd1, typename Opnd2>
2202inline typename m_Intrinsic_Ty<Opnd0, Opnd1, Opnd2>::Ty
2203m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2) {
2204 return m_Intrinsic<Intrinsic::fshr>(Op0, Op1, Op2);
2205}
2206
2207//===----------------------------------------------------------------------===//
2208// Matchers for two-operands operators with the operators in either order
2209//
2210
2211/// Matches a BinaryOperator with LHS and RHS in either order.
2212template <typename LHS, typename RHS>
2213inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) {
2214 return AnyBinaryOp_match<LHS, RHS, true>(L, R);
2215}
2216
2217/// Matches an ICmp with a predicate over LHS and RHS in either order.
2218/// Swaps the predicate if operands are commuted.
2219template <typename LHS, typename RHS>
2220inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>
2221m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
2222 return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L,
2223 R);
2224}
2225
2226/// Matches a Add with LHS and RHS in either order.
2227template <typename LHS, typename RHS>
2228inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L,
2229 const RHS &R) {
2230 return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R);
2231}
2232
2233/// Matches a Mul with LHS and RHS in either order.
2234template <typename LHS, typename RHS>
2235inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L,
2236 const RHS &R) {
2237 return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R);
2238}
2239
2240/// Matches an And with LHS and RHS in either order.
2241template <typename LHS, typename RHS>
2242inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L,
2243 const RHS &R) {
2244 return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R);
2245}
2246
2247/// Matches an Or with LHS and RHS in either order.
2248template <typename LHS, typename RHS>
2249inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L,
2250 const RHS &R) {
2251 return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R);
2252}
2253
2254/// Matches an Xor with LHS and RHS in either order.
2255template <typename LHS, typename RHS>
2256inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L,
2257 const RHS &R) {
2258 return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R);
2259}
2260
2261/// Matches a 'Neg' as 'sub 0, V'.
2262template <typename ValTy>
2263inline BinaryOp_match<cst_pred_ty<is_zero_int>, ValTy, Instruction::Sub>
2264m_Neg(const ValTy &V) {
2265 return m_Sub(m_ZeroInt(), V);
2266}
2267
2268/// Matches a 'Neg' as 'sub nsw 0, V'.
2269template <typename ValTy>
2270inline OverflowingBinaryOp_match<cst_pred_ty<is_zero_int>, ValTy,
2271 Instruction::Sub,
2272 OverflowingBinaryOperator::NoSignedWrap>
2273m_NSWNeg(const ValTy &V) {
2274 return m_NSWSub(m_ZeroInt(), V);
2275}
2276
2277/// Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
2278template <typename ValTy>
2279inline BinaryOp_match<ValTy, cst_pred_ty<is_all_ones>, Instruction::Xor, true>
2280m_Not(const ValTy &V) {
2281 return m_c_Xor(V, m_AllOnes());
2282}
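A brief sketch of the commutative 'Not' matcher (illustrative only; isNotOf is a hypothetical name).

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isNotOf(Value *V, Value *&X) {
  // Matches both "xor X, -1" and "xor -1, X" because m_c_Xor is commutative.
  return match(V, m_Not(m_Value(X)));
}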
2283
2284/// Matches an SMin with LHS and RHS in either order.
2285template <typename LHS, typename RHS>
2286inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>
2287m_c_SMin(const LHS &L, const RHS &R) {
2288 return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R);
2289}
2290/// Matches an SMax with LHS and RHS in either order.
2291template <typename LHS, typename RHS>
2292inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>
2293m_c_SMax(const LHS &L, const RHS &R) {
2294 return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R);
2295}
2296/// Matches a UMin with LHS and RHS in either order.
2297template <typename LHS, typename RHS>
2298inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>
2299m_c_UMin(const LHS &L, const RHS &R) {
2300 return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R);
2301}
2302/// Matches a UMax with LHS and RHS in either order.
2303template <typename LHS, typename RHS>
2304inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>
2305m_c_UMax(const LHS &L, const RHS &R) {
2306 return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R);
2307}
2308
2309template <typename LHS, typename RHS>
2310inline match_combine_or<
2311 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>,
2312 MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>>,
2313 match_combine_or<MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>,
2314 MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>>>
2315m_c_MaxOrMin(const LHS &L, const RHS &R) {
2316 return m_CombineOr(m_CombineOr(m_c_SMax(L, R), m_c_SMin(L, R)),
2317 m_CombineOr(m_c_UMax(L, R), m_c_UMin(L, R)));
2318}
2319
2320/// Matches FAdd with LHS and RHS in either order.
2321template <typename LHS, typename RHS>
2322inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true>
2323m_c_FAdd(const LHS &L, const RHS &R) {
2324 return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R);
2325}
2326
2327/// Matches FMul with LHS and RHS in either order.
2328template <typename LHS, typename RHS>
2329inline BinaryOp_match<LHS, RHS, Instruction::FMul, true>
2330m_c_FMul(const LHS &L, const RHS &R) {
2331 return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R);
2332}
2333
2334template <typename Opnd_t> struct Signum_match {
2335 Opnd_t Val;
2336 Signum_match(const Opnd_t &V) : Val(V) {}
2337
2338 template <typename OpTy> bool match(OpTy *V) {
2339 unsigned TypeSize = V->getType()->getScalarSizeInBits();
2340 if (TypeSize == 0)
2341 return false;
2342
2343 unsigned ShiftWidth = TypeSize - 1;
2344 Value *OpL = nullptr, *OpR = nullptr;
2345
2346 // This is the representation of signum we match:
2347 //
2348 // signum(x) == (x >> 63) | (-x >>u 63)
2349 //
2350 // An i1 value is its own signum, so it's correct to match
2351 //
2352 // signum(x) == (x >> 0) | (-x >>u 0)
2353 //
2354 // for i1 values.
2355
2356 auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
2357 auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
2358 auto Signum = m_Or(LHS, RHS);
2359
2360 return Signum.match(V) && OpL == OpR && Val.match(OpL);
2361 }
2362};
2363
2364/// Matches a signum pattern.
2365///
2366/// signum(x) =
2367/// x > 0 -> 1
2368/// x == 0 -> 0
2369/// x < 0 -> -1
2370template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
2371 return Signum_match<Val_t>(V);
2372}
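A short usage sketch (not part of this header; isSignumOf is a hypothetical name) showing how the signum matcher is applied.

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isSignumOf(Value *V, Value *&X) {
  // Matches the (x >> bitwidth-1) | (-x >>u bitwidth-1) expansion of signum(x)
  // and binds x.
  return match(V, m_Signum(m_Value(X)));
}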
2373
2374template <int Ind, typename Opnd_t> struct ExtractValue_match {
2375 Opnd_t Val;
2376 ExtractValue_match(const Opnd_t &V) : Val(V) {}
2377
2378 template <typename OpTy> bool match(OpTy *V) {
2379 if (auto *I = dyn_cast<ExtractValueInst>(V)) {
2380 // If Ind is -1, don't inspect indices
2381 if (Ind != -1 &&
2382 !(I->getNumIndices() == 1 && I->getIndices()[0] == (unsigned)Ind))
2383 return false;
2384 return Val.match(I->getAggregateOperand());
2385 }
2386 return false;
2387 }
2388};
2389
2390/// Match a single index ExtractValue instruction.
2391/// For example m_ExtractValue<1>(...)
2392template <int Ind, typename Val_t>
2393inline ExtractValue_match<Ind, Val_t> m_ExtractValue(const Val_t &V) {
2394 return ExtractValue_match<Ind, Val_t>(V);
2395}
2396
2397/// Match an ExtractValue instruction with any index.
2398/// For example m_ExtractValue(...)
2399template <typename Val_t>
2400inline ExtractValue_match<-1, Val_t> m_ExtractValue(const Val_t &V) {
2401 return ExtractValue_match<-1, Val_t>(V);
2402}
2403
2404/// Matcher for a single index InsertValue instruction.
2405template <int Ind, typename T0, typename T1> struct InsertValue_match {
2406 T0 Op0;
2407 T1 Op1;
2408
2409 InsertValue_match(const T0 &Op0, const T1 &Op1) : Op0(Op0), Op1(Op1) {}
2410
2411 template <typename OpTy> bool match(OpTy *V) {
2412 if (auto *I = dyn_cast<InsertValueInst>(V)) {
2413 return Op0.match(I->getOperand(0)) && Op1.match(I->getOperand(1)) &&
2414 I->getNumIndices() == 1 && Ind == I->getIndices()[0];
2415 }
2416 return false;
2417 }
2418};
2419
2420/// Matches a single index InsertValue instruction.
2421template <int Ind, typename Val_t, typename Elt_t>
2422inline InsertValue_match<Ind, Val_t, Elt_t> m_InsertValue(const Val_t &Val,
2423 const Elt_t &Elt) {
2424 return InsertValue_match<Ind, Val_t, Elt_t>(Val, Elt);
2425}
2426
2427/// Matches patterns for `vscale`. This can either be a call to `llvm.vscale` or
2428/// the constant expression
2429/// `ptrtoint(gep <vscale x 1 x i8>, <vscale x 1 x i8>* null, i32 1)`
2430/// under the right conditions determined by DataLayout.
2431struct VScaleVal_match {
2432 const DataLayout &DL;
2433 VScaleVal_match(const DataLayout &DL) : DL(DL) {}
2434
2435 template <typename ITy> bool match(ITy *V) {
2436 if (m_Intrinsic<Intrinsic::vscale>().match(V))
2437 return true;
2438
2439 Value *Ptr;
2440 if (m_PtrToInt(m_Value(Ptr)).match(V)) {
2441 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2442 auto *DerefTy = GEP->getSourceElementType();
2443 if (GEP->getNumIndices() == 1 && isa<ScalableVectorType>(DerefTy) &&
2444 m_Zero().match(GEP->getPointerOperand()) &&
2445 m_SpecificInt(1).match(GEP->idx_begin()->get()) &&
2446 DL.getTypeAllocSizeInBits(DerefTy).getKnownMinSize() == 8)
2447 return true;
2448 }
2449 }
2450
2451 return false;
2452 }
2453};
2454
2455inline VScaleVal_match m_VScale(const DataLayout &DL) {
2456 return VScaleVal_match(DL);
2457}
2458
2459template <typename LHS, typename RHS, unsigned Opcode>
2460struct LogicalOp_match {
2461 LHS L;
2462 RHS R;
2463
2464 LogicalOp_match(const LHS &L, const RHS &R) : L(L), R(R) {}
2465
2466 template <typename T> bool match(T *V) {
2467 if (auto *I = dyn_cast<Instruction>(V)) {
2468 if (!I->getType()->isIntOrIntVectorTy(1))
2469 return false;
2470
2471 if (I->getOpcode() == Opcode && L.match(I->getOperand(0)) &&
2472 R.match(I->getOperand(1)))
2473 return true;
2474
2475 if (auto *SI = dyn_cast<SelectInst>(I)) {
2476 if (Opcode == Instruction::And) {
2477 if (const auto *C = dyn_cast<Constant>(SI->getFalseValue()))
2478 if (C->isNullValue() && L.match(SI->getCondition()) &&
2479 R.match(SI->getTrueValue()))
2480 return true;
2481 } else {
2482 assert(Opcode == Instruction::Or)(static_cast<void> (0));
2483 if (const auto *C = dyn_cast<Constant>(SI->getTrueValue()))
2484 if (C->isOneValue() && L.match(SI->getCondition()) &&
2485 R.match(SI->getFalseValue()))
2486 return true;
2487 }
2488 }
2489 }
2490
2491 return false;
2492 }
2493};
2494
2495/// Matches L && R either in the form of L & R or L ? R : false.
2496/// Note that the latter form is poison-blocking.
2497template <typename LHS, typename RHS>
2498inline LogicalOp_match<LHS, RHS, Instruction::And>
2499m_LogicalAnd(const LHS &L, const RHS &R) {
2500 return LogicalOp_match<LHS, RHS, Instruction::And>(L, R);
2501}
2502
2503/// Matches L && R where L and R are arbitrary values.
2504inline auto m_LogicalAnd() { return m_LogicalAnd(m_Value(), m_Value()); }
2505
2506/// Matches L || R either in the form of L | R or L ? true : R.
2507/// Note that the latter form is poison-blocking.
2508template <typename LHS, typename RHS>
2509inline LogicalOp_match<LHS, RHS, Instruction::Or>
2510m_LogicalOr(const LHS &L, const RHS &R) {
2511 return LogicalOp_match<LHS, RHS, Instruction::Or>(L, R);
2512}
2513
2514/// Matches L || R where L and R are arbitrary values.
2515inline auto m_LogicalOr() {
2516 return m_LogicalOr(m_Value(), m_Value());
2517}
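A minimal sketch (illustrative only; isLogicalAndOf is a hypothetical name) of the logical-and matcher, which covers both the bitwise form and the select form.

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

static bool isLogicalAndOf(Value *V, Value *&A, Value *&B) {
  // Matches "and i1 A, B" as well as the poison-blocking "A ? B : false".
  return match(V, m_LogicalAnd(m_Value(A), m_Value(B)));
}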
2518
2519} // end namespace PatternMatch
2520} // end namespace llvm
2521
2522#endif // LLVM_IR_PATTERNMATCH_H

/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include/llvm/Analysis/AliasAnalysis.h

1//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the generic AliasAnalysis interface, which is used as the
10// common interface used by all clients of alias analysis information, and
11// implemented by all alias analysis implementations. Mod/Ref information is
12// also captured by this interface.
13//
14// Implementations of this interface must implement the various virtual methods,
15// which automatically provides functionality for the entire suite of client
16// APIs.
17//
18// This API identifies memory regions with the MemoryLocation class. The pointer
19// component specifies the base memory address of the region. The Size specifies
20// the maximum size (in address units) of the memory region, or
21// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
22// identifies the "type" of the memory reference; see the
23// TypeBasedAliasAnalysis class for details.
24//
25// Some non-obvious details include:
26// - Pointers that point to two completely different objects in memory never
27// alias, regardless of the value of the Size component.
28// - NoAlias doesn't imply unequal pointers. The most obvious example of this
29// is two pointers to constant memory. Even if they are equal, constant
30// memory is never stored to, so there will never be any dependencies.
31// In this and other situations, the pointers may be both NoAlias and
32// MustAlias at the same time. The current API can only return one result,
33// though this is rarely a problem in practice.
34//
35//===----------------------------------------------------------------------===//
36
37#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
38#define LLVM_ANALYSIS_ALIASANALYSIS_H
39
40#include "llvm/ADT/DenseMap.h"
41#include "llvm/ADT/None.h"
42#include "llvm/ADT/Optional.h"
43#include "llvm/ADT/SmallVector.h"
44#include "llvm/Analysis/MemoryLocation.h"
45#include "llvm/IR/PassManager.h"
46#include "llvm/Pass.h"
47#include <cstdint>
48#include <functional>
49#include <memory>
50#include <vector>
51
52namespace llvm {
53
54class AnalysisUsage;
55class AtomicCmpXchgInst;
56class BasicAAResult;
57class BasicBlock;
58class CatchPadInst;
59class CatchReturnInst;
60class DominatorTree;
61class FenceInst;
62class Function;
63class InvokeInst;
64class PreservedAnalyses;
65class TargetLibraryInfo;
66class Value;
67
68/// The possible results of an alias query.
69///
70/// These results are always computed between two MemoryLocation objects as
71/// a query to some alias analysis.
72///
73/// Note that these are unscoped enumerations because we would like to support
74/// implicitly testing a result for the existence of any possible aliasing with
75/// a conversion to bool, but an "enum class" doesn't support this. The
76/// canonical names from the literature are suffixed and unique anyways, and so
77/// they serve as global constants in LLVM for these results.
78///
79/// See docs/AliasAnalysis.html for more information on the specific meanings
80/// of these values.
81class AliasResult {
82private:
83 static const int OffsetBits = 23;
84 static const int AliasBits = 8;
85 static_assert(AliasBits + 1 + OffsetBits <= 32,
86 "AliasResult size is intended to be 4 bytes!");
87
88 unsigned int Alias : AliasBits;
89 unsigned int HasOffset : 1;
90 signed int Offset : OffsetBits;
91
92public:
93 enum Kind : uint8_t {
94 /// The two locations do not alias at all.
95 ///
96 /// This value is arranged to convert to false, while all other values
97 /// convert to true. This allows a boolean context to convert the result to
98 /// a binary flag indicating whether there is the possibility of aliasing.
99 NoAlias = 0,
100 /// The two locations may or may not alias. This is the least precise
101 /// result.
102 MayAlias,
103 /// The two locations alias, but only due to a partial overlap.
104 PartialAlias,
105 /// The two locations precisely alias each other.
106 MustAlias,
107 };
108 static_assert(MustAlias < (1 << AliasBits),
109 "Not enough bit field size for the enum!");
110
111 explicit AliasResult() = delete;
112 constexpr AliasResult(const Kind &Alias)
113 : Alias(Alias), HasOffset(false), Offset(0) {}
114
115 operator Kind() const { return static_cast<Kind>(Alias); }
116
117 constexpr bool hasOffset() const { return HasOffset; }
118 constexpr int32_t getOffset() const {
119 assert(HasOffset && "No offset!")(static_cast<void> (0));
120 return Offset;
121 }
122 void setOffset(int32_t NewOffset) {
123 if (isInt<OffsetBits>(NewOffset)) {
124 HasOffset = true;
125 Offset = NewOffset;
126 }
127 }
128
129 /// Helper for processing AliasResult for swapped memory location pairs.
130 void swap(bool DoSwap = true) {
131 if (DoSwap && hasOffset())
132 setOffset(-getOffset());
133 }
134};
135
136static_assert(sizeof(AliasResult) == 4,
137 "AliasResult size is intended to be 4 bytes!");
138
139/// << operator for AliasResult.
140raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
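A small sketch (illustrative only; mayAlias is a hypothetical name) of the boolean-conversion convention documented above.

#include "llvm/Analysis/AliasAnalysis.h"
using namespace llvm;

static bool mayAlias(const AliasResult &AR) {
  // NoAlias is value 0 and is the only kind that converts to false, so the
  // result can be used directly as a "may alias" flag.
  return AR != AliasResult::NoAlias;
}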
141
142/// Flags indicating whether a memory access modifies or references memory.
143///
144/// This is no access at all, a modification, a reference, or both
145/// a modification and a reference. These are specifically structured such that
146/// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must'
147/// work with any of the possible values.
148enum class ModRefInfo : uint8_t {
149 /// Must is provided for completeness, but no routines will return only
150 /// Must today. See definition of Must below.
151 Must = 0,
152 /// The access may reference the value stored in memory,
153 /// a mustAlias relation was found, and no mayAlias or partialAlias found.
154 MustRef = 1,
155 /// The access may modify the value stored in memory,
156 /// a mustAlias relation was found, and no mayAlias or partialAlias found.
157 MustMod = 2,
158 /// The access may reference, modify or both the value stored in memory,
159 /// a mustAlias relation was found, and no mayAlias or partialAlias found.
160 MustModRef = MustRef | MustMod,
161 /// The access neither references nor modifies the value stored in memory.
162 NoModRef = 4,
163 /// The access may reference the value stored in memory.
164 Ref = NoModRef | MustRef,
165 /// The access may modify the value stored in memory.
166 Mod = NoModRef | MustMod,
167 /// The access may reference and may modify the value stored in memory.
168 ModRef = Ref | Mod,
169
170 /// About Must:
171 /// Must is set in a best effort manner.
172 /// We usually do not try our best to infer Must, instead it is merely
173 /// another piece of "free" information that is presented when available.
174 /// Must set means there was certainly a MustAlias found. For calls,
175 /// where multiple arguments are checked (argmemonly), this translates to
176 /// only MustAlias or NoAlias was found.
177 /// Must is not set for RAR accesses, even if the two locations must
178 /// alias. The reason is that two read accesses translate to an early return
179 /// of NoModRef. An additional alias check to set Must may be
180 /// expensive. Other cases may also not set Must (e.g. callCapturesBefore).
181 /// We refer to Must being *set* when the most significant bit is *cleared*.
182 /// Conversely we *clear* Must information by *setting* the Must bit to 1.
183};
184
185LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isNoModRef(const ModRefInfo MRI) {
186 return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
187 static_cast<int>(ModRefInfo::Must);
188}
189LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isModOrRefSet(const ModRefInfo MRI) {
190 return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef);
191}
192LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isModAndRefSet(const ModRefInfo MRI) {
193 return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
194 static_cast<int>(ModRefInfo::MustModRef);
195}
196LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isModSet(const ModRefInfo MRI) {
197 return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustMod);
198}
199LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isRefSet(const ModRefInfo MRI) {
200 return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustRef);
201}
202LLVM_NODISCARD[[clang::warn_unused_result]] inline bool isMustSet(const ModRefInfo MRI) {
203 return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef));
204}
205
206LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo setMod(const ModRefInfo MRI) {
207 return ModRefInfo(static_cast<int>(MRI) |
208 static_cast<int>(ModRefInfo::MustMod));
209}
210LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo setRef(const ModRefInfo MRI) {
211 return ModRefInfo(static_cast<int>(MRI) |
212 static_cast<int>(ModRefInfo::MustRef));
213}
214LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo setMust(const ModRefInfo MRI) {
215 return ModRefInfo(static_cast<int>(MRI) &
216 static_cast<int>(ModRefInfo::MustModRef));
217}
218LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
219 return ModRefInfo(static_cast<int>(MRI) |
220 static_cast<int>(ModRefInfo::MustModRef));
221}
222LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo clearMod(const ModRefInfo MRI) {
223 return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
224}
225LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo clearRef(const ModRefInfo MRI) {
226 return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod));
227}
228LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo clearMust(const ModRefInfo MRI) {
229 return ModRefInfo(static_cast<int>(MRI) |
230 static_cast<int>(ModRefInfo::NoModRef));
231}
232LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo unionModRef(const ModRefInfo MRI1,
233 const ModRefInfo MRI2) {
234 return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
235}
236LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
237 const ModRefInfo MRI2) {
238 return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2));
239}
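A brief sketch (illustrative only; mayWriteAndRead is a hypothetical name) combining the union and bit-test helpers above.

#include "llvm/Analysis/AliasAnalysis.h"
using namespace llvm;

static bool mayWriteAndRead(ModRefInfo A, ModRefInfo B) {
  // Union two per-query results, then test the aggregate for both Mod and Ref.
  ModRefInfo Combined = unionModRef(A, B);
  return isModSet(Combined) && isRefSet(Combined);
}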
240
241/// The locations at which a function might access memory.
242///
243/// These are primarily used in conjunction with the \c AccessKind bits to
244/// describe both the nature of access and the locations of access for a
245/// function call.
246enum FunctionModRefLocation {
247 /// Base case is no access to memory.
248 FMRL_Nowhere = 0,
249 /// Access to memory via argument pointers.
250 FMRL_ArgumentPointees = 8,
251 /// Memory that is inaccessible via LLVM IR.
252 FMRL_InaccessibleMem = 16,
253 /// Access to any memory.
254 FMRL_Anywhere = 32 | FMRL_InaccessibleMem | FMRL_ArgumentPointees
255};
256
257/// Summary of how a function affects memory in the program.
258///
259/// Loads from constant globals are not considered memory accesses for this
260/// interface. Also, functions may freely modify stack space local to their
261/// invocation without having to report it through these interfaces.
262enum FunctionModRefBehavior {
263 /// This function does not perform any non-local loads or stores to memory.
264 ///
265 /// This property corresponds to the GCC 'const' attribute.
266 /// This property corresponds to the LLVM IR 'readnone' attribute.
267 /// This property corresponds to the IntrNoMem LLVM intrinsic flag.
268 FMRB_DoesNotAccessMemory =
269 FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef),
270
271 /// The only memory references in this function (if it has any) are
272 /// non-volatile loads from objects pointed to by its pointer-typed
273 /// arguments, with arbitrary offsets.
274 ///
275 /// This property corresponds to the combination of the IntrReadMem
276 /// and IntrArgMemOnly LLVM intrinsic flags.
277 FMRB_OnlyReadsArgumentPointees =
278 FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref),
279
280 /// The only memory references in this function (if it has any) are
281 /// non-volatile stores from objects pointed to by its pointer-typed
282 /// arguments, with arbitrary offsets.
283 ///
284 /// This property corresponds to the combination of the IntrWriteMem
285 /// and IntrArgMemOnly LLVM intrinsic flags.
286 FMRB_OnlyWritesArgumentPointees =
287 FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Mod),
288
289 /// The only memory references in this function (if it has any) are
290 /// non-volatile loads and stores from objects pointed to by its
291 /// pointer-typed arguments, with arbitrary offsets.
292 ///
293 /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag.
294 FMRB_OnlyAccessesArgumentPointees =
295 FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef),
296
297 /// The only memory references in this function (if it has any) are
298 /// reads of memory that is otherwise inaccessible via LLVM IR.
299 ///
300 /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
301 FMRB_OnlyReadsInaccessibleMem =
302 FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::Ref),
303
304 /// The only memory references in this function (if it has any) are
305 /// writes to memory that is otherwise inaccessible via LLVM IR.
306 ///
307 /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
308 FMRB_OnlyWritesInaccessibleMem =
309 FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::Mod),
310
311 /// The only memory references in this function (if it has any) are
312 /// references of memory that is otherwise inaccessible via LLVM IR.
313 ///
314 /// This property corresponds to the LLVM IR inaccessiblememonly attribute.
315 FMRB_OnlyAccessesInaccessibleMem =
316 FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef),
317
318 /// The function may perform non-volatile loads from objects pointed
319 /// to by its pointer-typed arguments, with arbitrary offsets, and
320 /// it may also perform loads of memory that is otherwise
321 /// inaccessible via LLVM IR.
322 ///
323 /// This property corresponds to the LLVM IR
324 /// inaccessiblemem_or_argmemonly attribute.
325 FMRB_OnlyReadsInaccessibleOrArgMem = FMRL_InaccessibleMem |
326 FMRL_ArgumentPointees |
327 static_cast<int>(ModRefInfo::Ref),
328
329 /// The function may perform non-volatile stores to objects pointed
330 /// to by its pointer-typed arguments, with arbitrary offsets, and
331 /// it may also perform stores of memory that is otherwise
332 /// inaccessible via LLVM IR.
333 ///
334 /// This property corresponds to the LLVM IR
335 /// inaccessiblemem_or_argmemonly attribute.
336 FMRB_OnlyWritesInaccessibleOrArgMem = FMRL_InaccessibleMem |
337 FMRL_ArgumentPointees |
338 static_cast<int>(ModRefInfo::Mod),
339
340 /// The function may perform non-volatile loads and stores of objects
341 /// pointed to by its pointer-typed arguments, with arbitrary offsets, and
342 /// it may also perform loads and stores of memory that is otherwise
343 /// inaccessible via LLVM IR.
344 ///
345 /// This property corresponds to the LLVM IR
346 /// inaccessiblemem_or_argmemonly attribute.
347 FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem |
348 FMRL_ArgumentPointees |
349 static_cast<int>(ModRefInfo::ModRef),
350
351 /// This function does not perform any non-local stores or volatile loads,
352 /// but may read from any memory location.
353 ///
354 /// This property corresponds to the GCC 'pure' attribute.
355 /// This property corresponds to the LLVM IR 'readonly' attribute.
356 /// This property corresponds to the IntrReadMem LLVM intrinsic flag.
357 FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref),
358
359 // This function does not read from memory anywhere, but may write to any
360 // memory location.
361 //
362 // This property corresponds to the LLVM IR 'writeonly' attribute.
363 // This property corresponds to the IntrWriteMem LLVM intrinsic flag.
364 FMRB_OnlyWritesMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod),
365
366 /// This indicates that the function could not be classified into one of the
367 /// behaviors above.
368 FMRB_UnknownModRefBehavior =
369 FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef)
370};
371
372// Wrapper method strips bits significant only in FunctionModRefBehavior,
373// to obtain a valid ModRefInfo. The benefit of using the wrapper is that if
374// ModRefInfo enum changes, the wrapper can be updated to & with the new enum
375// entry with all bits set to 1.
376LLVM_NODISCARD[[clang::warn_unused_result]] inline ModRefInfo
377createModRefInfo(const FunctionModRefBehavior FMRB) {
378 return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef));
379}
380
381/// Reduced version of MemoryLocation that only stores a pointer and size.
382/// Used for caching AATags independent BasicAA results.
383struct AACacheLoc {
384 const Value *Ptr;
385 LocationSize Size;
386};
387
388template <> struct DenseMapInfo<AACacheLoc> {
389 static inline AACacheLoc getEmptyKey() {
390 return {DenseMapInfo<const Value *>::getEmptyKey(),
391 DenseMapInfo<LocationSize>::getEmptyKey()};
392 }
393 static inline AACacheLoc getTombstoneKey() {
394 return {DenseMapInfo<const Value *>::getTombstoneKey(),
395 DenseMapInfo<LocationSize>::getTombstoneKey()};
396 }
397 static unsigned getHashValue(const AACacheLoc &Val) {
398 return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
399 DenseMapInfo<LocationSize>::getHashValue(Val.Size);
400 }
401 static bool isEqual(const AACacheLoc &LHS, const AACacheLoc &RHS) {
402 return LHS.Ptr == RHS.Ptr && LHS.Size == RHS.Size;
403 }
404};
405
406/// This class stores info we want to provide to or retain within an alias
407/// query. By default, the root query is stateless and starts with a freshly
408/// constructed info object. Specific alias analyses can use this query info to
409/// store per-query state that is important for recursive or nested queries to
410/// avoid recomputing. To enable preserving this state across multiple queries
411/// where safe (due to the IR not changing), use a `BatchAAResults` wrapper.
412/// The information stored in an `AAQueryInfo` is currently limited to the
413/// caches used by BasicAA, but can further be extended to fit other AA needs.
414class AAQueryInfo {
415public:
416 using LocPair = std::pair<AACacheLoc, AACacheLoc>;
417 struct CacheEntry {
418 AliasResult Result;
419 /// Number of times a NoAlias assumption has been used.
420 /// 0 for assumptions that have not been used, -1 for definitive results.
421 int NumAssumptionUses;
422 /// Whether this is a definitive (non-assumption) result.
423 bool isDefinitive() const { return NumAssumptionUses < 0; }
424 };
425 using AliasCacheT = SmallDenseMap<LocPair, CacheEntry, 8>;
426 AliasCacheT AliasCache;
427
428 using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>;
429 IsCapturedCacheT IsCapturedCache;
430
431 /// Query depth used to distinguish recursive queries.
432 unsigned Depth = 0;
433
434 /// How many active NoAlias assumption uses there are.
435 int NumAssumptionUses = 0;
436
437 /// Location pairs for which an assumption based result is currently stored.
438 /// Used to remove all potentially incorrect results from the cache if an
439 /// assumption is disproven.
440 SmallVector<AAQueryInfo::LocPair, 4> AssumptionBasedResults;
441
442 AAQueryInfo() : AliasCache(), IsCapturedCache() {}
443
444 /// Create a new AAQueryInfo based on this one, but with the cache cleared.
445 /// This is used for recursive queries across phis, where cache results may
446 /// not be valid.
447 AAQueryInfo withEmptyCache() {
448 AAQueryInfo NewAAQI;
449 NewAAQI.Depth = Depth;
450 return NewAAQI;
451 }
452};
453
454class BatchAAResults;
455
456class AAResults {
457public:
458 // Make these results default constructable and movable. We have to spell
459 // these out because MSVC won't synthesize them.
460 AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {}
461 AAResults(AAResults &&Arg);
462 ~AAResults();
463
464 /// Register a specific AA result.
465 template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
466 // FIXME: We should use a much lighter weight system than the usual
467 // polymorphic pattern because we don't own AAResult. It should
468 // ideally involve two pointers and no separate allocation.
469 AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
470 }
471
472 /// Register a function analysis ID that the results aggregation depends on.
473 ///
474 /// This is used in the new pass manager to implement the invalidation logic
475 /// where we must invalidate the results aggregation if any of our component
476 /// analyses become invalid.
477 void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); }
478
479 /// Handle invalidation events in the new pass manager.
480 ///
481 /// The aggregation is invalidated if any of the underlying analyses is
482 /// invalidated.
483 bool invalidate(Function &F, const PreservedAnalyses &PA,
484 FunctionAnalysisManager::Invalidator &Inv);
485
486 //===--------------------------------------------------------------------===//
487 /// \name Alias Queries
488 /// @{
489
490 /// The main low level interface to the alias analysis implementation.
491 /// Returns an AliasResult indicating whether the two pointers are aliased to
492 /// each other. This is the interface that must be implemented by specific
493 /// alias analysis implementations.
494 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
495
496 /// A convenience wrapper around the primary \c alias interface.
497 AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
498 LocationSize V2Size) {
499 return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
500 }
501
502 /// A convenience wrapper around the primary \c alias interface.
503 AliasResult alias(const Value *V1, const Value *V2) {
504 return alias(MemoryLocation::getBeforeOrAfter(V1),
505 MemoryLocation::getBeforeOrAfter(V2));
506 }
507
508 /// A trivial helper function to check to see if the specified pointers are
509 /// no-alias.
510 bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
511 return alias(LocA, LocB) == AliasResult::NoAlias;
512 }
513
514 /// A convenience wrapper around the \c isNoAlias helper interface.
515 bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2,
516 LocationSize V2Size) {
517 return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
518 }
519
520 /// A convenience wrapper around the \c isNoAlias helper interface.
521 bool isNoAlias(const Value *V1, const Value *V2) {
522 return isNoAlias(MemoryLocation::getBeforeOrAfter(V1),
523 MemoryLocation::getBeforeOrAfter(V2));
524 }
525
526 /// A trivial helper function to check to see if the specified pointers are
527 /// must-alias.
528 bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
529 return alias(LocA, LocB) == AliasResult::MustAlias;
530 }
531
532 /// A convenience wrapper around the \c isMustAlias helper interface.
533 bool isMustAlias(const Value *V1, const Value *V2) {
534 return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) ==
535 AliasResult::MustAlias;
536 }
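A minimal sketch (illustrative only; accessesAreDisjoint is a hypothetical name) of the alias-query wrappers above applied to a load/store pair.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool accessesAreDisjoint(AAResults &AA, const LoadInst &Load,
                                const StoreInst &Store) {
  // Build precise MemoryLocations for the two accesses and ask the aggregated
  // analyses whether they are provably NoAlias.
  return AA.isNoAlias(MemoryLocation::get(&Load), MemoryLocation::get(&Store));
}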
537
538 /// Checks whether the given location points to constant memory, or if
539 /// \p OrLocal is true whether it points to a local alloca.
540 bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false);
541
542 /// A convenience wrapper around the primary \c pointsToConstantMemory
543 /// interface.
544 bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
545 return pointsToConstantMemory(MemoryLocation::getBeforeOrAfter(P), OrLocal);
546 }
547
548 /// @}
549 //===--------------------------------------------------------------------===//
550 /// \name Simple mod/ref information
551 /// @{
552
553 /// Get the ModRef info associated with a pointer argument of a call. The
554 /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
555 /// that these bits do not necessarily account for the overall behavior of
556 /// the function, but rather only provide additional per-argument
557 /// information. This never sets ModRefInfo::Must.
558 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
559
560 /// Return the behavior of the given call site.
561 FunctionModRefBehavior getModRefBehavior(const CallBase *Call);
562
563 /// Return the behavior when calling the given function.
564 FunctionModRefBehavior getModRefBehavior(const Function *F);
565
566 /// Checks if the specified call is known to never read or write memory.
567 ///
568 /// Note that if the call only reads from known-constant memory, it is also
569 /// legal to return true. Also, calls that unwind the stack are legal for
570 /// this predicate.
571 ///
572 /// Many optimizations (such as CSE and LICM) can be performed on such calls
573 /// without worrying about aliasing properties, and many calls have this
574 /// property (e.g. calls to 'sin' and 'cos').
575 ///
576 /// This property corresponds to the GCC 'const' attribute.
577 bool doesNotAccessMemory(const CallBase *Call) {
578 return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory;
579 }
580
581 /// Checks if the specified function is known to never read or write memory.
582 ///
583 /// Note that if the function only reads from known-constant memory, it is
584 /// also legal to return true. Also, functions that unwind the stack are legal
585 /// for this predicate.
586 ///
587 /// Many optimizations (such as CSE and LICM) can be performed on such calls
588 /// to such functions without worrying about aliasing properties, and many
589 /// functions have this property (e.g. 'sin' and 'cos').
590 ///
591 /// This property corresponds to the GCC 'const' attribute.
592 bool doesNotAccessMemory(const Function *F) {
593 return getModRefBehavior(F) == FMRB_DoesNotAccessMemory;
594 }
595
596 /// Checks if the specified call is known to only read from non-volatile
597 /// memory (or not access memory at all).
598 ///
599 /// Calls that unwind the stack are legal for this predicate.
600 ///
601 /// This property allows many common optimizations to be performed in the
602 /// absence of interfering store instructions, such as CSE of strlen calls.
603 ///
604 /// This property corresponds to the GCC 'pure' attribute.
605 bool onlyReadsMemory(const CallBase *Call) {
606 return onlyReadsMemory(getModRefBehavior(Call));
607 }
608
609 /// Checks if the specified function is known to only read from non-volatile
610 /// memory (or not access memory at all).
611 ///
612 /// Functions that unwind the stack are legal for this predicate.
613 ///
614 /// This property allows many common optimizations to be performed in the
615 /// absence of interfering store instructions, such as CSE of strlen calls.
616 ///
617 /// This property corresponds to the GCC 'pure' attribute.
618 bool onlyReadsMemory(const Function *F) {
619 return onlyReadsMemory(getModRefBehavior(F));
620 }
621
622 /// Checks if functions with the specified behavior are known to only read
623 /// from non-volatile memory (or not access memory at all).
624 static bool onlyReadsMemory(FunctionModRefBehavior MRB) {
625 return !isModSet(createModRefInfo(MRB));
56
Assuming the condition is true
57
Returning the value 1, which participates in a condition later
626 }
627
628 /// Checks if functions with the specified behavior are known to only write
629 /// memory (or not access memory at all).
630 static bool doesNotReadMemory(FunctionModRefBehavior MRB) {
631 return !isRefSet(createModRefInfo(MRB));
632 }
633
634 /// Checks if functions with the specified behavior are known to read and
635 /// write at most from objects pointed to by their pointer-typed arguments
636 /// (with arbitrary offsets).
637 static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) {
638 return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
61
Assuming the condition is false
62
Returning zero, which participates in a condition later
639 }
640
641 /// Checks if functions with the specified behavior are known to potentially
642 /// read or write from objects pointed to by their pointer-typed arguments
643 /// (with arbitrary offsets).
644 static bool doesAccessArgPointees(FunctionModRefBehavior MRB) {
645 return isModOrRefSet(createModRefInfo(MRB)) &&
646 ((unsigned)MRB & FMRL_ArgumentPointees);
647 }
648
649 /// Checks if functions with the specified behavior are known to read and
650 /// write at most from memory that is inaccessible from LLVM IR.
651 static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) {
652 return !((unsigned)MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem);
653 }
654
655 /// Checks if functions with the specified behavior are known to potentially
656 /// read or write from memory that is inaccessible from LLVM IR.
657 static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) {
658 return isModOrRefSet(createModRefInfo(MRB)) &&
659 ((unsigned)MRB & FMRL_InaccessibleMem);
660 }
661
662 /// Checks if functions with the specified behavior are known to read and
663 /// write at most from memory that is inaccessible from LLVM IR or objects
664 /// pointed to by their pointer-typed arguments (with arbitrary offsets).
665 static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) {
666 return !((unsigned)MRB & FMRL_Anywhere &
667 ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees));
668 }
669
670 /// getModRefInfo (for call sites) - Return information about whether
671 /// a particular call site modifies or reads the specified memory location.
672 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc);
673
674 /// getModRefInfo (for call sites) - A convenience wrapper.
675 ModRefInfo getModRefInfo(const CallBase *Call, const Value *P,
676 LocationSize Size) {
677 return getModRefInfo(Call, MemoryLocation(P, Size));
678 }
679
680 /// getModRefInfo (for loads) - Return information about whether
681 /// a particular load modifies or reads the specified memory location.
682 ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
683
684 /// getModRefInfo (for loads) - A convenience wrapper.
685 ModRefInfo getModRefInfo(const LoadInst *L, const Value *P,
686 LocationSize Size) {
687 return getModRefInfo(L, MemoryLocation(P, Size));
688 }
689
690 /// getModRefInfo (for stores) - Return information about whether
691 /// a particular store modifies or reads the specified memory location.
692 ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
693
694 /// getModRefInfo (for stores) - A convenience wrapper.
695 ModRefInfo getModRefInfo(const StoreInst *S, const Value *P,
696 LocationSize Size) {
697 return getModRefInfo(S, MemoryLocation(P, Size));
698 }
699
700 /// getModRefInfo (for fences) - Return information about whether
701 /// a particular store modifies or reads the specified memory location.
702 ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc);
703
704 /// getModRefInfo (for fences) - A convenience wrapper.
705 ModRefInfo getModRefInfo(const FenceInst *S, const Value *P,
706 LocationSize Size) {
707 return getModRefInfo(S, MemoryLocation(P, Size));
708 }
709
710 /// getModRefInfo (for cmpxchges) - Return information about whether
711 /// a particular cmpxchg modifies or reads the specified memory location.
712 ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
713 const MemoryLocation &Loc);
714
715 /// getModRefInfo (for cmpxchges) - A convenience wrapper.
716 ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
717 LocationSize Size) {
718 return getModRefInfo(CX, MemoryLocation(P, Size));
719 }
720
721 /// getModRefInfo (for atomicrmws) - Return information about whether
722 /// a particular atomicrmw modifies or reads the specified memory location.
723 ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc);
724
725 /// getModRefInfo (for atomicrmws) - A convenience wrapper.
726 ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
727 LocationSize Size) {
728 return getModRefInfo(RMW, MemoryLocation(P, Size));
729 }
730
731 /// getModRefInfo (for va_args) - Return information about whether
732 /// a particular va_arg modifies or reads the specified memory location.
733 ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
734
735 /// getModRefInfo (for va_args) - A convenience wrapper.
736 ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P,
737 LocationSize Size) {
738 return getModRefInfo(I, MemoryLocation(P, Size));
739 }
740
741 /// getModRefInfo (for catchpads) - Return information about whether
742 /// a particular catchpad modifies or reads the specified memory location.
743 ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc);
744
745 /// getModRefInfo (for catchpads) - A convenience wrapper.
746 ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
747 LocationSize Size) {
748 return getModRefInfo(I, MemoryLocation(P, Size));
749 }
750
751 /// getModRefInfo (for catchrets) - Return information about whether
752 /// a particular catchret modifies or reads the specified memory location.
753 ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc);
754
755 /// getModRefInfo (for catchrets) - A convenience wrapper.
756 ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
757 LocationSize Size) {
758 return getModRefInfo(I, MemoryLocation(P, Size));
759 }
760
761 /// Check whether or not an instruction may read or write the optionally
762 /// specified memory location.
763 ///
764 ///
765 /// An instruction that doesn't read or write memory may be trivially LICM'd
766 /// for example.
767 ///
768 /// For function calls, this delegates to the alias-analysis specific
769 /// call-site mod-ref behavior queries. Otherwise it delegates to the specific
770 /// helpers above.
771 ModRefInfo getModRefInfo(const Instruction *I,
772 const Optional<MemoryLocation> &OptLoc) {
773 AAQueryInfo AAQIP;
774 return getModRefInfo(I, OptLoc, AAQIP);
775 }
776
777 /// A convenience wrapper for constructing the memory location.
778 ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
779 LocationSize Size) {
780 return getModRefInfo(I, MemoryLocation(P, Size));
781 }
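A short sketch (illustrative only; mayWriteTo is a hypothetical name) of the instruction-versus-location query; this is the shape of query LICM issues before hoisting or sinking a memory access.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

static bool mayWriteTo(AAResults &AA, const Instruction &I,
                       const MemoryLocation &Loc) {
  // True when the instruction may modify the queried location.
  return isModSet(AA.getModRefInfo(&I, Loc));
}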
782
783 /// Return information about whether a call and an instruction may refer to
784 /// the same memory locations.
785 ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call);
786
787 /// Return information about whether two call sites may refer to the same set
788 /// of memory locations. See the AA documentation for details:
789 /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
790 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2);
791
792 /// Return information about whether a particular call site modifies
793 /// or reads the specified memory location \p MemLoc before instruction \p I
794 /// in a BasicBlock.
795 /// Early exits in callCapturesBefore may lead to ModRefInfo::Must not being
796 /// set.
797 ModRefInfo callCapturesBefore(const Instruction *I,
798 const MemoryLocation &MemLoc,
799 DominatorTree *DT) {
800 AAQueryInfo AAQIP;
801 return callCapturesBefore(I, MemLoc, DT, AAQIP);
802 }
803
804 /// A convenience wrapper to synthesize a memory location.
805 ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
806 LocationSize Size, DominatorTree *DT) {
807 return callCapturesBefore(I, MemoryLocation(P, Size), DT);
808 }
809
810 /// @}
811 //===--------------------------------------------------------------------===//
812 /// \name Higher level methods for querying mod/ref information.
813 /// @{
814
815 /// Check if it is possible for execution of the specified basic block to
816 /// modify the location Loc.
817 bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
818
819 /// A convenience wrapper synthesizing a memory location.
820 bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
821 LocationSize Size) {
822 return canBasicBlockModify(BB, MemoryLocation(P, Size));
823 }
824
825 /// Check if it is possible for the execution of the specified instructions
826 /// to mod/ref (according to the mode) the location Loc.
827 ///
828 /// The instructions to consider are all of the instructions in the range of
829 /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
830 bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
831 const MemoryLocation &Loc,
832 const ModRefInfo Mode);
833
834 /// A convenience wrapper synthesizing a memory location.
835 bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
836 const Value *Ptr, LocationSize Size,
837 const ModRefInfo Mode) {
838 return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
839 }
840
841private:
842 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
843 AAQueryInfo &AAQI);
844 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
845 bool OrLocal = false);
846 ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2,
847 AAQueryInfo &AAQIP);
848 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
849 AAQueryInfo &AAQI);
850 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
851 AAQueryInfo &AAQI);
852 ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc,
853 AAQueryInfo &AAQI);
854 ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc,
855 AAQueryInfo &AAQI);
856 ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc,
857 AAQueryInfo &AAQI);
858 ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc,
859 AAQueryInfo &AAQI);
860 ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
861 const MemoryLocation &Loc, AAQueryInfo &AAQI);
862 ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
863 AAQueryInfo &AAQI);
864 ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc,
865 AAQueryInfo &AAQI);
866 ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc,
867 AAQueryInfo &AAQI);
868 ModRefInfo getModRefInfo(const Instruction *I,
869 const Optional<MemoryLocation> &OptLoc,
870 AAQueryInfo &AAQIP);
871 ModRefInfo callCapturesBefore(const Instruction *I,
872 const MemoryLocation &MemLoc, DominatorTree *DT,
873 AAQueryInfo &AAQIP);
874
875 class Concept;
876
877 template <typename T> class Model;
878
879 template <typename T> friend class AAResultBase;
880
881 const TargetLibraryInfo &TLI;
882
883 std::vector<std::unique_ptr<Concept>> AAs;
884
885 std::vector<AnalysisKey *> AADeps;
886
887 friend class BatchAAResults;
888};
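// --- Editor's illustrative sketch (not part of AliasAnalysis.h) ---
// A minimal sketch of querying the aggregated AAResults from a transform,
// assuming the usual LLVM headers (AliasAnalysis.h, Instructions.h,
// MemoryLocation.h) are available. The helper name
// `callMayWriteLoadedMemory` is hypothetical.
static bool callMayWriteLoadedMemory(AAResults &AA, CallBase &Call,
                                     LoadInst &LI) {
  // Ask whether the call can modify the memory the load reads; isModSet()
  // tests the Mod bit of the returned ModRefInfo.
  ModRefInfo MRI = AA.getModRefInfo(&Call, MemoryLocation::get(&LI));
  return isModSet(MRI);
}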
889
890/// This class is a wrapper over an AAResults, and it is intended to be used
891/// only when there are no IR changes in between queries. BatchAAResults
892/// reuses the same `AAQueryInfo` to preserve the state across queries,
893/// essentially making AA work in "batch mode". The internal state cannot be
894/// cleared, so to go "out-of-batch-mode", the user must either use AAResults,
895/// or create a new BatchAAResults.
896class BatchAAResults {
897 AAResults &AA;
898 AAQueryInfo AAQI;
899
900public:
901 BatchAAResults(AAResults &AAR) : AA(AAR), AAQI() {}
902 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
903 return AA.alias(LocA, LocB, AAQI);
904 }
905 bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) {
906 return AA.pointsToConstantMemory(Loc, AAQI, OrLocal);
907 }
908 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) {
909 return AA.getModRefInfo(Call, Loc, AAQI);
910 }
911 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) {
912 return AA.getModRefInfo(Call1, Call2, AAQI);
913 }
914 ModRefInfo getModRefInfo(const Instruction *I,
915 const Optional<MemoryLocation> &OptLoc) {
916 return AA.getModRefInfo(I, OptLoc, AAQI);
917 }
918 ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2) {
919 return AA.getModRefInfo(I, Call2, AAQI);
920 }
921 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
922 return AA.getArgModRefInfo(Call, ArgIdx);
923 }
924 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
925 return AA.getModRefBehavior(Call);
926 }
927 bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
928 return alias(LocA, LocB) == AliasResult::MustAlias;
929 }
930 bool isMustAlias(const Value *V1, const Value *V2) {
931 return alias(MemoryLocation(V1, LocationSize::precise(1)),
932 MemoryLocation(V2, LocationSize::precise(1))) ==
933 AliasResult::MustAlias;
934 }
935 ModRefInfo callCapturesBefore(const Instruction *I,
936 const MemoryLocation &MemLoc,
937 DominatorTree *DT) {
938 return AA.callCapturesBefore(I, MemLoc, DT, AAQI);
939 }
940};
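// --- Editor's illustrative sketch (not part of AliasAnalysis.h) ---
// BatchAAResults caches state in its AAQueryInfo, so repeated queries over
// unchanged IR can be cheaper than going through AAResults each time. The
// helper name `anyStoreMayClobber` and its parameters are hypothetical.
static bool anyStoreMayClobber(AAResults &AA, ArrayRef<StoreInst *> Stores,
                               const MemoryLocation &Loc) {
  BatchAAResults BatchAA(AA); // valid only while no IR changes happen
  for (StoreInst *SI : Stores)
    if (isModSet(BatchAA.getModRefInfo(SI, Loc)))
      return true;
  return false;
}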
941
942/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
943/// pointer or reference.
944using AliasAnalysis = AAResults;
945
946/// A private abstract base class describing the concept of an individual alias
947/// analysis implementation.
948///
949/// This interface is implemented by any \c Model instantiation. It is also the
950/// interface which a type used to instantiate the model must provide.
951///
952/// All of these methods model methods of the same name in the \c
953/// AAResults class. Only differences from, and specifics of, how the
954/// implementations are called are documented here.
955class AAResults::Concept {
956public:
957 virtual ~Concept() = 0;
958
959 /// An update API used internally by the AAResults to provide
960 /// a handle back to the top level aggregation.
961 virtual void setAAResults(AAResults *NewAAR) = 0;
962
963 //===--------------------------------------------------------------------===//
964 /// \name Alias Queries
965 /// @{
966
967 /// The main low level interface to the alias analysis implementation.
968 /// Returns an AliasResult indicating whether the two pointers are aliased to
969 /// each other. This is the interface that must be implemented by specific
970 /// alias analysis implementations.
971 virtual AliasResult alias(const MemoryLocation &LocA,
972 const MemoryLocation &LocB, AAQueryInfo &AAQI) = 0;
973
974 /// Checks whether the given location points to constant memory, or if
975 /// \p OrLocal is true whether it points to a local alloca.
976 virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
977 AAQueryInfo &AAQI, bool OrLocal) = 0;
978
979 /// @}
980 //===--------------------------------------------------------------------===//
981 /// \name Simple mod/ref information
982 /// @{
983
984 /// Get the ModRef info associated with a pointer argument of a callsite. The
985 /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
986 /// that these bits do not necessarily account for the overall behavior of
987 /// the function, but rather only provide additional per-argument
988 /// information.
989 virtual ModRefInfo getArgModRefInfo(const CallBase *Call,
990 unsigned ArgIdx) = 0;
991
992 /// Return the behavior of the given call site.
993 virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0;
994
995 /// Return the behavior when calling the given function.
996 virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
997
998 /// getModRefInfo (for call sites) - Return information about whether
999 /// a particular call site modifies or reads the specified memory location.
1000 virtual ModRefInfo getModRefInfo(const CallBase *Call,
1001 const MemoryLocation &Loc,
1002 AAQueryInfo &AAQI) = 0;
1003
1004 /// Return information about whether two call sites may refer to the same set
1005 /// of memory locations. See the AA documentation for details:
1006 /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
1007 virtual ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
1008 AAQueryInfo &AAQI) = 0;
1009
1010 /// @}
1011};
1012
1013/// A private class template which derives from \c Concept and wraps some other
1014/// type.
1015///
1016/// This models the concept by directly forwarding each interface point to the
1017/// wrapped type which must implement a compatible interface. This provides
1018/// a type erased binding.
1019template <typename AAResultT> class AAResults::Model final : public Concept {
1020 AAResultT &Result;
1021
1022public:
1023 explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) {
1024 Result.setAAResults(&AAR);
1025 }
1026 ~Model() override = default;
1027
1028 void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); }
1029
1030 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
1031 AAQueryInfo &AAQI) override {
1032 return Result.alias(LocA, LocB, AAQI);
1033 }
1034
1035 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
1036 bool OrLocal) override {
1037 return Result.pointsToConstantMemory(Loc, AAQI, OrLocal);
1038 }
1039
1040 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override {
1041 return Result.getArgModRefInfo(Call, ArgIdx);
1042 }
1043
1044 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override {
1045 return Result.getModRefBehavior(Call);
1046 }
1047
1048 FunctionModRefBehavior getModRefBehavior(const Function *F) override {
1049 return Result.getModRefBehavior(F);
1050 }
1051
1052 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
1053 AAQueryInfo &AAQI) override {
1054 return Result.getModRefInfo(Call, Loc, AAQI);
1055 }
1056
1057 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
1058 AAQueryInfo &AAQI) override {
1059 return Result.getModRefInfo(Call1, Call2, AAQI);
1060 }
1061};
1062
1063/// A CRTP-driven "mixin" base class to help implement the function alias
1064/// analysis results concept.
1065///
1066/// Because of the nature of many alias analysis implementations, they often
1067/// only implement a subset of the interface. This base class will attempt to
1068/// implement the remaining portions of the interface in terms of simpler forms
1069/// of the interface where possible, and otherwise provide conservatively
1070/// correct fallback implementations.
1071///
1072/// Implementors of an alias analysis should derive from this CRTP, and then
1073/// override specific methods that they wish to customize. There is no need to
1074/// use virtual anywhere; the CRTP base class does static dispatch to the
1075/// derived type passed into it.
1076template <typename DerivedT> class AAResultBase {
1077 // Expose some parts of the interface only to the AAResults::Model
1078 // for wrapping. Specifically, this allows the model to call our
1079 // setAAResults method without exposing it as a fully public API.
1080 friend class AAResults::Model<DerivedT>;
1081
1082 /// A pointer to the AAResults object that this AAResult is
1083 /// aggregated within. May be null if not aggregated.
1084 AAResults *AAR = nullptr;
1085
1086 /// Helper to dispatch calls back through the derived type.
1087 DerivedT &derived() { return static_cast<DerivedT &>(*this); }
1088
1089 /// A setter for the AAResults pointer, which is used to satisfy the
1090 /// AAResults::Model contract.
1091 void setAAResults(AAResults *NewAAR) { AAR = NewAAR; }
1092
1093protected:
1094 /// This proxy class models a common pattern where we delegate to either the
1095 /// top-level \c AAResults aggregation if one is registered, or to the
1096 /// current result if none are registered.
1097 class AAResultsProxy {
1098 AAResults *AAR;
1099 DerivedT &CurrentResult;
1100
1101 public:
1102 AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult)
1103 : AAR(AAR), CurrentResult(CurrentResult) {}
1104
1105 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
1106 AAQueryInfo &AAQI) {
1107 return AAR ? AAR->alias(LocA, LocB, AAQI)
1108 : CurrentResult.alias(LocA, LocB, AAQI);
1109 }
1110
1111 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
1112 bool OrLocal) {
1113 return AAR ? AAR->pointsToConstantMemory(Loc, AAQI, OrLocal)
1114 : CurrentResult.pointsToConstantMemory(Loc, AAQI, OrLocal);
1115 }
1116
1117 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
1118 return AAR ? AAR->getArgModRefInfo(Call, ArgIdx)
1119 : CurrentResult.getArgModRefInfo(Call, ArgIdx);
1120 }
1121
1122 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
1123 return AAR ? AAR->getModRefBehavior(Call)
1124 : CurrentResult.getModRefBehavior(Call);
1125 }
1126
1127 FunctionModRefBehavior getModRefBehavior(const Function *F) {
1128 return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F);
1129 }
1130
1131 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
1132 AAQueryInfo &AAQI) {
1133 return AAR ? AAR->getModRefInfo(Call, Loc, AAQI)
1134 : CurrentResult.getModRefInfo(Call, Loc, AAQI);
1135 }
1136
1137 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
1138 AAQueryInfo &AAQI) {
1139 return AAR ? AAR->getModRefInfo(Call1, Call2, AAQI)
1140 : CurrentResult.getModRefInfo(Call1, Call2, AAQI);
1141 }
1142 };
1143
1144 explicit AAResultBase() = default;
1145
1146 // Provide all the copy and move constructors so that derived types aren't
1147 // constrained.
1148 AAResultBase(const AAResultBase &Arg) {}
1149 AAResultBase(AAResultBase &&Arg) {}
1150
1151 /// Get a proxy for the best AA result set to query at this time.
1152 ///
1153 /// When this result is part of a larger aggregation, this will proxy to that
1154 /// aggregation. When this result is used in isolation, it will just delegate
1155 /// back to the derived class's implementation.
1156 ///
1157 /// Note that callers of this need to take considerable care not to cause
1158 /// performance problems when they use this routine; in the case of a large
1159 /// number of alias analyses being aggregated, it can be expensive to walk
1160 /// back across the chain.
1161 AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); }
1162
1163public:
1164 AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
1165 AAQueryInfo &AAQI) {
1166 return AliasResult::MayAlias;
1167 }
1168
1169 bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI,
1170 bool OrLocal) {
1171 return false;
1172 }
1173
1174 ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) {
1175 return ModRefInfo::ModRef;
1176 }
1177
1178 FunctionModRefBehavior getModRefBehavior(const CallBase *Call) {
1179 return FMRB_UnknownModRefBehavior;
1180 }
1181
1182 FunctionModRefBehavior getModRefBehavior(const Function *F) {
1183 return FMRB_UnknownModRefBehavior;
1184 }
1185
1186 ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc,
1187 AAQueryInfo &AAQI) {
1188 return ModRefInfo::ModRef;
1189 }
1190
1191 ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2,
1192 AAQueryInfo &AAQI) {
1193 return ModRefInfo::ModRef;
1194 }
1195};
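// --- Editor's illustrative sketch (not part of AliasAnalysis.h) ---
// A minimal alias analysis built on the CRTP mixin above: only alias() is
// customized; every other query inherits the conservative defaults from
// AAResultBase. The class name `MyAAResult` is hypothetical.
class MyAAResult : public AAResultBase<MyAAResult> {
  friend AAResultBase<MyAAResult>;

public:
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI) {
    // Two distinct identified function locals (allocas, noalias calls, ...)
    // cannot alias, per isIdentifiedFunctionLocal() below.
    if (LocA.Ptr != LocB.Ptr && isIdentifiedFunctionLocal(LocA.Ptr) &&
        isIdentifiedFunctionLocal(LocB.Ptr))
      return AliasResult::NoAlias;
    // Defer everything else to the base class's conservative answer.
    return AAResultBase::alias(LocA, LocB, AAQI);
  }
};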
1196
1197/// Return true if this pointer is returned by a noalias function.
1198bool isNoAliasCall(const Value *V);
1199
1200/// Return true if this pointer refers to a distinct and identifiable object.
1201/// This returns true for:
1202/// Global Variables and Functions (but not Global Aliases)
1203/// Allocas
1204/// ByVal and NoAlias Arguments
1205/// NoAlias returns (e.g. calls to malloc)
1206///
1207bool isIdentifiedObject(const Value *V);
1208
1209/// Return true if V is unambiguously identified at the function level.
1210/// Different IdentifiedFunctionLocals can't alias.
1211/// Further, an IdentifiedFunctionLocal cannot alias with any function
1212/// arguments other than itself, which is not necessarily true for
1213/// IdentifiedObjects.
1214bool isIdentifiedFunctionLocal(const Value *V);
1215
1216/// A manager for alias analyses.
1217///
1218/// This class can have analyses registered with it, and when run it will run
1219/// all of them and aggregate their results into a single AA results interface
1220/// that dispatches across all of the alias analysis results available.
1221///
1222/// Note that the order in which analyses are registered is very significant.
1223/// That is the order in which the results will be aggregated and queried.
1224///
1225/// This manager effectively wraps the AnalysisManager for registering alias
1226/// analyses. When you register your alias analysis with this manager, it will
1227/// ensure the analysis itself is registered with its AnalysisManager.
1228///
1229/// The result of this analysis is only invalidated if one of the particular
1230/// aggregated AA results ends up being invalidated. This removes the need to
1231/// explicitly preserve the results of `AAManager`. Note that analyses should no
1232/// longer be registered once the `AAManager` is run.
1233class AAManager : public AnalysisInfoMixin<AAManager> {
1234public:
1235 using Result = AAResults;
1236
1237 /// Register a specific AA result.
1238 template <typename AnalysisT> void registerFunctionAnalysis() {
1239 ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
1240 }
1241
1242 /// Register a specific AA result.
1243 template <typename AnalysisT> void registerModuleAnalysis() {
1244 ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>);
1245 }
1246
1247 Result run(Function &F, FunctionAnalysisManager &AM);
1248
1249private:
1250 friend AnalysisInfoMixin<AAManager>;
1251
1252 static AnalysisKey Key;
1253
1254 SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM,
1255 AAResults &AAResults),
1256 4> ResultGetters;
1257
1258 template <typename AnalysisT>
1259 static void getFunctionAAResultImpl(Function &F,
1260 FunctionAnalysisManager &AM,
1261 AAResults &AAResults) {
1262 AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
1263 AAResults.addAADependencyID(AnalysisT::ID());
1264 }
1265
1266 template <typename AnalysisT>
1267 static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM,
1268 AAResults &AAResults) {
1269 auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
1270 if (auto *R =
1271 MAMProxy.template getCachedResult<AnalysisT>(*F.getParent())) {
1272 AAResults.addAAResult(*R);
1273 MAMProxy
1274 .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>();
1275 }
1276 }
1277};
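// --- Editor's illustrative sketch (not part of AliasAnalysis.h) ---
// Building an AAManager by hand for the new pass manager. Registration order
// is the query order. PassBuilder normally does this for you; this sketch
// assumes the BasicAA, ScopedNoAliasAA and TypeBasedAA headers are included
// and those analyses are already registered with the FunctionAnalysisManager.
static void registerDefaultAA(FunctionAnalysisManager &FAM) {
  AAManager AA;
  AA.registerFunctionAnalysis<BasicAA>();         // queried first
  AA.registerFunctionAnalysis<ScopedNoAliasAA>();
  AA.registerFunctionAnalysis<TypeBasedAA>();
  FAM.registerPass([AA] { return AA; });
}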
1278
1279/// A wrapper pass to provide the legacy pass manager access to a suitably
1280/// prepared AAResults object.
1281class AAResultsWrapperPass : public FunctionPass {
1282 std::unique_ptr<AAResults> AAR;
1283
1284public:
1285 static char ID;
1286
1287 AAResultsWrapperPass();
1288
1289 AAResults &getAAResults() { return *AAR; }
1290 const AAResults &getAAResults() const { return *AAR; }
1291
1292 bool runOnFunction(Function &F) override;
1293
1294 void getAnalysisUsage(AnalysisUsage &AU) const override;
1295};
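// --- Editor's illustrative sketch (not part of AliasAnalysis.h) ---
// A legacy-PM pass consuming the wrapper above. `MyLegacyPass` is a
// hypothetical name; the INITIALIZE_PASS boilerplate is omitted.
struct MyLegacyPass : FunctionPass {
  static char ID;
  MyLegacyPass() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>(); // makes AAResults available below
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    AAResults &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    (void)AA;     // alias/mod-ref queries for F would go here
    return false; // analysis-only: the IR is not modified
  }
};
char MyLegacyPass::ID = 0;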
1296
1297/// A wrapper pass for external alias analyses. This just squirrels away the
1298/// callback used to run any analyses and register their results.
1299struct ExternalAAWrapperPass : ImmutablePass {
1300 using CallbackT = std::function<void(Pass &, Function &, AAResults &)>;
1301
1302 CallbackT CB;
1303
1304 static char ID;
1305
1306 ExternalAAWrapperPass();
1307
1308 explicit ExternalAAWrapperPass(CallbackT CB);
1309
1310 void getAnalysisUsage(AnalysisUsage &AU) const override {
1311 AU.setPreservesAll();
1312 }
1313};
1314
1315FunctionPass *createAAResultsWrapperPass();
1316
1317/// A wrapper pass around a callback which can be used to populate the
1318/// AAResults in the AAResultsWrapperPass from an external AA.
1319///
1320/// The callback provided here will be used each time we prepare an AAResults
1321/// object, and will receive a reference to the function wrapper pass, the
1322/// function, and the AAResults object to populate. This should be used when
1323/// setting up a custom pass pipeline to inject a hook into the AA results.
1324ImmutablePass *createExternalAAWrapperPass(
1325 std::function<void(Pass &, Function &, AAResults &)> Callback);
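// --- Editor's illustrative sketch (not part of AliasAnalysis.h) ---
// Hooking an out-of-tree AA into the legacy pipeline, mirroring how
// target-specific AAs register themselves. `MyAAWrapperPass` and its
// getResult() accessor are hypothetical; the callback grabs that pass's
// result (if it was scheduled) and appends it to the aggregation.
static void addExternalAA(legacy::PassManagerBase &PM) {
  PM.add(createExternalAAWrapperPass([](Pass &P, Function &F, AAResults &AAR) {
    if (auto *Wrapper = P.getAnalysisIfAvailable<MyAAWrapperPass>())
      AAR.addAAResult(Wrapper->getResult());
  }));
}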
1326
1327/// A helper for the legacy pass manager to create an \c AAResults
1328/// object populated to the best of our ability for a particular function when
1329/// inside of a \c ModulePass or a \c CallGraphSCCPass.
1330///
1331/// If a \c ModulePass or a \c CallGraphSCCPass calls \p
1332/// createLegacyPMAAResults, it also needs to call \p addUsedAAAnalyses in \p
1333/// getAnalysisUsage.
1334AAResults createLegacyPMAAResults(Pass &P, Function &F, BasicAAResult &BAR);
1335
1336/// A helper for the legacy pass manager to populate \p AU to add uses to make
1337/// sure the analyses required by \p createLegacyPMAAResults are available.
1338void getAAResultsAnalysisUsage(AnalysisUsage &AU);
1339
1340} // end namespace llvm
1341
1342#endif // LLVM_ANALYSIS_ALIASANALYSIS_H