File: lib/Transforms/Scalar/LICM.cpp
Warning: line 1164, column 41: Called C++ object pointer is null
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//
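//
// A small illustrative sketch of both transforms (an added illustration, not
// from the original file; all names are hypothetical):
//
//   for (int i = 0; i < n; ++i) {
//     int t = a + b; // loop invariant: hoisted into the preheader
//     *p += t;       // if p is loop invariant and nothing else in the loop
//   }                // may alias it, the load/store of *p is promoted to a
//                    // register, with a single store emitted after the loop
//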

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// Default value of zero implies we use the regular alias set tracker mechanism
// instead of the cross product using AA to identify aliasing of the memory
// location we are interested in.
static cl::opt<int>
    LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
                   cl::desc("How many instructions to cross product using AA"));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile times. This is to be removed if MemorySSA starts
// to address the same issue. This flag applies only when LICM uses MemorySSA
// instead of AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
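// For instance (an added reading of the comment above, not from the original
// source): with a cap of 2, the first two queries get precise walker answers;
// later queries fall back to getDefiningAccess, which may report a nearer
// MemoryDef than the true clobber, so those candidates are treated
// conservatively (possibly not hoisted) but never incorrectly.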
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AliasAnalysis *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags);
static Instruction *CloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU);

namespace {
struct LoopInvariantCodeMotion {
  using ASTrackerMapTy = DenseMap<Loop *, std::unique_ptr<AliasSetTracker>>;
  bool runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
                 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                 ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool DeleteAST);

  ASTrackerMapTy &getLoopToAliasSetMap() { return LoopToAliasSetMap; }
  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  ASTrackerMapTy LoopToAliasSetMap;
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AliasAnalysis *AA);
  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoopWithMSSA(Loop *L, AliasAnalysis *AA,
                                  MemorySSAUpdater *MSSAU);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L)) {
      // If we have run LICM on a previous loop but now we are skipping
      // (because we've hit the opt-bisect limit), we need to clear the
      // loop alias information.
      LICM.getLoopToAliasSetMap().clear();
      return false;
    }

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(L,
                          &getAnalysis<AAResultsWrapperPass>().getAAResults(),
                          &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
                          &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                          &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
                              *L->getHeader()->getParent()),
                          &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
                              *L->getHeader()->getParent()),
                          SE ? &SE->getSE() : nullptr, MSSA, &ORE, false);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  using llvm::Pass::doFinalization;

  bool doFinalization() override {
    auto &AliasSetMap = LICM.getLoopToAliasSetMap();
    // All loops in the AliasSetMap should be cleaned up already. The only case
    // where we fail to do so is if an outer loop gets deleted before LICM
    // visits it.
    assert(all_of(AliasSetMap,
                  [](LoopInvariantCodeMotion::ASTrackerMapTy::value_type &KV) {
                    return !KV.first->getParentLoop();
                  }) &&
           "Didn't free loop alias sets");
    AliasSetMap.clear();
    return false;
  }

private:
  LoopInvariantCodeMotion LICM;

  /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
  void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                               Loop *L) override;

  /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
  /// set.
  void deleteAnalysisValue(Value *V, Loop *L) override;

  /// Simple Analysis hook. Delete loop L from alias set map.
  void deleteAnalysisLoop(Loop *L) override;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
  Function *F = L.getHeader()->getParent();

  auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
  // FIXME: This should probably be optional rather than required.
  if (!ORE)
    report_fatal_error("LICM: OptimizationRemarkEmitterAnalysis not "
                       "cached at a higher level");

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.TTI, &AR.SE,
                      AR.MSSA, ORE, true))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loops is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
/// We should delete AST for inner loops in the new pass manager to avoid
/// a memory leak.
///
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
    TargetLibraryInfo *TLI, TargetTransformInfo *TTI, ScalarEvolution *SE,
    MemorySSA *MSSA, OptimizationRemarkEmitter *ORE, bool DeleteAST) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

    unsigned AccessCapCount = 0;
    for (auto *BB : L->getBlocks()) {
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses) {
          (void)MA;
          AccessCapCount++;
          if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
            NoOfMemAccTooLarge = true;
            break;
          }
        }
      }
      if (NoOfMemAccTooLarge)
        break;
    }
  }

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo(DT);
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree
  // so that we are guaranteed to see definitions before we see uses. This
  // allows us to sink instructions in one pass, without iteration. After
  // sinking instructions, we perform another pass to hoist them out of the
  // loop.
  SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
                                 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                                 /*IsSink=*/true};
  if (L->hasDedicatedExits())
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
  Flags.IsSink = false;
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
                           CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify;
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !NoOfMemAccTooLarge) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      bool Promoted = false;

      // Build an AST using MSSA.
      if (!CurAST.get())
        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());

      // Loop over all of the alias sets in the tracker object.
      for (AliasSet &AS : *CurAST) {
        // We can promote this alias set if it has a store, if it is a "Must"
        // alias set, if the pointer is loop invariant, and if we are not
        // eliminating any volatile loads or stores.
        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
            !L->isLoopInvariant(AS.begin()->getValue()))
          continue;

        assert(
            !AS.empty() &&
            "Must alias set should have at least one pointer element in it!");

        SmallSetVector<Value *, 8> PointerMustAliases;
        for (const auto &ASI : AS)
          PointerMustAliases.insert(ASI.getValue());

        Promoted |= promoteLoopAccessesToScalars(
            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
      }

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values
      // defined within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and
      // reformed it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  // If this loop is nested inside of another one, save the alias information
  // for when we process the outer loop.
  if (!MSSAU.get() && CurAST.get() && L->getParentLoop() && !DeleteAST)
    LoopToAliasSetMap[L] = std::move(CurAST);

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
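/// For example (an added illustration, not from the original source): if the
/// loop body computes %a and then %b = f(%a), and both are only used outside
/// the loop, visiting %b before %a sinks %b first, which leaves %a with no
/// remaining in-loop users so it can be sunk in the same pass.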
bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                      ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // If the instruction is dead, we would try to sink it because it isn't
      // used in the loop; instead, just delete it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      if (isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          !I.mayHaveSideEffects()) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant
// branch we make note of this. When we then come to hoist an instruction
// that's conditional on such a branch we duplicate the branch and the relevant
// control flow, then hoist the instruction into the block corresponding to its
// original block in the duplicated control flow.
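//
// A sketch of the effect (an added illustration, not from the original
// source): a loop-invariant conditional branch `br %inv, %bb1, %bb2` inside
// the loop gets its control flow duplicated above the loop as `%bb1.licm` /
// `%bb2.licm`, so an instruction from %bb1 can be hoisted into %bb1.licm, and
// a phi at the join of %bb1 and %bb2 can be hoisted into the duplicated join
// block, still guarded by the same condition.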
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or if both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = std::find_if(F->begin(), F->end(), IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on if the
        // branch is triangle-like or diamond-like.
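        // For clarity (an added note, not from the original source): in the
        // triangle case BB is itself one of BI's successors, so its
        // predecessors via this branch are BI's block and the other
        // successor; in the diamond case BB is the join of BI's two distinct
        // successors, so its predecessors via this branch are those two
        // successor blocks.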
667 | if (Pair.first->getSuccessor(0) == BB) { | ||||||||
668 | PredecessorBlocks.erase(Pair.first->getParent()); | ||||||||
669 | PredecessorBlocks.erase(Pair.first->getSuccessor(1)); | ||||||||
670 | } else if (Pair.first->getSuccessor(1) == BB) { | ||||||||
671 | PredecessorBlocks.erase(Pair.first->getParent()); | ||||||||
672 | PredecessorBlocks.erase(Pair.first->getSuccessor(0)); | ||||||||
673 | } else { | ||||||||
674 | PredecessorBlocks.erase(Pair.first->getSuccessor(0)); | ||||||||
675 | PredecessorBlocks.erase(Pair.first->getSuccessor(1)); | ||||||||
676 | } | ||||||||
677 | } | ||||||||
678 | } | ||||||||
679 | // PredecessorBlocks will now be empty if for every predecessor of BB we | ||||||||
680 | // found a hoistable branch source. | ||||||||
681 | return PredecessorBlocks.empty(); | ||||||||
682 | } | ||||||||
683 | |||||||||
684 | BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) { | ||||||||
685 | if (!ControlFlowHoisting) | ||||||||
686 | return CurLoop->getLoopPreheader(); | ||||||||
687 | // If BB has already been hoisted, return that | ||||||||
688 | if (HoistDestinationMap.count(BB)) | ||||||||
689 | return HoistDestinationMap[BB]; | ||||||||
690 | |||||||||
691 | // Check if this block is conditional based on a pending branch | ||||||||
692 | auto HasBBAsSuccessor = | ||||||||
693 | [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) { | ||||||||
694 | return BB != Pair.second && (Pair.first->getSuccessor(0) == BB || | ||||||||
695 | Pair.first->getSuccessor(1) == BB); | ||||||||
696 | }; | ||||||||
697 | auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(), | ||||||||
698 | HasBBAsSuccessor); | ||||||||
699 | |||||||||
700 | // If not involved in a pending branch, hoist to preheader | ||||||||
701 | BasicBlock *InitialPreheader = CurLoop->getLoopPreheader(); | ||||||||
702 | if (It == HoistableBranches.end()) { | ||||||||
703 | LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM using " << InitialPreheader ->getName() << " as hoist destination for " << BB->getName() << "\n"; } } while (false) | ||||||||
704 | << " as hoist destination for " << BB->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM using " << InitialPreheader ->getName() << " as hoist destination for " << BB->getName() << "\n"; } } while (false) | ||||||||
705 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM using " << InitialPreheader ->getName() << " as hoist destination for " << BB->getName() << "\n"; } } while (false); | ||||||||
706 | HoistDestinationMap[BB] = InitialPreheader; | ||||||||
707 | return InitialPreheader; | ||||||||
708 | } | ||||||||
709 | BranchInst *BI = It->first; | ||||||||
710 | assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==((std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor ) == HoistableBranches.end() && "BB is expected to be the target of at most one branch" ) ? static_cast<void> (0) : __assert_fail ("std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) == HoistableBranches.end() && \"BB is expected to be the target of at most one branch\"" , "/build/llvm-toolchain-snapshot-10~svn374877/lib/Transforms/Scalar/LICM.cpp" , 712, __PRETTY_FUNCTION__)) | ||||||||
711 | HoistableBranches.end() &&((std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor ) == HoistableBranches.end() && "BB is expected to be the target of at most one branch" ) ? static_cast<void> (0) : __assert_fail ("std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) == HoistableBranches.end() && \"BB is expected to be the target of at most one branch\"" , "/build/llvm-toolchain-snapshot-10~svn374877/lib/Transforms/Scalar/LICM.cpp" , 712, __PRETTY_FUNCTION__)) | ||||||||
712 | "BB is expected to be the target of at most one branch")((std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor ) == HoistableBranches.end() && "BB is expected to be the target of at most one branch" ) ? static_cast<void> (0) : __assert_fail ("std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) == HoistableBranches.end() && \"BB is expected to be the target of at most one branch\"" , "/build/llvm-toolchain-snapshot-10~svn374877/lib/Transforms/Scalar/LICM.cpp" , 712, __PRETTY_FUNCTION__)); | ||||||||
713 | |||||||||
714 | LLVMContext &C = BB->getContext(); | ||||||||
715 | BasicBlock *TrueDest = BI->getSuccessor(0); | ||||||||
716 | BasicBlock *FalseDest = BI->getSuccessor(1); | ||||||||
717 | BasicBlock *CommonSucc = HoistableBranches[BI]; | ||||||||
718 | BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent()); | ||||||||
719 | |||||||||
720 | // Create hoisted versions of blocks that currently don't have them | ||||||||
721 | auto CreateHoistedBlock = [&](BasicBlock *Orig) { | ||||||||
722 | if (HoistDestinationMap.count(Orig)) | ||||||||
723 | return HoistDestinationMap[Orig]; | ||||||||
724 | BasicBlock *New = | ||||||||
725 | BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent()); | ||||||||
726 | HoistDestinationMap[Orig] = New; | ||||||||
727 | DT->addNewBlock(New, HoistTarget); | ||||||||
728 | if (CurLoop->getParentLoop()) | ||||||||
729 | CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI); | ||||||||
730 | ++NumCreatedBlocks; | ||||||||
731 | LLVM_DEBUG(dbgs() << "LICM created " << New->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM created " << New-> getName() << " as hoist destination for " << Orig ->getName() << "\n"; } } while (false) | ||||||||
732 | << " as hoist destination for " << Orig->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM created " << New-> getName() << " as hoist destination for " << Orig ->getName() << "\n"; } } while (false) | ||||||||
733 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM created " << New-> getName() << " as hoist destination for " << Orig ->getName() << "\n"; } } while (false); | ||||||||
734 | return New; | ||||||||
735 | }; | ||||||||
736 | BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest); | ||||||||
737 | BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest); | ||||||||
738 | BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc); | ||||||||
739 | |||||||||
740 | // Link up these blocks with branches. | ||||||||
741 | if (!HoistCommonSucc->getTerminator()) { | ||||||||
742 | // The new common successor we've generated will branch to whatever that | ||||||||
743 | // hoist target branched to. | ||||||||
744 | BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor(); | ||||||||
745 | assert(TargetSucc && "Expected hoist target to have a single successor")((TargetSucc && "Expected hoist target to have a single successor" ) ? static_cast<void> (0) : __assert_fail ("TargetSucc && \"Expected hoist target to have a single successor\"" , "/build/llvm-toolchain-snapshot-10~svn374877/lib/Transforms/Scalar/LICM.cpp" , 745, __PRETTY_FUNCTION__)); | ||||||||
746 | HoistCommonSucc->moveBefore(TargetSucc); | ||||||||
747 | BranchInst::Create(TargetSucc, HoistCommonSucc); | ||||||||
748 | } | ||||||||
749 | if (!HoistTrueDest->getTerminator()) { | ||||||||
750 | HoistTrueDest->moveBefore(HoistCommonSucc); | ||||||||
751 | BranchInst::Create(HoistCommonSucc, HoistTrueDest); | ||||||||
752 | } | ||||||||
753 | if (!HoistFalseDest->getTerminator()) { | ||||||||
754 | HoistFalseDest->moveBefore(HoistCommonSucc); | ||||||||
755 | BranchInst::Create(HoistCommonSucc, HoistFalseDest); | ||||||||
756 | } | ||||||||
757 | |||||||||
758 | // If BI is being cloned to what was originally the preheader then | ||||||||
759 | // HoistCommonSucc will now be the new preheader. | ||||||||
760 | if (HoistTarget == InitialPreheader) { | ||||||||
761 | // Phis in the loop header now need to use the new preheader. | ||||||||
762 | InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc); | ||||||||
763 | if (MSSAU) | ||||||||
764 | MSSAU->wireOldPredecessorsToNewImmediatePredecessor( | ||||||||
765 | HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget}); | ||||||||
766 | // The new preheader dominates the loop header. | ||||||||
767 | DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc); | ||||||||
768 | DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader()); | ||||||||
769 | DT->changeImmediateDominator(HeaderNode, PreheaderNode); | ||||||||
770 | // The preheader hoist destination is now the new preheader, with the | ||||||||
771 | // exception of the hoist destination of this branch. | ||||||||
772 | for (auto &Pair : HoistDestinationMap) | ||||||||
773 | if (Pair.second == InitialPreheader && Pair.first != BI->getParent()) | ||||||||
774 | Pair.second = HoistCommonSucc; | ||||||||
775 | } | ||||||||
776 | |||||||||
777 | // Now finally clone BI. | ||||||||
778 | ReplaceInstWithInst( | ||||||||
779 | HoistTarget->getTerminator(), | ||||||||
780 | BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition())); | ||||||||
781 | ++NumClonedBranches; | ||||||||
782 | |||||||||
783 | assert(CurLoop->getLoopPreheader() &&((CurLoop->getLoopPreheader() && "Hoisting blocks should not have destroyed preheader" ) ? static_cast<void> (0) : __assert_fail ("CurLoop->getLoopPreheader() && \"Hoisting blocks should not have destroyed preheader\"" , "/build/llvm-toolchain-snapshot-10~svn374877/lib/Transforms/Scalar/LICM.cpp" , 784, __PRETTY_FUNCTION__)) | ||||||||
784 | "Hoisting blocks should not have destroyed preheader")((CurLoop->getLoopPreheader() && "Hoisting blocks should not have destroyed preheader" ) ? static_cast<void> (0) : __assert_fail ("CurLoop->getLoopPreheader() && \"Hoisting blocks should not have destroyed preheader\"" , "/build/llvm-toolchain-snapshot-10~svn374877/lib/Transforms/Scalar/LICM.cpp" , 784, __PRETTY_FUNCTION__)); | ||||||||
785 | return HoistDestinationMap[BB]; | ||||||||
786 | } | ||||||||
787 | }; | ||||||||
788 | } // namespace | ||||||||
789 | |||||||||
/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth-first
/// order w.r.t. the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
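  // For example (a sketch of a hypothetical loop CFG): with blocks
  //   header -> { left, right } -> latch -> header
  // a reverse post-order visits header before left and right, and both of
  // those before latch, so a phi in latch only ever sees incoming blocks
  // whose hoisted copies already exist.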
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
                          << '\n');
        if (CurAST)
          CurAST->copyValue(&I, C);
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction.
      // TODO: It may be safe to hoist if we are hoisting to a conditional
      // block and we have accurately duplicated the control flow from the
      // loop header to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator())) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to move floating point division out of the loop by
      // converting it to a reciprocal multiplication.
      if (I.getOpcode() == Instruction::FDiv &&
          CurLoop->isLoopInvariant(I.getOperand(1)) &&
          I.hasAllowReciprocal()) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }
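      // For example (a sketch; IR names are hypothetical), given an 'arcp'
      // fdiv whose divisor %d is loop invariant:
      //   loop:
      //     %r = fdiv arcp double %x, %d
      // the loop body becomes, after the reciprocal is hoisted:
      //   preheader:
      //     %recip = fdiv arcp double 1.000000e+00, %d
      //   loop:
      //     %r = fmul arcp double %x, %recip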

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so, make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order,
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and we also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
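  // For example (a sketch): if %x was hoisted into a conditional hoist block
  // but an un-hoisted phi in the loop still uses it, %x is moved up into the
  // hoist block's immediate dominator (ultimately the preheader), where it
  // again dominates every remaining use.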
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getName()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within the scope of the loop. LI is
// invariant if CurLoop is dominated by an invariant.start representing the
// same memory location and size as the memory location LI loads from, and
// also the invariant.start has no uses.
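// For example (a sketch; IR names are hypothetical):
//   entry:
//     %p8 = bitcast i32* %p to i8*
//     call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p8)
//     br label %loop
//   loop:
//     %v = load i32, i32* %p  ; 32 bits <= 4*8 bits, and the invariant.start
//                             ; dominates the header, so %v is invariant.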
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const uint32_t LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing a high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing a load operand with a high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    unsigned InvariantSizeInBits =
        cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) ||
          isa<UnaryOperator>(I) || isa<BinaryOperator>(I) ||
          isa<SelectInst>(I) || isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I));
}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AliasAnalysis::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
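      // For example (a sketch; the callee is hypothetical): given
      //   declare i32 @f(i32* %p) readonly argmemonly
      // a call to @f(%q) in the loop is hoistable or sinkable as long as
      // nothing in the loop may write through %q.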
      if (AliasAnalysis::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->arg_operands())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
                  CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
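    // For example (a sketch; IR names are hypothetical):
    //   loop:
    //     store i32 %inv, i32* @g   ; the only access to @g in the loop
    // can be moved out of the loop, whereas a loop that also loads @g is
    // left to scalar promotion.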
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap, give up, we're not
      // walking a list that long.
      if (Flags->NoOfMemAccTooLarge)
        return false;
      // Check store only if there's still "quota" to check clobber.
      if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap)
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->IsSink && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected ordered or volatile load");
                return false;
              }
              // Any call, while it may not be clobbering SI, it may be a use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI. Check CI's attributes and arguments; the number of
                // such checks performed is limited above by NoOfMemAccTooLarge.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }

      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->LicmMssaOptCounter++;
      // If there are no clobbering Defs in the loop, store is safe to hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
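/// For example (a sketch; IR names are hypothetical), the LCSSA phi
///   %p = phi i32 [ %v, %exiting1 ], [ %v, %exiting2 ]
/// is trivially replaceable with the instruction defining %v.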
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is free in the loop.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP) != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
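    // For example (a sketch; IR names are hypothetical), the GEP in
    //   %a = getelementptr i32, i32* %base, i64 %i
    //   %v = load i32, i32* %a
    // is free only because its sole in-loop user is a load in the same block,
    // so the GEP can fold into the load's addressing mode.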
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I) == TargetTransformInfo::TCC_Free;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *CloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
  // particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
       ++OI)
    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
        if (!OLoop->contains(&PN)) {
          PHINode *OpPN =
              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                              OInst->getName() + ".lcssa", &ExitBlock.front());
          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
          *OI = OpPN;
        }
  return New;
}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
  if (AST)
    AST->deleteValue(&I);
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(), MemorySSA::End);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = CloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exists, it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping this corner case, we can make updating
  // BlockColors after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *BBPred = *PI;
    if (isa<IndirectBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form where each predecessor of each exit block
  // should be contained within the loop. For example, this will convert the
  // loop below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-block with BlockColors in
      // canSplitPredecessors(), we can simply assign predecessor's color to
      // the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting the
        // reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  bool Changed = false;
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

  // Iterate over users to be ready for actual sinking. Replace users reached
  // via unreachable blocks with undef and make all user PHIs trivially
  // replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // We should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");
    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
    Changed = true;
  }
  return Changed;
}

/// When an instruction that uses only loop-invariant operands is found to be
/// safe to hoist, this function is called to do the dirty work.
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
                    << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
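  // For example (a sketch): a load tagged !nonnull inside
  //   if (p != nullptr) { ... load with !nonnull ... }
  // must drop !nonnull when hoisted above the null check, since the metadata
  // only holds on the guarded path.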
1677 | if (I.hasMetadataOtherThanDebugLoc() && | ||||||||
1678 | // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning | ||||||||
1679 | // time in isGuaranteedToExecute if we don't actually have anything to | ||||||||
1680 | // drop. It is a compile time optimization, not required for correctness. | ||||||||
1681 | !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) | ||||||||
1682 | I.dropUnknownNonDebugMetadata(); | ||||||||
1683 | |||||||||
1684 | if (isa<PHINode>(I)) | ||||||||
1685 | // Move the new node to the end of the phi list in the destination block. | ||||||||
1686 | moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU); | ||||||||
1687 | else | ||||||||
1688 | // Move the new node to the destination block, before its terminator. | ||||||||
1689 | moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU); | ||||||||
1690 | |||||||||
1691 | // Apply line 0 debug locations when we are moving instructions to different | ||||||||
1692 | // basic blocks because we want to avoid jumpy line tables. | ||||||||
1693 | if (const DebugLoc &DL = I.getDebugLoc()) | ||||||||
1694 | I.setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt())); | ||||||||
1695 | |||||||||
1696 | if (isa<LoadInst>(I)) | ||||||||
1697 | ++NumMovedLoads; | ||||||||
1698 | else if (isa<CallInst>(I)) | ||||||||
1699 | ++NumMovedCalls; | ||||||||
1700 | ++NumHoisted; | ||||||||
1701 | } | ||||||||
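To make the transformation concrete, here is a hedged source-level sketch (hypothetical functions; the pass of course operates on LLVM IR, not C++):

// Before LICM: the division is recomputed on every iteration even though
// both of its operands are loop invariant.
int sumBefore(const int *A, int N, int X, int Y) {
  int Sum = 0;
  for (int I = 0; I < N; ++I)
    Sum += A[I] + X / Y; // X / Y is loop invariant
  return Sum;
}

// After hoisting: the invariant computation moves to the "preheader",
// i.e. straight-line code executed once before the loop is entered.
int sumAfter(const int *A, int N, int X, int Y) {
  int Sum = 0;
  int Inv = X / Y; // hoisted; a division may trap, so this relies on the
                   // guaranteed-to-execute / safe-to-speculate checks below
  for (int I = 0; I < N; ++I)
    Sum += A[I] + Inv;
  return Sum;
}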
1702 | |||||||||
1703 | /// Only sink or hoist an instruction if it is not a trapping instruction, | ||||||||
1704 | /// if the instruction is known not to trap when moved to the preheader, | ||||||||
1705 | /// or if it is a trapping instruction that is guaranteed to execute. | ||||||||
1706 | static bool isSafeToExecuteUnconditionally(Instruction &Inst, | ||||||||
1707 | const DominatorTree *DT, | ||||||||
1708 | const Loop *CurLoop, | ||||||||
1709 | const LoopSafetyInfo *SafetyInfo, | ||||||||
1710 | OptimizationRemarkEmitter *ORE, | ||||||||
1711 | const Instruction *CtxI) { | ||||||||
1712 | if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT)) | ||||||||
1713 | return true; | ||||||||
1714 | |||||||||
1715 | bool GuaranteedToExecute = | ||||||||
1716 | SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop); | ||||||||
1717 | |||||||||
1718 | if (!GuaranteedToExecute) { | ||||||||
1719 | auto *LI = dyn_cast<LoadInst>(&Inst); | ||||||||
1720 | if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand())) | ||||||||
1721 | ORE->emit([&]() { | ||||||||
1722 | return OptimizationRemarkMissed( | ||||||||
1723 | DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressCondExecuted", LI) | ||||||||
1724 | << "failed to hoist load with loop-invariant address " | ||||||||
1725 | "because load is conditionally executed"; | ||||||||
1726 | }); | ||||||||
1727 | } | ||||||||
1728 | |||||||||
1729 | return GuaranteedToExecute; | ||||||||
1730 | } | ||||||||
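A hedged source-level illustration of the conditionally-executed case that the remark above diagnoses (hypothetical code):

// The load of *P is only executed when C is true, so it is not guaranteed
// to execute. Unless *P can be proven dereferenceable, hoisting the load
// into the preheader could introduce a fault the original program never had.
int sumConditional(int *P, bool C, int N) {
  int Sum = 0;
  for (int I = 0; I < N; ++I)
    if (C)
      Sum += *P; // conditionally executed load of a loop-invariant address
  return Sum;
}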
1731 | |||||||||
1732 | namespace { | ||||||||
1733 | class LoopPromoter : public LoadAndStorePromoter { | ||||||||
1734 | Value *SomePtr; // Designated pointer to store to. | ||||||||
1735 | const SmallSetVector<Value *, 8> &PointerMustAliases; | ||||||||
1736 | SmallVectorImpl<BasicBlock *> &LoopExitBlocks; | ||||||||
1737 | SmallVectorImpl<Instruction *> &LoopInsertPts; | ||||||||
1738 | SmallVectorImpl<MemoryAccess *> &MSSAInsertPts; | ||||||||
1739 | PredIteratorCache &PredCache; | ||||||||
1740 | AliasSetTracker &AST; | ||||||||
1741 | MemorySSAUpdater *MSSAU; | ||||||||
1742 | LoopInfo &LI; | ||||||||
1743 | DebugLoc DL; | ||||||||
1744 | int Alignment; | ||||||||
1745 | bool UnorderedAtomic; | ||||||||
1746 | AAMDNodes AATags; | ||||||||
1747 | ICFLoopSafetyInfo &SafetyInfo; | ||||||||
1748 | |||||||||
1749 | Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const { | ||||||||
1750 | if (Instruction *I = dyn_cast<Instruction>(V)) | ||||||||
1751 | if (Loop *L = LI.getLoopFor(I->getParent())) | ||||||||
1752 | if (!L->contains(BB)) { | ||||||||
1753 | // We need to create an LCSSA PHI node for the incoming value and | ||||||||
1754 | // store that. | ||||||||
1755 | PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB), | ||||||||
1756 | I->getName() + ".lcssa", &BB->front()); | ||||||||
1757 | for (BasicBlock *Pred : PredCache.get(BB)) | ||||||||
1758 | PN->addIncoming(I, Pred); | ||||||||
1759 | return PN; | ||||||||
1760 | } | ||||||||
1761 | return V; | ||||||||
1762 | } | ||||||||
1763 | |||||||||
1764 | public: | ||||||||
1765 | LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S, | ||||||||
1766 | const SmallSetVector<Value *, 8> &PMA, | ||||||||
1767 | SmallVectorImpl<BasicBlock *> &LEB, | ||||||||
1768 | SmallVectorImpl<Instruction *> &LIP, | ||||||||
1769 | SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC, | ||||||||
1770 | AliasSetTracker &ast, MemorySSAUpdater *MSSAU, LoopInfo &li, | ||||||||
1771 | DebugLoc dl, int alignment, bool UnorderedAtomic, | ||||||||
1772 | const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo) | ||||||||
1773 | : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA), | ||||||||
1774 | LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), | ||||||||
1775 | PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)), | ||||||||
1776 | Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags), | ||||||||
1777 | SafetyInfo(SafetyInfo) {} | ||||||||
1778 | |||||||||
1779 | bool isInstInList(Instruction *I, | ||||||||
1780 | const SmallVectorImpl<Instruction *> &) const override { | ||||||||
1781 | Value *Ptr; | ||||||||
1782 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) | ||||||||
1783 | Ptr = LI->getOperand(0); | ||||||||
1784 | else | ||||||||
1785 | Ptr = cast<StoreInst>(I)->getPointerOperand(); | ||||||||
1786 | return PointerMustAliases.count(Ptr); | ||||||||
1787 | } | ||||||||
1788 | |||||||||
1789 | void doExtraRewritesBeforeFinalDeletion() override { | ||||||||
1790 |     // Insert stores into the loop exit blocks. Each exit block gets a | ||||||||
1791 | // store of the live-out values that feed them. Since we've already told | ||||||||
1792 | // the SSA updater about the defs in the loop and the preheader | ||||||||
1793 | // definition, it is all set and we can start using it. | ||||||||
1794 | for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) { | ||||||||
1795 | BasicBlock *ExitBlock = LoopExitBlocks[i]; | ||||||||
1796 | Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock); | ||||||||
1797 | LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock); | ||||||||
1798 | Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock); | ||||||||
1799 | Instruction *InsertPos = LoopInsertPts[i]; | ||||||||
1800 | StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos); | ||||||||
1801 | if (UnorderedAtomic) | ||||||||
1802 | NewSI->setOrdering(AtomicOrdering::Unordered); | ||||||||
1803 | NewSI->setAlignment(MaybeAlign(Alignment)); | ||||||||
1804 | NewSI->setDebugLoc(DL); | ||||||||
1805 | if (AATags) | ||||||||
1806 | NewSI->setAAMetadata(AATags); | ||||||||
1807 | |||||||||
1808 | if (MSSAU) { | ||||||||
1809 | MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i]; | ||||||||
1810 | MemoryAccess *NewMemAcc; | ||||||||
1811 | if (!MSSAInsertPoint) { | ||||||||
1812 | NewMemAcc = MSSAU->createMemoryAccessInBB( | ||||||||
1813 | NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning); | ||||||||
1814 | } else { | ||||||||
1815 | NewMemAcc = | ||||||||
1816 | MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint); | ||||||||
1817 | } | ||||||||
1818 | MSSAInsertPts[i] = NewMemAcc; | ||||||||
1819 | MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); | ||||||||
1820 | // FIXME: true for safety, false may still be correct. | ||||||||
1821 | } | ||||||||
1822 | } | ||||||||
1823 | } | ||||||||
1824 | |||||||||
1825 | void replaceLoadWithValue(LoadInst *LI, Value *V) const override { | ||||||||
1826 | // Update alias analysis. | ||||||||
1827 | AST.copyValue(LI, V); | ||||||||
1828 | } | ||||||||
1829 | void instructionDeleted(Instruction *I) const override { | ||||||||
1830 | SafetyInfo.removeInstruction(I); | ||||||||
1831 | AST.deleteValue(I); | ||||||||
1832 | if (MSSAU) | ||||||||
1833 | MSSAU->removeMemoryAccess(I); | ||||||||
1834 | } | ||||||||
1835 | }; | ||||||||
1836 | |||||||||
1837 | |||||||||
1838 | /// Return true iff we can prove that a caller of this function can not inspect | ||||||||
1839 | /// the contents of the provided object in a well defined program. | ||||||||
1840 | bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) { | ||||||||
1841 | if (isa<AllocaInst>(Object)) | ||||||||
1842 | // Since the alloca goes out of scope, we know the caller can't retain a | ||||||||
1843 | // reference to it and be well defined. Thus, we don't need to check for | ||||||||
1844 | // capture. | ||||||||
1845 | return true; | ||||||||
1846 | |||||||||
1847 | // For all other objects we need to know that the caller can't possibly | ||||||||
1848 | // have gotten a reference to the object. There are two components of | ||||||||
1849 | // that: | ||||||||
1850 | // 1) Object can't be escaped by this function. This is what | ||||||||
1851 | // PointerMayBeCaptured checks. | ||||||||
1852 | // 2) Object can't have been captured at definition site. For this, we | ||||||||
1853 | // need to know the return value is noalias. At the moment, we use a | ||||||||
1854 | // weaker condition and handle only AllocLikeFunctions (which are | ||||||||
1855 | // known to be noalias). TODO | ||||||||
1856 | return isAllocLikeFn(Object, TLI) && | ||||||||
1857 | !PointerMayBeCaptured(Object, true, true); | ||||||||
1858 | } | ||||||||
1859 | |||||||||
1860 | } // namespace | ||||||||
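A hedged sketch of the two cases isKnownNonEscaping distinguishes (demo and GlobalPtr are hypothetical names used only for illustration):

#include <cstdlib>

int *GlobalPtr; // hypothetical capture target

void demo() {
  int Local = 0; // alloca: trivially non-escaping, goes out of scope
  (void)Local;
  int *Heap = (int *)std::malloc(sizeof(int)); // alloc-like, noalias result:
                                               // non-escaping only while it
                                               // is never captured
  // GlobalPtr = Heap; // capturing it here would make the answer false
  std::free(Heap);
}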
1861 | |||||||||
1862 | /// Try to promote memory values to scalars by sinking stores out of the | ||||||||
1863 | /// loop and moving loads to before the loop. We do this by looping over | ||||||||
1864 | /// the stores in the loop, looking for stores to must-aliased pointers | ||||||||
1865 | /// which are loop invariant. | ||||||||
1866 | /// | ||||||||
1867 | bool llvm::promoteLoopAccessesToScalars( | ||||||||
1868 | const SmallSetVector<Value *, 8> &PointerMustAliases, | ||||||||
1869 | SmallVectorImpl<BasicBlock *> &ExitBlocks, | ||||||||
1870 | SmallVectorImpl<Instruction *> &InsertPts, | ||||||||
1871 | SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC, | ||||||||
1872 | LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, | ||||||||
1873 | Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU, | ||||||||
1874 | ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) { | ||||||||
1875 | // Verify inputs. | ||||||||
1876 |   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr && | ||||||||
1877 |          CurAST != nullptr && SafetyInfo != nullptr && | ||||||||
1878 |          "Unexpected Input to promoteLoopAccessesToScalars"); | ||||||||
1879 | |||||||||
1880 | Value *SomePtr = *PointerMustAliases.begin(); | ||||||||
1881 | BasicBlock *Preheader = CurLoop->getLoopPreheader(); | ||||||||
1882 | |||||||||
1883 | // It is not safe to promote a load/store from the loop if the load/store is | ||||||||
1884 | // conditional. For example, turning: | ||||||||
1885 | // | ||||||||
1886 | // for () { if (c) *P += 1; } | ||||||||
1887 | // | ||||||||
1888 | // into: | ||||||||
1889 | // | ||||||||
1890 | // tmp = *P; for () { if (c) tmp +=1; } *P = tmp; | ||||||||
1891 | // | ||||||||
1892 | // is not safe, because *P may only be valid to access if 'c' is true. | ||||||||
1893 | // | ||||||||
1894 | // The safety property divides into two parts: | ||||||||
1895 | // p1) The memory may not be dereferenceable on entry to the loop. In this | ||||||||
1896 | // case, we can't insert the required load in the preheader. | ||||||||
1897 | // p2) The memory model does not allow us to insert a store along any dynamic | ||||||||
1898 | // path which did not originally have one. | ||||||||
1899 | // | ||||||||
1900 | // If at least one store is guaranteed to execute, both properties are | ||||||||
1901 | // satisfied, and promotion is legal. | ||||||||
1902 | // | ||||||||
1903 | // This, however, is not a necessary condition. Even if no store/load is | ||||||||
1904 | // guaranteed to execute, we can still establish these properties. | ||||||||
1905 | // We can establish (p1) by proving that hoisting the load into the preheader | ||||||||
1906 | // is safe (i.e. proving dereferenceability on all paths through the loop). We | ||||||||
1907 | // can use any access within the alias set to prove dereferenceability, | ||||||||
1908 | // since they're all must alias. | ||||||||
1909 | // | ||||||||
1910 |   // There are two ways to establish (p2): | ||||||||
1911 | // a) Prove the location is thread-local. In this case the memory model | ||||||||
1912 | // requirement does not apply, and stores are safe to insert. | ||||||||
1913 |   // b) Prove a store dominates every exit block. In this case, if an exit | ||||||||
1914 |   // block is reached, the original dynamic path would have taken us through | ||||||||
1915 | // the store, so inserting a store into the exit block is safe. Note that this | ||||||||
1916 | // is different from the store being guaranteed to execute. For instance, | ||||||||
1917 | // if an exception is thrown on the first iteration of the loop, the original | ||||||||
1918 | // store is never executed, but the exit blocks are not executed either. | ||||||||
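  // As a hedged aside (not from the source), a concrete shape for (b):
  //
  //   do { *P = V; V = next(V); } while (!C);
  //
  // Here the store dominates the loop's single exit, so any execution that
  // reaches the exit block has already stored to *P at least once, and the
  // sunk store in the exit block adds no store along a path that lacked one.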
1919 | |||||||||
1920 | bool DereferenceableInPH = false; | ||||||||
1921 | bool SafeToInsertStore = false; | ||||||||
1922 | |||||||||
1923 | SmallVector<Instruction *, 64> LoopUses; | ||||||||
1924 | |||||||||
1925 | // We start with an alignment of one and try to find instructions that allow | ||||||||
1926 | // us to prove better alignment. | ||||||||
1927 | unsigned Alignment = 1; | ||||||||
1928 |   // Keep track of which types of access we see. | ||||||||
1929 | bool SawUnorderedAtomic = false; | ||||||||
1930 | bool SawNotAtomic = false; | ||||||||
1931 | AAMDNodes AATags; | ||||||||
1932 | |||||||||
1933 | const DataLayout &MDL = Preheader->getModule()->getDataLayout(); | ||||||||
1934 | |||||||||
1935 | bool IsKnownThreadLocalObject = false; | ||||||||
1936 | if (SafetyInfo->anyBlockMayThrow()) { | ||||||||
1937 | // If a loop can throw, we have to insert a store along each unwind edge. | ||||||||
1938 | // That said, we can't actually make the unwind edge explicit. Therefore, | ||||||||
1939 | // we have to prove that the store is dead along the unwind edge. We do | ||||||||
1940 | // this by proving that the caller can't have a reference to the object | ||||||||
1941 | // after return and thus can't possibly load from the object. | ||||||||
1942 | Value *Object = GetUnderlyingObject(SomePtr, MDL); | ||||||||
1943 | if (!isKnownNonEscaping(Object, TLI)) | ||||||||
1944 | return false; | ||||||||
1945 | // Subtlety: Alloca's aren't visible to callers, but *are* potentially | ||||||||
1946 | // visible to other threads if captured and used during their lifetimes. | ||||||||
1947 | IsKnownThreadLocalObject = !isa<AllocaInst>(Object); | ||||||||
1948 | } | ||||||||
1949 | |||||||||
1950 | // Check that all of the pointers in the alias set have the same type. We | ||||||||
1951 | // cannot (yet) promote a memory location that is loaded and stored in | ||||||||
1952 | // different sizes. While we are at it, collect alignment and AA info. | ||||||||
1953 | for (Value *ASIV : PointerMustAliases) { | ||||||||
1954 | // Check that all of the pointers in the alias set have the same type. We | ||||||||
1955 | // cannot (yet) promote a memory location that is loaded and stored in | ||||||||
1956 | // different sizes. | ||||||||
1957 | if (SomePtr->getType() != ASIV->getType()) | ||||||||
1958 | return false; | ||||||||
1959 | |||||||||
1960 | for (User *U : ASIV->users()) { | ||||||||
1961 | // Ignore instructions that are outside the loop. | ||||||||
1962 | Instruction *UI = dyn_cast<Instruction>(U); | ||||||||
1963 | if (!UI || !CurLoop->contains(UI)) | ||||||||
1964 | continue; | ||||||||
1965 | |||||||||
1966 |       // If there is a non-load/store instruction in the loop, we can't promote | ||||||||
1967 |       // it. | ||||||||
1968 | if (LoadInst *Load = dyn_cast<LoadInst>(UI)) { | ||||||||
1969 | if (!Load->isUnordered()) | ||||||||
1970 | return false; | ||||||||
1971 | |||||||||
1972 | SawUnorderedAtomic |= Load->isAtomic(); | ||||||||
1973 | SawNotAtomic |= !Load->isAtomic(); | ||||||||
1974 | |||||||||
1975 | unsigned InstAlignment = Load->getAlignment(); | ||||||||
1976 | if (!InstAlignment) | ||||||||
1977 | InstAlignment = | ||||||||
1978 | MDL.getABITypeAlignment(Load->getType()); | ||||||||
1979 | |||||||||
1980 | // Note that proving a load safe to speculate requires proving | ||||||||
1981 | // sufficient alignment at the target location. Proving it guaranteed | ||||||||
1982 | // to execute does as well. Thus we can increase our guaranteed | ||||||||
1983 | // alignment as well. | ||||||||
1984 | if (!DereferenceableInPH || (InstAlignment > Alignment)) | ||||||||
1985 | if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo, | ||||||||
1986 | ORE, Preheader->getTerminator())) { | ||||||||
1987 | DereferenceableInPH = true; | ||||||||
1988 | Alignment = std::max(Alignment, InstAlignment); | ||||||||
1989 | } | ||||||||
1990 | } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) { | ||||||||
1991 | // Stores *of* the pointer are not interesting, only stores *to* the | ||||||||
1992 | // pointer. | ||||||||
1993 | if (UI->getOperand(1) != ASIV) | ||||||||
1994 | continue; | ||||||||
1995 | if (!Store->isUnordered()) | ||||||||
1996 | return false; | ||||||||
1997 | |||||||||
1998 | SawUnorderedAtomic |= Store->isAtomic(); | ||||||||
1999 | SawNotAtomic |= !Store->isAtomic(); | ||||||||
2000 | |||||||||
2001 | // If the store is guaranteed to execute, both properties are satisfied. | ||||||||
2002 | // We may want to check if a store is guaranteed to execute even if we | ||||||||
2003 | // already know that promotion is safe, since it may have higher | ||||||||
2004 | // alignment than any other guaranteed stores, in which case we can | ||||||||
2005 | // raise the alignment on the promoted store. | ||||||||
2006 | unsigned InstAlignment = Store->getAlignment(); | ||||||||
2007 | if (!InstAlignment) | ||||||||
2008 | InstAlignment = | ||||||||
2009 | MDL.getABITypeAlignment(Store->getValueOperand()->getType()); | ||||||||
2010 | |||||||||
2011 | if (!DereferenceableInPH || !SafeToInsertStore || | ||||||||
2012 | (InstAlignment > Alignment)) { | ||||||||
2013 | if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) { | ||||||||
2014 | DereferenceableInPH = true; | ||||||||
2015 | SafeToInsertStore = true; | ||||||||
2016 | Alignment = std::max(Alignment, InstAlignment); | ||||||||
2017 | } | ||||||||
2018 | } | ||||||||
2019 | |||||||||
2020 | // If a store dominates all exit blocks, it is safe to sink. | ||||||||
2021 | // As explained above, if an exit block was executed, a dominating | ||||||||
2022 | // store must have been executed at least once, so we are not | ||||||||
2023 | // introducing stores on paths that did not have them. | ||||||||
2024 | // Note that this only looks at explicit exit blocks. If we ever | ||||||||
2025 | // start sinking stores into unwind edges (see above), this will break. | ||||||||
2026 | if (!SafeToInsertStore) | ||||||||
2027 | SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) { | ||||||||
2028 | return DT->dominates(Store->getParent(), Exit); | ||||||||
2029 | }); | ||||||||
2030 | |||||||||
2031 | // If the store is not guaranteed to execute, we may still get | ||||||||
2032 | // deref info through it. | ||||||||
2033 | if (!DereferenceableInPH) { | ||||||||
2034 | DereferenceableInPH = isDereferenceableAndAlignedPointer( | ||||||||
2035 | Store->getPointerOperand(), Store->getValueOperand()->getType(), | ||||||||
2036 | Store->getAlignment(), MDL, Preheader->getTerminator(), DT); | ||||||||
2037 | } | ||||||||
2038 | } else | ||||||||
2039 | return false; // Not a load or store. | ||||||||
2040 | |||||||||
2041 | // Merge the AA tags. | ||||||||
2042 | if (LoopUses.empty()) { | ||||||||
2043 | // On the first load/store, just take its AA tags. | ||||||||
2044 | UI->getAAMetadata(AATags); | ||||||||
2045 | } else if (AATags) { | ||||||||
2046 | UI->getAAMetadata(AATags, /* Merge = */ true); | ||||||||
2047 | } | ||||||||
2048 | |||||||||
2049 | LoopUses.push_back(UI); | ||||||||
2050 | } | ||||||||
2051 | } | ||||||||
2052 | |||||||||
2053 | // If we found both an unordered atomic instruction and a non-atomic memory | ||||||||
2054 | // access, bail. We can't blindly promote non-atomic to atomic since we | ||||||||
2055 | // might not be able to lower the result. We can't downgrade since that | ||||||||
2056 |   // would violate the memory model. Also, align 0 is an error for atomics. | ||||||||
2057 | if (SawUnorderedAtomic && SawNotAtomic) | ||||||||
2058 | return false; | ||||||||
2059 | |||||||||
2060 | // If we're inserting an atomic load in the preheader, we must be able to | ||||||||
2061 | // lower it. We're only guaranteed to be able to lower naturally aligned | ||||||||
2062 | // atomics. | ||||||||
2063 | auto *SomePtrElemType = SomePtr->getType()->getPointerElementType(); | ||||||||
2064 | if (SawUnorderedAtomic && | ||||||||
2065 | Alignment < MDL.getTypeStoreSize(SomePtrElemType)) | ||||||||
2066 | return false; | ||||||||
2067 | |||||||||
2068 | // If we couldn't prove we can hoist the load, bail. | ||||||||
2069 | if (!DereferenceableInPH) | ||||||||
2070 | return false; | ||||||||
2071 | |||||||||
2072 | // We know we can hoist the load, but don't have a guaranteed store. | ||||||||
2073 | // Check whether the location is thread-local. If it is, then we can insert | ||||||||
2074 | // stores along paths which originally didn't have them without violating the | ||||||||
2075 | // memory model. | ||||||||
2076 | if (!SafeToInsertStore) { | ||||||||
2077 | if (IsKnownThreadLocalObject) | ||||||||
2078 | SafeToInsertStore = true; | ||||||||
2079 | else { | ||||||||
2080 | Value *Object = GetUnderlyingObject(SomePtr, MDL); | ||||||||
2081 | SafeToInsertStore = | ||||||||
2082 | (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) && | ||||||||
2083 | !PointerMayBeCaptured(Object, true, true); | ||||||||
2084 | } | ||||||||
2085 | } | ||||||||
2086 | |||||||||
2087 | // If we've still failed to prove we can sink the store, give up. | ||||||||
2088 | if (!SafeToInsertStore) | ||||||||
2089 | return false; | ||||||||
2090 | |||||||||
2091 |   // Otherwise, this is safe to promote, let's do it! | ||||||||
2092 |   LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr | ||||||||
2093 |                     << '\n'); | ||||||||
2094 | ORE->emit([&]() { | ||||||||
2095 |     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar", | ||||||||
2096 | LoopUses[0]) | ||||||||
2097 | << "Moving accesses to memory location out of the loop"; | ||||||||
2098 | }); | ||||||||
2099 | ++NumPromoted; | ||||||||
2100 | |||||||||
2101 | // Grab a debug location for the inserted loads/stores; given that the | ||||||||
2102 | // inserted loads/stores have little relation to the original loads/stores, | ||||||||
2103 | // this code just arbitrarily picks a location from one, since any debug | ||||||||
2104 | // location is better than none. | ||||||||
2105 | DebugLoc DL = LoopUses[0]->getDebugLoc(); | ||||||||
2106 | |||||||||
2107 | // We use the SSAUpdater interface to insert phi nodes as required. | ||||||||
2108 | SmallVector<PHINode *, 16> NewPHIs; | ||||||||
2109 | SSAUpdater SSA(&NewPHIs); | ||||||||
2110 | LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks, | ||||||||
2111 | InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL, | ||||||||
2112 | Alignment, SawUnorderedAtomic, AATags, *SafetyInfo); | ||||||||
2113 | |||||||||
2114 | // Set up the preheader to have a definition of the value. It is the live-out | ||||||||
2115 |   // value from the preheader that uses inside the loop will consume. | ||||||||
2116 | LoadInst *PreheaderLoad = new LoadInst( | ||||||||
2117 | SomePtr->getType()->getPointerElementType(), SomePtr, | ||||||||
2118 | SomePtr->getName() + ".promoted", Preheader->getTerminator()); | ||||||||
2119 | if (SawUnorderedAtomic) | ||||||||
2120 | PreheaderLoad->setOrdering(AtomicOrdering::Unordered); | ||||||||
2121 | PreheaderLoad->setAlignment(MaybeAlign(Alignment)); | ||||||||
2122 | PreheaderLoad->setDebugLoc(DL); | ||||||||
2123 | if (AATags) | ||||||||
2124 | PreheaderLoad->setAAMetadata(AATags); | ||||||||
2125 | SSA.AddAvailableValue(Preheader, PreheaderLoad); | ||||||||
2126 | |||||||||
2127 | if (MSSAU) { | ||||||||
2128 | MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB( | ||||||||
2129 | PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End); | ||||||||
2130 | MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess); | ||||||||
2131 | MSSAU->insertUse(NewMemUse, /*RenameUses=*/true); | ||||||||
2132 | } | ||||||||
2133 | |||||||||
2134 | if (MSSAU && VerifyMemorySSA) | ||||||||
2135 | MSSAU->getMemorySSA()->verifyMemorySSA(); | ||||||||
2136 | // Rewrite all the loads in the loop and remember all the definitions from | ||||||||
2137 | // stores in the loop. | ||||||||
2138 | Promoter.run(LoopUses); | ||||||||
2139 | |||||||||
2140 | if (MSSAU && VerifyMemorySSA) | ||||||||
2141 | MSSAU->getMemorySSA()->verifyMemorySSA(); | ||||||||
2142 | // If the SSAUpdater didn't use the load in the preheader, just zap it now. | ||||||||
2143 | if (PreheaderLoad->use_empty()) | ||||||||
2144 | eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU); | ||||||||
2145 | |||||||||
2146 | return true; | ||||||||
2147 | } | ||||||||
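For orientation, a hedged source-level sketch of what successful promotion does (hypothetical functions; the pass rewrites IR, and SSAUpdater builds any PHIs the register value needs):

// Before promotion: *P is loaded and stored on every iteration.
int accumulate(int *P, int N) {
  for (int I = 0; I < N; ++I)
    *P += I;
  return *P;
}

// After promotion: one load in the preheader (the ".promoted" load above),
// the value lives in a register inside the loop, and one store per exit.
int accumulatePromoted(int *P, int N) {
  int Tmp = *P;   // preheader load
  for (int I = 0; I < N; ++I)
    Tmp += I;     // register update inside the loop
  *P = Tmp;       // sunk store at the loop exit
  return Tmp;
}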
2148 | |||||||||
2149 | /// Returns an owning pointer to an alias set which incorporates aliasing info | ||||||||
2150 | /// from L and all subloops of L. | ||||||||
2151 | /// FIXME: In new pass manager, there is no helper function to handle loop | ||||||||
2152 | /// analysis such as cloneBasicBlockAnalysis, so the AST needs to be recomputed | ||||||||
2153 | /// from scratch for every loop. Hook up with the helper functions when | ||||||||
2154 | /// available in the new pass manager to avoid redundant computation. | ||||||||
2155 | std::unique_ptr<AliasSetTracker> | ||||||||
2156 | LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI, | ||||||||
2157 | AliasAnalysis *AA) { | ||||||||
2158 | std::unique_ptr<AliasSetTracker> CurAST; | ||||||||
2159 | SmallVector<Loop *, 4> RecomputeLoops; | ||||||||
2160 | for (Loop *InnerL : L->getSubLoops()) { | ||||||||
2161 | auto MapI = LoopToAliasSetMap.find(InnerL); | ||||||||
2162 | // If the AST for this inner loop is missing it may have been merged into | ||||||||
2163 | // some other loop's AST and then that loop unrolled, and so we need to | ||||||||
2164 | // recompute it. | ||||||||
2165 | if (MapI == LoopToAliasSetMap.end()) { | ||||||||
2166 | RecomputeLoops.push_back(InnerL); | ||||||||
2167 | continue; | ||||||||
2168 | } | ||||||||
2169 | std::unique_ptr<AliasSetTracker> InnerAST = std::move(MapI->second); | ||||||||
2170 | |||||||||
2171 | if (CurAST) { | ||||||||
2172 |       // What if InnerLoop was modified by other passes? | ||||||||
2173 | // Once we've incorporated the inner loop's AST into ours, we don't need | ||||||||
2174 | // the subloop's anymore. | ||||||||
2175 | CurAST->add(*InnerAST); | ||||||||
2176 | } else { | ||||||||
2177 | CurAST = std::move(InnerAST); | ||||||||
2178 | } | ||||||||
2179 | LoopToAliasSetMap.erase(MapI); | ||||||||
2180 | } | ||||||||
2181 | if (!CurAST) | ||||||||
2182 | CurAST = std::make_unique<AliasSetTracker>(*AA); | ||||||||
2183 | |||||||||
2184 |   // Add everything from the subloops that are no longer directly available. | ||||||||
2185 | for (Loop *InnerL : RecomputeLoops) | ||||||||
2186 | for (BasicBlock *BB : InnerL->blocks()) | ||||||||
2187 | CurAST->add(*BB); | ||||||||
2188 | |||||||||
2189 | // And merge in this loop (without anything from inner loops). | ||||||||
2190 | for (BasicBlock *BB : L->blocks()) | ||||||||
2191 | if (LI->getLoopFor(BB) == L) | ||||||||
2192 | CurAST->add(*BB); | ||||||||
2193 | |||||||||
2194 | return CurAST; | ||||||||
2195 | } | ||||||||
2196 | |||||||||
2197 | std::unique_ptr<AliasSetTracker> | ||||||||
2198 | LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA( | ||||||||
2199 | Loop *L, AliasAnalysis *AA, MemorySSAUpdater *MSSAU) { | ||||||||
2200 | auto *MSSA = MSSAU->getMemorySSA(); | ||||||||
2201 | auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L); | ||||||||
2202 | CurAST->addAllInstructionsInLoopUsingMSSA(); | ||||||||
2203 | return CurAST; | ||||||||
2204 | } | ||||||||
2205 | |||||||||
2206 | /// Simple analysis hook. Clone alias set info. | ||||||||
2207 | /// | ||||||||
2208 | void LegacyLICMPass::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, | ||||||||
2209 | Loop *L) { | ||||||||
2210 | auto ASTIt = LICM.getLoopToAliasSetMap().find(L); | ||||||||
2211 | if (ASTIt == LICM.getLoopToAliasSetMap().end()) | ||||||||
2212 | return; | ||||||||
2213 | |||||||||
2214 | ASTIt->second->copyValue(From, To); | ||||||||
2215 | } | ||||||||
2216 | |||||||||
2217 | /// Simple Analysis hook. Delete value V from alias set | ||||||||
2218 | /// | ||||||||
2219 | void LegacyLICMPass::deleteAnalysisValue(Value *V, Loop *L) { | ||||||||
2220 | auto ASTIt = LICM.getLoopToAliasSetMap().find(L); | ||||||||
2221 | if (ASTIt == LICM.getLoopToAliasSetMap().end()) | ||||||||
2222 | return; | ||||||||
2223 | |||||||||
2224 | ASTIt->second->deleteValue(V); | ||||||||
2225 | } | ||||||||
2226 | |||||||||
2227 | /// Simple Analysis hook. Delete value L from alias set map. | ||||||||
2228 | /// | ||||||||
2229 | void LegacyLICMPass::deleteAnalysisLoop(Loop *L) { | ||||||||
2230 | if (!LICM.getLoopToAliasSetMap().count(L)) | ||||||||
2231 | return; | ||||||||
2232 | |||||||||
2233 | LICM.getLoopToAliasSetMap().erase(L); | ||||||||
2234 | } | ||||||||
2235 | |||||||||
2236 | static bool pointerInvalidatedByLoop(MemoryLocation MemLoc, | ||||||||
2237 | AliasSetTracker *CurAST, Loop *CurLoop, | ||||||||
2238 | AliasAnalysis *AA) { | ||||||||
2239 | // First check to see if any of the basic blocks in CurLoop invalidate *V. | ||||||||
2240 | bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod(); | ||||||||
2241 | |||||||||
2242 | if (!isInvalidatedAccordingToAST || !LICMN2Theshold) | ||||||||
2243 | return isInvalidatedAccordingToAST; | ||||||||
2244 | |||||||||
2245 | // Check with a diagnostic analysis if we can refine the information above. | ||||||||
2246 | // This is to identify the limitations of using the AST. | ||||||||
2247 | // The alias set mechanism used by LICM has a major weakness in that it | ||||||||
2248 | // combines all things which may alias into a single set *before* asking | ||||||||
2249 | // modref questions. As a result, a single readonly call within a loop will | ||||||||
2250 | // collapse all loads and stores into a single alias set and report | ||||||||
2251 | // invalidation if the loop contains any store. For example, readonly calls | ||||||||
2252 | // with deopt states have this form and create a general alias set with all | ||||||||
2253 | // loads and stores. In order to get any LICM in loops containing possible | ||||||||
2254 | // deopt states we need a more precise invalidation of checking the mod ref | ||||||||
2255 | // info of each instruction within the loop and LI. This has a complexity of | ||||||||
2256 | // O(N^2), so currently, it is used only as a diagnostic tool since the | ||||||||
2257 | // default value of LICMN2Threshold is zero. | ||||||||
2258 | |||||||||
2259 | // Don't look at nested loops. | ||||||||
2260 | if (CurLoop->begin() != CurLoop->end()) | ||||||||
2261 | return true; | ||||||||
2262 | |||||||||
2263 | int N = 0; | ||||||||
2264 | for (BasicBlock *BB : CurLoop->getBlocks()) | ||||||||
2265 | for (Instruction &I : *BB) { | ||||||||
2266 | if (N >= LICMN2Theshold) { | ||||||||
2267 |         LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for " | ||||||||
2268 |                           << *(MemLoc.Ptr) << "\n"); | ||||||||
2269 | return true; | ||||||||
2270 | } | ||||||||
2271 | N++; | ||||||||
2272 | auto Res = AA->getModRefInfo(&I, MemLoc); | ||||||||
2273 | if (isModSet(Res)) { | ||||||||
2274 |         LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for " | ||||||||
2275 |                           << *(MemLoc.Ptr) << "\n"); | ||||||||
2276 | return true; | ||||||||
2277 | } | ||||||||
2278 | } | ||||||||
2279 |   LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n"); | ||||||||
2280 | return false; | ||||||||
2281 | } | ||||||||
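A hedged sketch of the alias-set collapse described above (readOnlyCall is a hypothetical readonly function, not a real API):

void readOnlyCall(const int *A, const int *B); // hypothetical, readonly

int sumInvariant(int *P, int *Q, int N) {
  int Sum = 0;
  for (int I = 0; I < N; ++I) {
    Sum += *P;          // loop-invariant load we would like to hoist
    *Q = Sum;           // store that makes the merged alias set "Mod"
    readOnlyCall(P, Q); // readonly call folding P and Q into one set,
                        // so the AST reports the load as invalidated
  }
  return Sum;
}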
2282 | |||||||||
2283 | static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU, | ||||||||
2284 | Loop *CurLoop, | ||||||||
2285 | SinkAndHoistLICMFlags &Flags) { | ||||||||
2286 |   // For hoisting, use the walker to determine safety. | ||||||||
2287 | if (!Flags.IsSink) { | ||||||||
2288 | MemoryAccess *Source; | ||||||||
2289 | // See declaration of SetLicmMssaOptCap for usage details. | ||||||||
2290 | if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap) | ||||||||
2291 | Source = MU->getDefiningAccess(); | ||||||||
2292 | else { | ||||||||
2293 | Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU); | ||||||||
2294 | Flags.LicmMssaOptCounter++; | ||||||||
2295 | } | ||||||||
2296 | return !MSSA->isLiveOnEntryDef(Source) && | ||||||||
2297 | CurLoop->contains(Source->getBlock()); | ||||||||
2298 | } | ||||||||
2299 | |||||||||
2300 | // For sinking, we'd need to check all Defs below this use. The getClobbering | ||||||||
2301 | // call will look on the backedge of the loop, but will check aliasing with | ||||||||
2302 | // the instructions on the previous iteration. | ||||||||
2303 | // For example: | ||||||||
2304 | // for (i ... ) | ||||||||
2305 | // load a[i] ( Use (LoE) | ||||||||
2306 | // store a[i] ( 1 = Def (2), with 2 = Phi for the loop. | ||||||||
2307 | // i++; | ||||||||
2308 | // The load sees no clobbering inside the loop, as the backedge alias check | ||||||||
2309 | // does phi translation, and will check aliasing against store a[i-1]. | ||||||||
2310 | // However sinking the load outside the loop, below the store is incorrect. | ||||||||
2311 | |||||||||
2312 | // For now, only sink if there are no Defs in the loop, and the existing ones | ||||||||
2313 | // precede the use and are in the same block. | ||||||||
2314 | // FIXME: Increase precision: Safe to sink if Use post dominates the Def; | ||||||||
2315 | // needs PostDominatorTreeAnalysis. | ||||||||
2316 | // FIXME: More precise: no Defs that alias this Use. | ||||||||
2317 | if (Flags.NoOfMemAccTooLarge) | ||||||||
2318 | return true; | ||||||||
2319 | for (auto *BB : CurLoop->getBlocks()) | ||||||||
2320 | if (auto *Accesses = MSSA->getBlockDefs(BB)) | ||||||||
2321 | for (const auto &MA : *Accesses) | ||||||||
2322 | if (const auto *MD = dyn_cast<MemoryDef>(&MA)) | ||||||||
2323 | if (MU->getBlock() != MD->getBlock() || | ||||||||
2324 | !MSSA->locallyDominates(MD, MU)) | ||||||||
2325 | return true; | ||||||||
2326 | return false; | ||||||||
2327 | } | ||||||||
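To ground the example in the comment above in source form (hypothetical code, memory accesses shown at the C level):

// The load of A[I] never observes the store from the *same* iteration, so
// MemorySSA's phi-translated clobber walk sees no clobber inside the loop.
// A copy of the load sunk below the loop, however, would run after the
// final store and read the updated value.
int readLast(int *A, int N) {
  int V = 0;
  for (int I = 0; I < N; ++I) {
    V = A[I];     // MemoryUse
    A[I] = V + 1; // MemoryDef later in the same block
  }
  return V;       // a sunk load copy here would yield A[N-1] + 1 instead
}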
2328 | |||||||||
2329 | /// Little predicate that returns true if the specified basic block is in | ||||||||
2330 | /// a subloop of the current one, not the current one itself. | ||||||||
2331 | /// | ||||||||
2332 | static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) { | ||||||||
2333 |   assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop"); | ||||||||
2334 | return LI->getLoopFor(BB) != CurLoop; | ||||||||
2335 | } |
File: | include/llvm/IR/PatternMatch.h |
1 | //===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===// | ||||||||
2 | // | ||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||
6 | // | ||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||
8 | // | ||||||||
9 | // This file provides a simple and efficient mechanism for performing general | ||||||||
10 | // tree-based pattern matches on the LLVM IR. The power of these routines is | ||||||||
11 | // that it allows you to write concise patterns that are expressive and easy to | ||||||||
12 | // understand. The other major advantage of this is that it allows you to | ||||||||
13 | // trivially capture/bind elements in the pattern to variables. For example, | ||||||||
14 | // you can do something like this: | ||||||||
15 | // | ||||||||
16 | // Value *Exp = ... | ||||||||
17 | // Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2) | ||||||||
18 | // if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)), | ||||||||
19 | // m_And(m_Value(Y), m_ConstantInt(C2))))) { | ||||||||
20 | // ... Pattern is matched and variables are bound ... | ||||||||
21 | // } | ||||||||
22 | // | ||||||||
23 | // This is primarily useful to things like the instruction combiner, but can | ||||||||
24 | // also be useful for static analysis tools or code generators. | ||||||||
25 | // | ||||||||
26 | //===----------------------------------------------------------------------===// | ||||||||
27 | |||||||||
28 | #ifndef LLVM_IR_PATTERNMATCH_H | ||||||||
29 | #define LLVM_IR_PATTERNMATCH_H | ||||||||
30 | |||||||||
31 | #include "llvm/ADT/APFloat.h" | ||||||||
32 | #include "llvm/ADT/APInt.h" | ||||||||
33 | #include "llvm/IR/Constant.h" | ||||||||
34 | #include "llvm/IR/Constants.h" | ||||||||
35 | #include "llvm/IR/InstrTypes.h" | ||||||||
36 | #include "llvm/IR/Instruction.h" | ||||||||
37 | #include "llvm/IR/Instructions.h" | ||||||||
38 | #include "llvm/IR/Intrinsics.h" | ||||||||
39 | #include "llvm/IR/Operator.h" | ||||||||
40 | #include "llvm/IR/Value.h" | ||||||||
41 | #include "llvm/Support/Casting.h" | ||||||||
42 | #include <cstdint> | ||||||||
43 | |||||||||
44 | namespace llvm { | ||||||||
45 | namespace PatternMatch { | ||||||||
46 | |||||||||
47 | template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) { | ||||||||
48 | return const_cast<Pattern &>(P).match(V); | ||||||||
49 | } | ||||||||
50 | |||||||||
51 | template <typename SubPattern_t> struct OneUse_match { | ||||||||
52 | SubPattern_t SubPattern; | ||||||||
53 | |||||||||
54 | OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {} | ||||||||
55 | |||||||||
56 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
57 | return V->hasOneUse() && SubPattern.match(V); | ||||||||
58 | } | ||||||||
59 | }; | ||||||||
60 | |||||||||
61 | template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) { | ||||||||
62 | return SubPattern; | ||||||||
63 | } | ||||||||
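A hedged usage sketch (isSingleUseAdd is a hypothetical helper; m_Add and m_Value are defined further down this header):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

// Returns true iff V is an 'add' whose result has exactly one use,
// binding X and Y to the operands on success.
static bool isSingleUseAdd(Value *V, Value *&X, Value *&Y) {
  return match(V, m_OneUse(m_Add(m_Value(X), m_Value(Y))));
}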
64 | |||||||||
65 | template <typename Class> struct class_match { | ||||||||
66 | template <typename ITy> bool match(ITy *V) { return isa<Class>(V); } | ||||||||
67 | }; | ||||||||
68 | |||||||||
69 | /// Match an arbitrary value and ignore it. | ||||||||
70 | inline class_match<Value> m_Value() { return class_match<Value>(); } | ||||||||
71 | |||||||||
72 | /// Match an arbitrary binary operation and ignore it. | ||||||||
73 | inline class_match<BinaryOperator> m_BinOp() { | ||||||||
74 | return class_match<BinaryOperator>(); | ||||||||
75 | } | ||||||||
76 | |||||||||
77 | /// Match any compare instruction and ignore it. | ||||||||
78 | inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); } | ||||||||
79 | |||||||||
80 | /// Match an arbitrary ConstantInt and ignore it. | ||||||||
81 | inline class_match<ConstantInt> m_ConstantInt() { | ||||||||
82 | return class_match<ConstantInt>(); | ||||||||
83 | } | ||||||||
84 | |||||||||
85 | /// Match an arbitrary undef constant. | ||||||||
86 | inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); } | ||||||||
87 | |||||||||
88 | /// Match an arbitrary Constant and ignore it. | ||||||||
89 | inline class_match<Constant> m_Constant() { return class_match<Constant>(); } | ||||||||
90 | |||||||||
91 | /// Match an arbitrary basic block value and ignore it. | ||||||||
92 | inline class_match<BasicBlock> m_BasicBlock() { | ||||||||
93 | return class_match<BasicBlock>(); | ||||||||
94 | } | ||||||||
95 | |||||||||
96 | /// Inverting matcher | ||||||||
97 | template <typename Ty> struct match_unless { | ||||||||
98 | Ty M; | ||||||||
99 | |||||||||
100 | match_unless(const Ty &Matcher) : M(Matcher) {} | ||||||||
101 | |||||||||
102 | template <typename ITy> bool match(ITy *V) { return !M.match(V); } | ||||||||
103 | }; | ||||||||
104 | |||||||||
105 | /// Match if the inner matcher does *NOT* match. | ||||||||
106 | template <typename Ty> inline match_unless<Ty> m_Unless(const Ty &M) { | ||||||||
107 | return match_unless<Ty>(M); | ||||||||
108 | } | ||||||||
109 | |||||||||
110 | /// Matching combinators | ||||||||
111 | template <typename LTy, typename RTy> struct match_combine_or { | ||||||||
112 | LTy L; | ||||||||
113 | RTy R; | ||||||||
114 | |||||||||
115 | match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {} | ||||||||
116 | |||||||||
117 | template <typename ITy> bool match(ITy *V) { | ||||||||
118 | if (L.match(V)) | ||||||||
119 | return true; | ||||||||
120 | if (R.match(V)) | ||||||||
121 | return true; | ||||||||
122 | return false; | ||||||||
123 | } | ||||||||
124 | }; | ||||||||
125 | |||||||||
126 | template <typename LTy, typename RTy> struct match_combine_and { | ||||||||
127 | LTy L; | ||||||||
128 | RTy R; | ||||||||
129 | |||||||||
130 | match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {} | ||||||||
131 | |||||||||
132 | template <typename ITy> bool match(ITy *V) { | ||||||||
133 | if (L.match(V)) | ||||||||
134 | if (R.match(V)) | ||||||||
135 | return true; | ||||||||
136 | return false; | ||||||||
137 | } | ||||||||
138 | }; | ||||||||
139 | |||||||||
140 | /// Combine two pattern matchers matching L || R | ||||||||
141 | template <typename LTy, typename RTy> | ||||||||
142 | inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) { | ||||||||
143 | return match_combine_or<LTy, RTy>(L, R); | ||||||||
144 | } | ||||||||
145 | |||||||||
146 | /// Combine two pattern matchers matching L && R | ||||||||
147 | template <typename LTy, typename RTy> | ||||||||
148 | inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) { | ||||||||
149 | return match_combine_and<LTy, RTy>(L, R); | ||||||||
150 | } | ||||||||
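A hedged usage sketch of the combinators (isZeroOrOne is a hypothetical helper; m_ZeroInt and m_One are defined later in this header):

// Accepts a constant integer (or splat vector) equal to 0 or 1 by
// composing two existing matchers instead of writing a new struct.
static bool isZeroOrOne(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  return match(V, m_CombineOr(m_ZeroInt(), m_One()));
}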
151 | |||||||||
152 | struct apint_match { | ||||||||
153 | const APInt *&Res; | ||||||||
154 | |||||||||
155 | apint_match(const APInt *&R) : Res(R) {} | ||||||||
156 | |||||||||
157 | template <typename ITy> bool match(ITy *V) { | ||||||||
158 | if (auto *CI = dyn_cast<ConstantInt>(V)) { | ||||||||
159 | Res = &CI->getValue(); | ||||||||
160 | return true; | ||||||||
161 | } | ||||||||
162 | if (V->getType()->isVectorTy()) | ||||||||
163 | if (const auto *C = dyn_cast<Constant>(V)) | ||||||||
164 | if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) { | ||||||||
165 | Res = &CI->getValue(); | ||||||||
166 | return true; | ||||||||
167 | } | ||||||||
168 | return false; | ||||||||
169 | } | ||||||||
170 | }; | ||||||||
171 | // Either constexpr if or renaming ConstantFP::getValueAPF to | ||||||||
172 | // ConstantFP::getValue is needed to do this via a single template | ||||||||
173 | // function for both apint/apfloat. | ||||||||
174 | struct apfloat_match { | ||||||||
175 | const APFloat *&Res; | ||||||||
176 | apfloat_match(const APFloat *&R) : Res(R) {} | ||||||||
177 | template <typename ITy> bool match(ITy *V) { | ||||||||
178 | if (auto *CI = dyn_cast<ConstantFP>(V)) { | ||||||||
179 | Res = &CI->getValueAPF(); | ||||||||
180 | return true; | ||||||||
181 | } | ||||||||
182 | if (V->getType()->isVectorTy()) | ||||||||
183 | if (const auto *C = dyn_cast<Constant>(V)) | ||||||||
184 | if (auto *CI = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) { | ||||||||
185 | Res = &CI->getValueAPF(); | ||||||||
186 | return true; | ||||||||
187 | } | ||||||||
188 | return false; | ||||||||
189 | } | ||||||||
190 | }; | ||||||||
191 | |||||||||
192 | /// Match a ConstantInt or splatted ConstantVector, binding the | ||||||||
193 | /// specified pointer to the contained APInt. | ||||||||
194 | inline apint_match m_APInt(const APInt *&Res) { return Res; } | ||||||||
195 | |||||||||
196 | /// Match a ConstantFP or splatted ConstantVector, binding the | ||||||||
197 | /// specified pointer to the contained APFloat. | ||||||||
198 | inline apfloat_match m_APFloat(const APFloat *&Res) { return Res; } | ||||||||
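A hedged usage sketch (isShlByConstant is a hypothetical helper; m_Shl is defined elsewhere in this header):

// One check covers both a scalar shift amount and a splat-vector one,
// because m_APInt also matches splatted ConstantVectors.
static bool isShlByConstant(llvm::Value *V, const llvm::APInt *&ShAmt) {
  using namespace llvm::PatternMatch;
  llvm::Value *X;
  return match(V, m_Shl(m_Value(X), m_APInt(ShAmt)));
}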
199 | |||||||||
200 | template <int64_t Val> struct constantint_match { | ||||||||
201 | template <typename ITy> bool match(ITy *V) { | ||||||||
202 | if (const auto *CI = dyn_cast<ConstantInt>(V)) { | ||||||||
203 | const APInt &CIV = CI->getValue(); | ||||||||
204 | if (Val >= 0) | ||||||||
205 | return CIV == static_cast<uint64_t>(Val); | ||||||||
206 | // If Val is negative, and CI is shorter than it, truncate to the right | ||||||||
207 | // number of bits. If it is larger, then we have to sign extend. Just | ||||||||
208 | // compare their negated values. | ||||||||
209 | return -CIV == -Val; | ||||||||
210 | } | ||||||||
211 | return false; | ||||||||
212 | } | ||||||||
213 | }; | ||||||||
214 | |||||||||
215 | /// Match a ConstantInt with a specific value. | ||||||||
216 | template <int64_t Val> inline constantint_match<Val> m_ConstantInt() { | ||||||||
217 | return constantint_match<Val>(); | ||||||||
218 | } | ||||||||
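A hedged usage sketch of the template form (isDecrement is a hypothetical helper):

// Recognizes "X - 1" whether the IR spells it as a sub by 1 or an add of
// -1; constantint_match handles the sign handling for negative Val.
static bool isDecrement(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  return match(V, m_CombineOr(m_Sub(m_Value(X), m_ConstantInt<1>()),
                              m_Add(m_Value(X), m_ConstantInt<-1>())));
}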
219 | |||||||||
220 | /// This helper class is used to match scalar and vector integer constants that | ||||||||
221 | /// satisfy a specified predicate. | ||||||||
222 | /// For vector constants, undefined elements are ignored. | ||||||||
223 | template <typename Predicate> struct cst_pred_ty : public Predicate { | ||||||||
224 | template <typename ITy> bool match(ITy *V) { | ||||||||
225 | if (const auto *CI = dyn_cast<ConstantInt>(V)) | ||||||||
226 | return this->isValue(CI->getValue()); | ||||||||
227 | if (V->getType()->isVectorTy()) { | ||||||||
228 | if (const auto *C = dyn_cast<Constant>(V)) { | ||||||||
229 | if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) | ||||||||
230 | return this->isValue(CI->getValue()); | ||||||||
231 | |||||||||
232 | // Non-splat vector constant: check each element for a match. | ||||||||
233 | unsigned NumElts = V->getType()->getVectorNumElements(); | ||||||||
234 |         assert(NumElts != 0 && "Constant vector with no elements?"); | ||||||||
235 | bool HasNonUndefElements = false; | ||||||||
236 | for (unsigned i = 0; i != NumElts; ++i) { | ||||||||
237 | Constant *Elt = C->getAggregateElement(i); | ||||||||
238 | if (!Elt) | ||||||||
239 | return false; | ||||||||
240 | if (isa<UndefValue>(Elt)) | ||||||||
241 | continue; | ||||||||
242 | auto *CI = dyn_cast<ConstantInt>(Elt); | ||||||||
243 | if (!CI || !this->isValue(CI->getValue())) | ||||||||
244 | return false; | ||||||||
245 | HasNonUndefElements = true; | ||||||||
246 | } | ||||||||
247 | return HasNonUndefElements; | ||||||||
248 | } | ||||||||
249 | } | ||||||||
250 | return false; | ||||||||
251 | } | ||||||||
252 | }; | ||||||||
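A hedged sketch of defining a new predicate in this style (is_even and m_Even are hypothetical, not part of the header):

// Matches an integer constant (scalar, splat, or per-element vector with
// undef elements ignored) whose low bit is clear, i.e. an even value.
struct is_even {
  bool isValue(const APInt &C) { return !C[0]; }
};
inline cst_pred_ty<is_even> m_Even() { return cst_pred_ty<is_even>(); }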
253 | |||||||||
254 | /// This helper class is used to match scalar and vector constants that | ||||||||
255 | /// satisfy a specified predicate, and bind them to an APInt. | ||||||||
256 | template <typename Predicate> struct api_pred_ty : public Predicate { | ||||||||
257 | const APInt *&Res; | ||||||||
258 | |||||||||
259 | api_pred_ty(const APInt *&R) : Res(R) {} | ||||||||
260 | |||||||||
261 | template <typename ITy> bool match(ITy *V) { | ||||||||
262 | if (const auto *CI = dyn_cast<ConstantInt>(V)) | ||||||||
263 | if (this->isValue(CI->getValue())) { | ||||||||
264 | Res = &CI->getValue(); | ||||||||
265 | return true; | ||||||||
266 | } | ||||||||
267 | if (V->getType()->isVectorTy()) | ||||||||
268 | if (const auto *C = dyn_cast<Constant>(V)) | ||||||||
269 | if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) | ||||||||
270 | if (this->isValue(CI->getValue())) { | ||||||||
271 | Res = &CI->getValue(); | ||||||||
272 | return true; | ||||||||
273 | } | ||||||||
274 | |||||||||
275 | return false; | ||||||||
276 | } | ||||||||
277 | }; | ||||||||
278 | |||||||||
279 | /// This helper class is used to match scalar and vector floating-point | ||||||||
280 | /// constants that satisfy a specified predicate. | ||||||||
281 | /// For vector constants, undefined elements are ignored. | ||||||||
282 | template <typename Predicate> struct cstfp_pred_ty : public Predicate { | ||||||||
283 | template <typename ITy> bool match(ITy *V) { | ||||||||
284 | if (const auto *CF = dyn_cast<ConstantFP>(V)) | ||||||||
285 | return this->isValue(CF->getValueAPF()); | ||||||||
286 | if (V->getType()->isVectorTy()) { | ||||||||
287 | if (const auto *C = dyn_cast<Constant>(V)) { | ||||||||
288 | if (const auto *CF = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) | ||||||||
289 | return this->isValue(CF->getValueAPF()); | ||||||||
290 | |||||||||
291 | // Non-splat vector constant: check each element for a match. | ||||||||
292 | unsigned NumElts = V->getType()->getVectorNumElements(); | ||||||||
293 |         assert(NumElts != 0 && "Constant vector with no elements?"); | ||||||||
294 | bool HasNonUndefElements = false; | ||||||||
295 | for (unsigned i = 0; i != NumElts; ++i) { | ||||||||
296 | Constant *Elt = C->getAggregateElement(i); | ||||||||
297 | if (!Elt) | ||||||||
298 | return false; | ||||||||
299 | if (isa<UndefValue>(Elt)) | ||||||||
300 | continue; | ||||||||
301 | auto *CF = dyn_cast<ConstantFP>(Elt); | ||||||||
302 | if (!CF || !this->isValue(CF->getValueAPF())) | ||||||||
303 | return false; | ||||||||
304 | HasNonUndefElements = true; | ||||||||
305 | } | ||||||||
306 | return HasNonUndefElements; | ||||||||
307 | } | ||||||||
308 | } | ||||||||
309 | return false; | ||||||||
310 | } | ||||||||
311 | }; | ||||||||
312 | |||||||||
313 | /////////////////////////////////////////////////////////////////////////////// | ||||||||
314 | // | ||||||||
315 | // Encapsulate constant value queries for use in templated predicate matchers. | ||||||||
316 | // This allows checking if constants match using compound predicates and works | ||||||||
317 | // with vector constants, possibly with relaxed constraints. For example, ignore | ||||||||
318 | // undef values. | ||||||||
319 | // | ||||||||
320 | /////////////////////////////////////////////////////////////////////////////// | ||||||||
321 | |||||||||
322 | struct is_any_apint { | ||||||||
323 | bool isValue(const APInt &C) { return true; } | ||||||||
324 | }; | ||||||||
325 | /// Match an integer or vector with any integral constant. | ||||||||
326 | /// For vectors, this includes constants with undefined elements. | ||||||||
327 | inline cst_pred_ty<is_any_apint> m_AnyIntegralConstant() { | ||||||||
328 | return cst_pred_ty<is_any_apint>(); | ||||||||
329 | } | ||||||||
330 | |||||||||
331 | struct is_all_ones { | ||||||||
332 | bool isValue(const APInt &C) { return C.isAllOnesValue(); } | ||||||||
333 | }; | ||||||||
334 | /// Match an integer or vector with all bits set. | ||||||||
335 | /// For vectors, this includes constants with undefined elements. | ||||||||
336 | inline cst_pred_ty<is_all_ones> m_AllOnes() { | ||||||||
337 | return cst_pred_ty<is_all_ones>(); | ||||||||
338 | } | ||||||||
339 | |||||||||
340 | struct is_maxsignedvalue { | ||||||||
341 | bool isValue(const APInt &C) { return C.isMaxSignedValue(); } | ||||||||
342 | }; | ||||||||
343 | /// Match an integer or vector with values having all bits except for the high | ||||||||
344 | /// bit set (0x7f...). | ||||||||
345 | /// For vectors, this includes constants with undefined elements. | ||||||||
346 | inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() { | ||||||||
347 | return cst_pred_ty<is_maxsignedvalue>(); | ||||||||
348 | } | ||||||||
349 | inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) { | ||||||||
350 | return V; | ||||||||
351 | } | ||||||||
352 | |||||||||
353 | struct is_negative { | ||||||||
354 | bool isValue(const APInt &C) { return C.isNegative(); } | ||||||||
355 | }; | ||||||||
356 | /// Match an integer or vector of negative values. | ||||||||
357 | /// For vectors, this includes constants with undefined elements. | ||||||||
358 | inline cst_pred_ty<is_negative> m_Negative() { | ||||||||
359 | return cst_pred_ty<is_negative>(); | ||||||||
360 | } | ||||||||
361 | inline api_pred_ty<is_negative> m_Negative(const APInt *&V) { | ||||||||
362 | return V; | ||||||||
363 | } | ||||||||
364 | |||||||||
365 | struct is_nonnegative { | ||||||||
366 | bool isValue(const APInt &C) { return C.isNonNegative(); } | ||||||||
367 | }; | ||||||||
368 | /// Match an integer or vector of nonnegative values. | ||||||||
369 | /// For vectors, this includes constants with undefined elements. | ||||||||
370 | inline cst_pred_ty<is_nonnegative> m_NonNegative() { | ||||||||
371 | return cst_pred_ty<is_nonnegative>(); | ||||||||
372 | } | ||||||||
373 | inline api_pred_ty<is_nonnegative> m_NonNegative(const APInt *&V) { | ||||||||
374 | return V; | ||||||||
375 | } | ||||||||
376 | |||||||||
377 | struct is_one { | ||||||||
378 | bool isValue(const APInt &C) { return C.isOneValue(); } | ||||||||
379 | }; | ||||||||
380 | /// Match an integer 1 or a vector with all elements equal to 1. | ||||||||
381 | /// For vectors, this includes constants with undefined elements. | ||||||||
382 | inline cst_pred_ty<is_one> m_One() { | ||||||||
383 | return cst_pred_ty<is_one>(); | ||||||||
384 | } | ||||||||
385 | |||||||||
386 | struct is_zero_int { | ||||||||
387 | bool isValue(const APInt &C) { return C.isNullValue(); } | ||||||||
388 | }; | ||||||||
389 | /// Match an integer 0 or a vector with all elements equal to 0. | ||||||||
390 | /// For vectors, this includes constants with undefined elements. | ||||||||
391 | inline cst_pred_ty<is_zero_int> m_ZeroInt() { | ||||||||
392 | return cst_pred_ty<is_zero_int>(); | ||||||||
393 | } | ||||||||
394 | |||||||||
395 | struct is_zero { | ||||||||
396 | template <typename ITy> bool match(ITy *V) { | ||||||||
397 | auto *C = dyn_cast<Constant>(V); | ||||||||
398 | return C && (C->isNullValue() || cst_pred_ty<is_zero_int>().match(C)); | ||||||||
399 | } | ||||||||
400 | }; | ||||||||
401 | /// Match any null constant or a vector with all elements equal to 0. | ||||||||
402 | /// For vectors, this includes constants with undefined elements. | ||||||||
403 | inline is_zero m_Zero() { | ||||||||
404 | return is_zero(); | ||||||||
405 | } | ||||||||
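
As a usage sketch (assuming 'using namespace llvm::PatternMatch' and a Value *V supplied by the caller, with the match() entry point defined earlier in this header), the constant predicates above compose with the operator matchers defined further below:

  // Illustrative only: recognize "udiv X, C" with C a constant power of two.
  Value *X;
  const APInt *C;
  if (match(V, m_UDiv(m_Value(X), m_Power2(C)))) {
    // C->logBase2() would give the shift amount for a strength-reduced lshr.
  }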
406 | |||||||||
407 | struct is_power2 { | ||||||||
408 | bool isValue(const APInt &C) { return C.isPowerOf2(); } | ||||||||
409 | }; | ||||||||
410 | /// Match an integer or vector power-of-2. | ||||||||
411 | /// For vectors, this includes constants with undefined elements. | ||||||||
412 | inline cst_pred_ty<is_power2> m_Power2() { | ||||||||
413 | return cst_pred_ty<is_power2>(); | ||||||||
414 | } | ||||||||
415 | inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { | ||||||||
416 | return V; | ||||||||
417 | } | ||||||||
418 | |||||||||
419 | struct is_negated_power2 { | ||||||||
420 | bool isValue(const APInt &C) { return (-C).isPowerOf2(); } | ||||||||
421 | }; | ||||||||
422 | /// Match an integer or vector negated power-of-2. | ||||||||
423 | /// For vectors, this includes constants with undefined elements. | ||||||||
424 | inline cst_pred_ty<is_negated_power2> m_NegatedPower2() { | ||||||||
425 | return cst_pred_ty<is_negated_power2>(); | ||||||||
426 | } | ||||||||
427 | inline api_pred_ty<is_negated_power2> m_NegatedPower2(const APInt *&V) { | ||||||||
428 | return V; | ||||||||
429 | } | ||||||||
430 | |||||||||
431 | struct is_power2_or_zero { | ||||||||
432 | bool isValue(const APInt &C) { return !C || C.isPowerOf2(); } | ||||||||
433 | }; | ||||||||
434 | /// Match an integer or vector of 0 or power-of-2 values. | ||||||||
435 | /// For vectors, this includes constants with undefined elements. | ||||||||
436 | inline cst_pred_ty<is_power2_or_zero> m_Power2OrZero() { | ||||||||
437 | return cst_pred_ty<is_power2_or_zero>(); | ||||||||
438 | } | ||||||||
439 | inline api_pred_ty<is_power2_or_zero> m_Power2OrZero(const APInt *&V) { | ||||||||
440 | return V; | ||||||||
441 | } | ||||||||
442 | |||||||||
443 | struct is_sign_mask { | ||||||||
444 | bool isValue(const APInt &C) { return C.isSignMask(); } | ||||||||
445 | }; | ||||||||
446 | /// Match an integer or vector with only the sign bit(s) set. | ||||||||
447 | /// For vectors, this includes constants with undefined elements. | ||||||||
448 | inline cst_pred_ty<is_sign_mask> m_SignMask() { | ||||||||
449 | return cst_pred_ty<is_sign_mask>(); | ||||||||
450 | } | ||||||||
451 | |||||||||
452 | struct is_lowbit_mask { | ||||||||
453 | bool isValue(const APInt &C) { return C.isMask(); } | ||||||||
454 | }; | ||||||||
455 | /// Match an integer or vector with only the low bit(s) set. | ||||||||
456 | /// For vectors, this includes constants with undefined elements. | ||||||||
457 | inline cst_pred_ty<is_lowbit_mask> m_LowBitMask() { | ||||||||
458 | return cst_pred_ty<is_lowbit_mask>(); | ||||||||
459 | } | ||||||||
460 | |||||||||
461 | struct icmp_pred_with_threshold { | ||||||||
462 | ICmpInst::Predicate Pred; | ||||||||
463 | const APInt *Thr; | ||||||||
464 | bool isValue(const APInt &C) { | ||||||||
465 | switch (Pred) { | ||||||||
466 | case ICmpInst::Predicate::ICMP_EQ: | ||||||||
467 | return C.eq(*Thr); | ||||||||
468 | case ICmpInst::Predicate::ICMP_NE: | ||||||||
469 | return C.ne(*Thr); | ||||||||
470 | case ICmpInst::Predicate::ICMP_UGT: | ||||||||
471 | return C.ugt(*Thr); | ||||||||
472 | case ICmpInst::Predicate::ICMP_UGE: | ||||||||
473 | return C.uge(*Thr); | ||||||||
474 | case ICmpInst::Predicate::ICMP_ULT: | ||||||||
475 | return C.ult(*Thr); | ||||||||
476 | case ICmpInst::Predicate::ICMP_ULE: | ||||||||
477 | return C.ule(*Thr); | ||||||||
478 | case ICmpInst::Predicate::ICMP_SGT: | ||||||||
479 | return C.sgt(*Thr); | ||||||||
480 | case ICmpInst::Predicate::ICMP_SGE: | ||||||||
481 | return C.sge(*Thr); | ||||||||
482 | case ICmpInst::Predicate::ICMP_SLT: | ||||||||
483 | return C.slt(*Thr); | ||||||||
484 | case ICmpInst::Predicate::ICMP_SLE: | ||||||||
485 | return C.sle(*Thr); | ||||||||
486 | default: | ||||||||
487 | llvm_unreachable("Unhandled ICmp predicate"); | ||||||||
488 | } | ||||||||
489 | } | ||||||||
490 | }; | ||||||||
491 | /// Match an integer or vector with every element comparing 'pred' (eq/ne/...) | ||||||||
492 | /// to Threshold. For vectors, this includes constants with undefined elements. | ||||||||
493 | inline cst_pred_ty<icmp_pred_with_threshold> | ||||||||
494 | m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold) { | ||||||||
495 | cst_pred_ty<icmp_pred_with_threshold> P; | ||||||||
496 | P.Pred = Predicate; | ||||||||
497 | P.Thr = &Threshold; | ||||||||
498 | return P; | ||||||||
499 | } | ||||||||
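
A usage sketch for m_SpecificInt_ICMP (assuming a Value *V in scope). The threshold is captured by pointer, so it must outlive the match; building it in the same full expression is safe. APInt comparisons also assert matching bit widths, so construct the threshold at V's scalar width:

  unsigned BW = V->getType()->getScalarSizeInBits();
  // True only for a constant (or splat) unsigned-less-than 8.
  bool BelowEight =
      match(V, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(BW, 8)));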
500 | |||||||||
501 | struct is_nan { | ||||||||
502 | bool isValue(const APFloat &C) { return C.isNaN(); } | ||||||||
503 | }; | ||||||||
504 | /// Match an arbitrary NaN constant. This includes quiet and signaling NaNs. | ||||||||
505 | /// For vectors, this includes constants with undefined elements. | ||||||||
506 | inline cstfp_pred_ty<is_nan> m_NaN() { | ||||||||
507 | return cstfp_pred_ty<is_nan>(); | ||||||||
508 | } | ||||||||
509 | |||||||||
510 | struct is_any_zero_fp { | ||||||||
511 | bool isValue(const APFloat &C) { return C.isZero(); } | ||||||||
512 | }; | ||||||||
513 | /// Match a floating-point negative zero or positive zero. | ||||||||
514 | /// For vectors, this includes constants with undefined elements. | ||||||||
515 | inline cstfp_pred_ty<is_any_zero_fp> m_AnyZeroFP() { | ||||||||
516 | return cstfp_pred_ty<is_any_zero_fp>(); | ||||||||
517 | } | ||||||||
518 | |||||||||
519 | struct is_pos_zero_fp { | ||||||||
520 | bool isValue(const APFloat &C) { return C.isPosZero(); } | ||||||||
521 | }; | ||||||||
522 | /// Match a floating-point positive zero. | ||||||||
523 | /// For vectors, this includes constants with undefined elements. | ||||||||
524 | inline cstfp_pred_ty<is_pos_zero_fp> m_PosZeroFP() { | ||||||||
525 | return cstfp_pred_ty<is_pos_zero_fp>(); | ||||||||
526 | } | ||||||||
527 | |||||||||
528 | struct is_neg_zero_fp { | ||||||||
529 | bool isValue(const APFloat &C) { return C.isNegZero(); } | ||||||||
530 | }; | ||||||||
531 | /// Match a floating-point negative zero. | ||||||||
532 | /// For vectors, this includes constants with undefined elements. | ||||||||
533 | inline cstfp_pred_ty<is_neg_zero_fp> m_NegZeroFP() { | ||||||||
534 | return cstfp_pred_ty<is_neg_zero_fp>(); | ||||||||
535 | } | ||||||||
536 | |||||||||
537 | /////////////////////////////////////////////////////////////////////////////// | ||||||||
538 | |||||||||
539 | template <typename Class> struct bind_ty { | ||||||||
540 | Class *&VR; | ||||||||
541 | |||||||||
542 | bind_ty(Class *&V) : VR(V) {} | ||||||||
543 | |||||||||
544 | template <typename ITy> bool match(ITy *V) { | ||||||||
545 | if (auto *CV = dyn_cast<Class>(V)) { | ||||||||
546 | VR = CV; | ||||||||
547 | return true; | ||||||||
548 | } | ||||||||
549 | return false; | ||||||||
550 | } | ||||||||
551 | }; | ||||||||
552 | |||||||||
553 | /// Match a value, capturing it if we match. | ||||||||
554 | inline bind_ty<Value> m_Value(Value *&V) { return V; } | ||||||||
555 | inline bind_ty<const Value> m_Value(const Value *&V) { return V; } | ||||||||
556 | |||||||||
557 | /// Match an instruction, capturing it if we match. | ||||||||
558 | inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; } | ||||||||
559 | /// Match a binary operator, capturing it if we match. | ||||||||
560 | inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; } | ||||||||
561 | |||||||||
562 | /// Match a ConstantInt, capturing the value if we match. | ||||||||
563 | inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; } | ||||||||
564 | |||||||||
565 | /// Match a Constant, capturing the value if we match. | ||||||||
566 | inline bind_ty<Constant> m_Constant(Constant *&C) { return C; } | ||||||||
567 | |||||||||
568 | /// Match a ConstantFP, capturing the value if we match. | ||||||||
569 | inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; } | ||||||||
570 | |||||||||
571 | /// Match a basic block value, capturing it if we match. | ||||||||
572 | inline bind_ty<BasicBlock> m_BasicBlock(BasicBlock *&V) { return V; } | ||||||||
573 | inline bind_ty<const BasicBlock> m_BasicBlock(const BasicBlock *&V) { | ||||||||
574 | return V; | ||||||||
575 | } | ||||||||
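
The binding matchers are the capture mechanism of this header; a minimal sketch (names hypothetical):

  // Bind both operands of an add; X and Y are meaningful only when
  // match() returned true.
  Value *X, *Y;
  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
    // ... rewrite using X and Y ...
  }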
576 | |||||||||
577 | /// Match a specified Value*. | ||||||||
578 | struct specificval_ty { | ||||||||
579 | const Value *Val; | ||||||||
580 | |||||||||
581 | specificval_ty(const Value *V) : Val(V) {} | ||||||||
582 | |||||||||
583 | template <typename ITy> bool match(ITy *V) { return V == Val; } | ||||||||
584 | }; | ||||||||
585 | |||||||||
586 | /// Match only the specified Value*. | ||||||||
587 | inline specificval_ty m_Specific(const Value *V) { return V; } | ||||||||
588 | |||||||||
589 | /// Stores a reference to the Value *, not the Value * itself, | ||||||||
590 | /// thus can be used in commutative matchers. | ||||||||
591 | template <typename Class> struct deferredval_ty { | ||||||||
592 | Class *const &Val; | ||||||||
593 | |||||||||
594 | deferredval_ty(Class *const &V) : Val(V) {} | ||||||||
595 | |||||||||
596 | template <typename ITy> bool match(ITy *const V) { return V == Val; } | ||||||||
597 | }; | ||||||||
598 | |||||||||
599 | /// A commutative-friendly version of m_Specific(). | ||||||||
600 | inline deferredval_ty<Value> m_Deferred(Value *const &V) { return V; } | ||||||||
601 | inline deferredval_ty<const Value> m_Deferred(const Value *const &V) { | ||||||||
602 | return V; | ||||||||
603 | } | ||||||||
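
Because the LHS of a pattern is always matched first, a value bound by m_Value is already available when a later m_Deferred runs; a sketch:

  // Match "xor A, (lshr A, C)" where both occurrences are the same value.
  Value *A;
  const APInt *C;
  bool Mixes =
      match(V, m_Xor(m_Value(A), m_LShr(m_Deferred(A), m_APInt(C))));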
604 | |||||||||
605 | /// Match a specified floating point value or vector of all elements of | ||||||||
606 | /// that value. | ||||||||
607 | struct specific_fpval { | ||||||||
608 | double Val; | ||||||||
609 | |||||||||
610 | specific_fpval(double V) : Val(V) {} | ||||||||
611 | |||||||||
612 | template <typename ITy> bool match(ITy *V) { | ||||||||
613 | if (const auto *CFP = dyn_cast<ConstantFP>(V)) | ||||||||
614 | return CFP->isExactlyValue(Val); | ||||||||
615 | if (V->getType()->isVectorTy()) | ||||||||
616 | if (const auto *C = dyn_cast<Constant>(V)) | ||||||||
617 | if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue())) | ||||||||
618 | return CFP->isExactlyValue(Val); | ||||||||
619 | return false; | ||||||||
620 | } | ||||||||
621 | }; | ||||||||
622 | |||||||||
623 | /// Match a specific floating point value or vector with all elements | ||||||||
624 | /// equal to the value. | ||||||||
625 | inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); } | ||||||||
626 | |||||||||
627 | /// Match a float 1.0 or vector with all elements equal to 1.0. | ||||||||
628 | inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); } | ||||||||
629 | |||||||||
630 | struct bind_const_intval_ty { | ||||||||
631 | uint64_t &VR; | ||||||||
632 | |||||||||
633 | bind_const_intval_ty(uint64_t &V) : VR(V) {} | ||||||||
634 | |||||||||
635 | template <typename ITy> bool match(ITy *V) { | ||||||||
636 | if (const auto *CV = dyn_cast<ConstantInt>(V)) | ||||||||
637 | if (CV->getValue().ule(UINT64_MAX)) { | ||||||||
638 | VR = CV->getZExtValue(); | ||||||||
639 | return true; | ||||||||
640 | } | ||||||||
641 | return false; | ||||||||
642 | } | ||||||||
643 | }; | ||||||||
644 | |||||||||
645 | /// Match a specified integer value or vector of all elements of that | ||||||||
646 | /// value. | ||||||||
647 | struct specific_intval { | ||||||||
648 | APInt Val; | ||||||||
649 | |||||||||
650 | specific_intval(APInt V) : Val(std::move(V)) {} | ||||||||
651 | |||||||||
652 | template <typename ITy> bool match(ITy *V) { | ||||||||
653 | const auto *CI = dyn_cast<ConstantInt>(V); | ||||||||
654 | if (!CI && V->getType()->isVectorTy()) | ||||||||
655 | if (const auto *C = dyn_cast<Constant>(V)) | ||||||||
656 | CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()); | ||||||||
657 | |||||||||
658 | return CI && APInt::isSameValue(CI->getValue(), Val); | ||||||||
659 | } | ||||||||
660 | }; | ||||||||
661 | |||||||||
662 | /// Match a specific integer value or vector with all elements equal to | ||||||||
663 | /// the value. | ||||||||
664 | inline specific_intval m_SpecificInt(APInt V) { | ||||||||
665 | return specific_intval(std::move(V)); | ||||||||
666 | } | ||||||||
667 | |||||||||
668 | inline specific_intval m_SpecificInt(uint64_t V) { | ||||||||
669 | return m_SpecificInt(APInt(64, V)); | ||||||||
670 | } | ||||||||
671 | |||||||||
672 | /// Match a ConstantInt and bind to its value. This does not match | ||||||||
673 | /// ConstantInts wider than 64-bits. | ||||||||
674 | inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; } | ||||||||
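
A sketch of m_SpecificInt; since APInt::isSameValue tolerates differing bit widths, the 64-bit constant built here also matches a narrower ConstantInt:

  // Recognize "shl 1, N", i.e. two-to-the-N computed at runtime.
  Value *N;
  bool IsPow2Shl = match(V, m_Shl(m_SpecificInt(1), m_Value(N)));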
675 | |||||||||
676 | /// Match a specified basic block value. | ||||||||
677 | struct specific_bbval { | ||||||||
678 | BasicBlock *Val; | ||||||||
679 | |||||||||
680 | specific_bbval(BasicBlock *Val) : Val(Val) {} | ||||||||
681 | |||||||||
682 | template <typename ITy> bool match(ITy *V) { | ||||||||
683 | const auto *BB = dyn_cast<BasicBlock>(V); | ||||||||
684 | return BB && BB == Val; | ||||||||
685 | } | ||||||||
686 | }; | ||||||||
687 | |||||||||
688 | /// Match a specific basic block value. | ||||||||
689 | inline specific_bbval m_SpecificBB(BasicBlock *BB) { | ||||||||
690 | return specific_bbval(BB); | ||||||||
691 | } | ||||||||
692 | |||||||||
693 | /// A commutative-friendly version of m_Specific(). | ||||||||
694 | inline deferredval_ty<BasicBlock> m_Deferred(BasicBlock *const &BB) { | ||||||||
695 | return BB; | ||||||||
696 | } | ||||||||
697 | inline deferredval_ty<const BasicBlock> | ||||||||
698 | m_Deferred(const BasicBlock *const &BB) { | ||||||||
699 | return BB; | ||||||||
700 | } | ||||||||
701 | |||||||||
702 | //===----------------------------------------------------------------------===// | ||||||||
703 | // Matcher for any binary operator. | ||||||||
704 | // | ||||||||
705 | template <typename LHS_t, typename RHS_t, bool Commutable = false> | ||||||||
706 | struct AnyBinaryOp_match { | ||||||||
707 | LHS_t L; | ||||||||
708 | RHS_t R; | ||||||||
709 | |||||||||
710 | // The evaluation order is always stable, regardless of Commutability. | ||||||||
711 | // The LHS is always matched first. | ||||||||
712 | AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {} | ||||||||
713 | |||||||||
714 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
715 | if (auto *I = dyn_cast<BinaryOperator>(V)) | ||||||||
716 | return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) || | ||||||||
717 | (Commutable && L.match(I->getOperand(1)) && | ||||||||
718 | R.match(I->getOperand(0))); | ||||||||
719 | return false; | ||||||||
720 | } | ||||||||
721 | }; | ||||||||
722 | |||||||||
723 | template <typename LHS, typename RHS> | ||||||||
724 | inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) { | ||||||||
725 | return AnyBinaryOp_match<LHS, RHS>(L, R); | ||||||||
726 | } | ||||||||
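
A sketch of the opcode-agnostic form:

  // Peel one level of any binary operator; on success V is known to be
  // a BinaryOperator, so the opcode can be inspected directly.
  Value *L, *R;
  if (match(V, m_BinOp(m_Value(L), m_Value(R)))) {
    unsigned Opc = cast<BinaryOperator>(V)->getOpcode();
    (void)Opc;
  }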
727 | |||||||||
728 | //===----------------------------------------------------------------------===// | ||||||||
729 | // Matchers for specific binary operators. | ||||||||
730 | // | ||||||||
731 | |||||||||
732 | template <typename LHS_t, typename RHS_t, unsigned Opcode, | ||||||||
733 | bool Commutable = false> | ||||||||
734 | struct BinaryOp_match { | ||||||||
735 | LHS_t L; | ||||||||
736 | RHS_t R; | ||||||||
737 | |||||||||
738 | // The evaluation order is always stable, regardless of Commutability. | ||||||||
739 | // The LHS is always matched first. | ||||||||
740 | BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {} | ||||||||
741 | |||||||||
742 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
743 | if (V->getValueID() == Value::InstructionVal + Opcode) { | ||||||||
744 | auto *I = cast<BinaryOperator>(V); | ||||||||
745 | return (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) || | ||||||||
746 | (Commutable && L.match(I->getOperand(1)) && | ||||||||
747 | R.match(I->getOperand(0))); | ||||||||
748 | } | ||||||||
749 | if (auto *CE = dyn_cast<ConstantExpr>(V)) | ||||||||
750 | return CE->getOpcode() == Opcode && | ||||||||
751 | ((L.match(CE->getOperand(0)) && R.match(CE->getOperand(1))) || | ||||||||
752 | (Commutable && L.match(CE->getOperand(1)) && | ||||||||
753 | R.match(CE->getOperand(0)))); | ||||||||
754 | return false; | ||||||||
755 | } | ||||||||
756 | }; | ||||||||
757 | |||||||||
758 | template <typename LHS, typename RHS> | ||||||||
759 | inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L, | ||||||||
760 | const RHS &R) { | ||||||||
761 | return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R); | ||||||||
762 | } | ||||||||
763 | |||||||||
764 | template <typename LHS, typename RHS> | ||||||||
765 | inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L, | ||||||||
766 | const RHS &R) { | ||||||||
767 | return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R); | ||||||||
768 | } | ||||||||
769 | |||||||||
770 | template <typename LHS, typename RHS> | ||||||||
771 | inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L, | ||||||||
772 | const RHS &R) { | ||||||||
773 | return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R); | ||||||||
774 | } | ||||||||
775 | |||||||||
776 | template <typename LHS, typename RHS> | ||||||||
777 | inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L, | ||||||||
778 | const RHS &R) { | ||||||||
779 | return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R); | ||||||||
780 | } | ||||||||
781 | |||||||||
782 | template <typename Op_t> struct FNeg_match { | ||||||||
783 | Op_t X; | ||||||||
784 | |||||||||
785 | FNeg_match(const Op_t &Op) : X(Op) {} | ||||||||
786 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
787 | auto *FPMO = dyn_cast<FPMathOperator>(V); | ||||||||
788 | if (!FPMO) return false; | ||||||||
789 | |||||||||
790 | if (FPMO->getOpcode() == Instruction::FNeg) | ||||||||
791 | return X.match(FPMO->getOperand(0)); | ||||||||
792 | |||||||||
793 | if (FPMO->getOpcode() == Instruction::FSub) { | ||||||||
794 | if (FPMO->hasNoSignedZeros()) { | ||||||||
795 | // With 'nsz', any zero goes. | ||||||||
796 | if (!cstfp_pred_ty<is_any_zero_fp>().match(FPMO->getOperand(0))) | ||||||||
797 | return false; | ||||||||
798 | } else { | ||||||||
799 | // Without 'nsz', we need fsub -0.0, X exactly. | ||||||||
800 | if (!cstfp_pred_ty<is_neg_zero_fp>().match(FPMO->getOperand(0))) | ||||||||
801 | return false; | ||||||||
802 | } | ||||||||
803 | |||||||||
804 | return X.match(FPMO->getOperand(1)); | ||||||||
805 | } | ||||||||
806 | |||||||||
807 | return false; | ||||||||
808 | } | ||||||||
809 | }; | ||||||||
810 | |||||||||
811 | /// Match 'fneg X': either the unary fneg instruction or 'fsub -0.0, X'. | ||||||||
812 | template <typename OpTy> | ||||||||
813 | inline FNeg_match<OpTy> | ||||||||
814 | m_FNeg(const OpTy &X) { | ||||||||
815 | return FNeg_match<OpTy>(X); | ||||||||
816 | } | ||||||||
817 | |||||||||
818 | /// Match 'fneg X' as 'fsub +/-0.0, X'. | ||||||||
819 | template <typename RHS> | ||||||||
820 | inline BinaryOp_match<cstfp_pred_ty<is_any_zero_fp>, RHS, Instruction::FSub> | ||||||||
821 | m_FNegNSZ(const RHS &X) { | ||||||||
822 | return m_FSub(m_AnyZeroFP(), X); | ||||||||
823 | } | ||||||||
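
m_FNeg nests like any other matcher; a sketch:

  // "fneg (fneg X)" simplifies to X. Each level accepts either the unary
  // fneg instruction or the "fsub -0.0, X" idiom.
  Value *X;
  bool DoubleNeg = match(V, m_FNeg(m_FNeg(m_Value(X))));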
824 | |||||||||
825 | template <typename LHS, typename RHS> | ||||||||
826 | inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L, | ||||||||
827 | const RHS &R) { | ||||||||
828 | return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R); | ||||||||
829 | } | ||||||||
830 | |||||||||
831 | template <typename LHS, typename RHS> | ||||||||
832 | inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L, | ||||||||
833 | const RHS &R) { | ||||||||
834 | return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R); | ||||||||
835 | } | ||||||||
836 | |||||||||
837 | template <typename LHS, typename RHS> | ||||||||
838 | inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L, | ||||||||
839 | const RHS &R) { | ||||||||
840 | return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R); | ||||||||
841 | } | ||||||||
842 | |||||||||
843 | template <typename LHS, typename RHS> | ||||||||
844 | inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L, | ||||||||
845 | const RHS &R) { | ||||||||
846 | return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R); | ||||||||
847 | } | ||||||||
848 | |||||||||
849 | template <typename LHS, typename RHS> | ||||||||
850 | inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L, | ||||||||
851 | const RHS &R) { | ||||||||
852 | return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R); | ||||||||
853 | } | ||||||||
854 | |||||||||
855 | template <typename LHS, typename RHS> | ||||||||
856 | inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L, | ||||||||
857 | const RHS &R) { | ||||||||
858 | return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R); | ||||||||
859 | } | ||||||||
860 | |||||||||
861 | template <typename LHS, typename RHS> | ||||||||
862 | inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L, | ||||||||
863 | const RHS &R) { | ||||||||
864 | return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R); | ||||||||
865 | } | ||||||||
866 | |||||||||
867 | template <typename LHS, typename RHS> | ||||||||
868 | inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L, | ||||||||
869 | const RHS &R) { | ||||||||
870 | return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R); | ||||||||
871 | } | ||||||||
872 | |||||||||
873 | template <typename LHS, typename RHS> | ||||||||
874 | inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L, | ||||||||
875 | const RHS &R) { | ||||||||
876 | return BinaryOp_match<LHS, RHS, Instruction::And>(L, R); | ||||||||
877 | } | ||||||||
878 | |||||||||
879 | template <typename LHS, typename RHS> | ||||||||
880 | inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L, | ||||||||
881 | const RHS &R) { | ||||||||
882 | return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R); | ||||||||
883 | } | ||||||||
884 | |||||||||
885 | template <typename LHS, typename RHS> | ||||||||
886 | inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L, | ||||||||
887 | const RHS &R) { | ||||||||
888 | return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R); | ||||||||
889 | } | ||||||||
890 | |||||||||
891 | template <typename LHS, typename RHS> | ||||||||
892 | inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L, | ||||||||
893 | const RHS &R) { | ||||||||
894 | return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R); | ||||||||
895 | } | ||||||||
896 | |||||||||
897 | template <typename LHS, typename RHS> | ||||||||
898 | inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L, | ||||||||
899 | const RHS &R) { | ||||||||
900 | return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R); | ||||||||
901 | } | ||||||||
902 | |||||||||
903 | template <typename LHS, typename RHS> | ||||||||
904 | inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L, | ||||||||
905 | const RHS &R) { | ||||||||
906 | return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R); | ||||||||
907 | } | ||||||||
908 | |||||||||
909 | template <typename LHS_t, typename RHS_t, unsigned Opcode, | ||||||||
910 | unsigned WrapFlags = 0> | ||||||||
911 | struct OverflowingBinaryOp_match { | ||||||||
912 | LHS_t L; | ||||||||
913 | RHS_t R; | ||||||||
914 | |||||||||
915 | OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) | ||||||||
916 | : L(LHS), R(RHS) {} | ||||||||
917 | |||||||||
918 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
919 | if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) { | ||||||||
920 | if (Op->getOpcode() != Opcode) | ||||||||
921 | return false; | ||||||||
922 | if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap && | ||||||||
923 | !Op->hasNoUnsignedWrap()) | ||||||||
924 | return false; | ||||||||
925 | if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap && | ||||||||
926 | !Op->hasNoSignedWrap()) | ||||||||
927 | return false; | ||||||||
928 | return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1)); | ||||||||
929 | } | ||||||||
930 | return false; | ||||||||
931 | } | ||||||||
932 | }; | ||||||||
933 | |||||||||
934 | template <typename LHS, typename RHS> | ||||||||
935 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add, | ||||||||
936 | OverflowingBinaryOperator::NoSignedWrap> | ||||||||
937 | m_NSWAdd(const LHS &L, const RHS &R) { | ||||||||
938 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add, | ||||||||
939 | OverflowingBinaryOperator::NoSignedWrap>( | ||||||||
940 | L, R); | ||||||||
941 | } | ||||||||
942 | template <typename LHS, typename RHS> | ||||||||
943 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub, | ||||||||
944 | OverflowingBinaryOperator::NoSignedWrap> | ||||||||
945 | m_NSWSub(const LHS &L, const RHS &R) { | ||||||||
946 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub, | ||||||||
947 | OverflowingBinaryOperator::NoSignedWrap>( | ||||||||
948 | L, R); | ||||||||
949 | } | ||||||||
950 | template <typename LHS, typename RHS> | ||||||||
951 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul, | ||||||||
952 | OverflowingBinaryOperator::NoSignedWrap> | ||||||||
953 | m_NSWMul(const LHS &L, const RHS &R) { | ||||||||
954 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul, | ||||||||
955 | OverflowingBinaryOperator::NoSignedWrap>( | ||||||||
956 | L, R); | ||||||||
957 | } | ||||||||
958 | template <typename LHS, typename RHS> | ||||||||
959 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl, | ||||||||
960 | OverflowingBinaryOperator::NoSignedWrap> | ||||||||
961 | m_NSWShl(const LHS &L, const RHS &R) { | ||||||||
962 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl, | ||||||||
963 | OverflowingBinaryOperator::NoSignedWrap>( | ||||||||
964 | L, R); | ||||||||
965 | } | ||||||||
966 | |||||||||
967 | template <typename LHS, typename RHS> | ||||||||
968 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add, | ||||||||
969 | OverflowingBinaryOperator::NoUnsignedWrap> | ||||||||
970 | m_NUWAdd(const LHS &L, const RHS &R) { | ||||||||
971 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add, | ||||||||
972 | OverflowingBinaryOperator::NoUnsignedWrap>( | ||||||||
973 | L, R); | ||||||||
974 | } | ||||||||
975 | template <typename LHS, typename RHS> | ||||||||
976 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub, | ||||||||
977 | OverflowingBinaryOperator::NoUnsignedWrap> | ||||||||
978 | m_NUWSub(const LHS &L, const RHS &R) { | ||||||||
979 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub, | ||||||||
980 | OverflowingBinaryOperator::NoUnsignedWrap>( | ||||||||
981 | L, R); | ||||||||
982 | } | ||||||||
983 | template <typename LHS, typename RHS> | ||||||||
984 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul, | ||||||||
985 | OverflowingBinaryOperator::NoUnsignedWrap> | ||||||||
986 | m_NUWMul(const LHS &L, const RHS &R) { | ||||||||
987 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul, | ||||||||
988 | OverflowingBinaryOperator::NoUnsignedWrap>( | ||||||||
989 | L, R); | ||||||||
990 | } | ||||||||
991 | template <typename LHS, typename RHS> | ||||||||
992 | inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl, | ||||||||
993 | OverflowingBinaryOperator::NoUnsignedWrap> | ||||||||
994 | m_NUWShl(const LHS &L, const RHS &R) { | ||||||||
995 | return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl, | ||||||||
996 | OverflowingBinaryOperator::NoUnsignedWrap>( | ||||||||
997 | L, R); | ||||||||
998 | } | ||||||||
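
A sketch of how the wrap-flag matchers feed value tracking:

  // "add nuw X, 1" can never be zero: a no-unsigned-wrap increment keeps
  // the result at least 1. (This form requires the constant on the RHS.)
  Value *X;
  bool KnownNonZero = match(V, m_NUWAdd(m_Value(X), m_One()));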
999 | |||||||||
1000 | //===----------------------------------------------------------------------===// | ||||||||
1001 | // Class that matches a group of binary opcodes. | ||||||||
1002 | // | ||||||||
1003 | template <typename LHS_t, typename RHS_t, typename Predicate> | ||||||||
1004 | struct BinOpPred_match : Predicate { | ||||||||
1005 | LHS_t L; | ||||||||
1006 | RHS_t R; | ||||||||
1007 | |||||||||
1008 | BinOpPred_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {} | ||||||||
1009 | |||||||||
1010 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1011 | if (auto *I = dyn_cast<Instruction>(V)) | ||||||||
1012 | return this->isOpType(I->getOpcode()) && L.match(I->getOperand(0)) && | ||||||||
1013 | R.match(I->getOperand(1)); | ||||||||
1014 | if (auto *CE = dyn_cast<ConstantExpr>(V)) | ||||||||
1015 | return this->isOpType(CE->getOpcode()) && L.match(CE->getOperand(0)) && | ||||||||
1016 | R.match(CE->getOperand(1)); | ||||||||
1017 | return false; | ||||||||
1018 | } | ||||||||
1019 | }; | ||||||||
1020 | |||||||||
1021 | struct is_shift_op { | ||||||||
1022 | bool isOpType(unsigned Opcode) { return Instruction::isShift(Opcode); } | ||||||||
1023 | }; | ||||||||
1024 | |||||||||
1025 | struct is_right_shift_op { | ||||||||
1026 | bool isOpType(unsigned Opcode) { | ||||||||
1027 | return Opcode == Instruction::LShr || Opcode == Instruction::AShr; | ||||||||
1028 | } | ||||||||
1029 | }; | ||||||||
1030 | |||||||||
1031 | struct is_logical_shift_op { | ||||||||
1032 | bool isOpType(unsigned Opcode) { | ||||||||
1033 | return Opcode == Instruction::LShr || Opcode == Instruction::Shl; | ||||||||
1034 | } | ||||||||
1035 | }; | ||||||||
1036 | |||||||||
1037 | struct is_bitwiselogic_op { | ||||||||
1038 | bool isOpType(unsigned Opcode) { | ||||||||
1039 | return Instruction::isBitwiseLogicOp(Opcode); | ||||||||
1040 | } | ||||||||
1041 | }; | ||||||||
1042 | |||||||||
1043 | struct is_idiv_op { | ||||||||
1044 | bool isOpType(unsigned Opcode) { | ||||||||
1045 | return Opcode == Instruction::SDiv || Opcode == Instruction::UDiv; | ||||||||
1046 | } | ||||||||
1047 | }; | ||||||||
1048 | |||||||||
1049 | struct is_irem_op { | ||||||||
1050 | bool isOpType(unsigned Opcode) { | ||||||||
1051 | return Opcode == Instruction::SRem || Opcode == Instruction::URem; | ||||||||
1052 | } | ||||||||
1053 | }; | ||||||||
1054 | |||||||||
1055 | /// Matches shift operations. | ||||||||
1056 | template <typename LHS, typename RHS> | ||||||||
1057 | inline BinOpPred_match<LHS, RHS, is_shift_op> m_Shift(const LHS &L, | ||||||||
1058 | const RHS &R) { | ||||||||
1059 | return BinOpPred_match<LHS, RHS, is_shift_op>(L, R); | ||||||||
1060 | } | ||||||||
1061 | |||||||||
1062 | /// Matches right shift operations (lshr and ashr). | ||||||||
1063 | template <typename LHS, typename RHS> | ||||||||
1064 | inline BinOpPred_match<LHS, RHS, is_right_shift_op> m_Shr(const LHS &L, | ||||||||
1065 | const RHS &R) { | ||||||||
1066 | return BinOpPred_match<LHS, RHS, is_right_shift_op>(L, R); | ||||||||
1067 | } | ||||||||
1068 | |||||||||
1069 | /// Matches logical shift operations. | ||||||||
1070 | template <typename LHS, typename RHS> | ||||||||
1071 | inline BinOpPred_match<LHS, RHS, is_logical_shift_op> | ||||||||
1072 | m_LogicalShift(const LHS &L, const RHS &R) { | ||||||||
1073 | return BinOpPred_match<LHS, RHS, is_logical_shift_op>(L, R); | ||||||||
1074 | } | ||||||||
1075 | |||||||||
1076 | /// Matches bitwise logic operations. | ||||||||
1077 | template <typename LHS, typename RHS> | ||||||||
1078 | inline BinOpPred_match<LHS, RHS, is_bitwiselogic_op> | ||||||||
1079 | m_BitwiseLogic(const LHS &L, const RHS &R) { | ||||||||
1080 | return BinOpPred_match<LHS, RHS, is_bitwiselogic_op>(L, R); | ||||||||
1081 | } | ||||||||
1082 | |||||||||
1083 | /// Matches integer division operations. | ||||||||
1084 | template <typename LHS, typename RHS> | ||||||||
1085 | inline BinOpPred_match<LHS, RHS, is_idiv_op> m_IDiv(const LHS &L, | ||||||||
1086 | const RHS &R) { | ||||||||
1087 | return BinOpPred_match<LHS, RHS, is_idiv_op>(L, R); | ||||||||
1088 | } | ||||||||
1089 | |||||||||
1090 | /// Matches integer remainder operations. | ||||||||
1091 | template <typename LHS, typename RHS> | ||||||||
1092 | inline BinOpPred_match<LHS, RHS, is_irem_op> m_IRem(const LHS &L, | ||||||||
1093 | const RHS &R) { | ||||||||
1094 | return BinOpPred_match<LHS, RHS, is_irem_op>(L, R); | ||||||||
1095 | } | ||||||||
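
A sketch of the group matchers:

  // Any shift of X by a constant amount, regardless of direction.
  Value *X;
  const APInt *Amt;
  bool ShiftByConst = match(V, m_Shift(m_Value(X), m_APInt(Amt)));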
1096 | |||||||||
1097 | //===----------------------------------------------------------------------===// | ||||||||
1098 | // Class that matches exact binary ops. | ||||||||
1099 | // | ||||||||
1100 | template <typename SubPattern_t> struct Exact_match { | ||||||||
1101 | SubPattern_t SubPattern; | ||||||||
1102 | |||||||||
1103 | Exact_match(const SubPattern_t &SP) : SubPattern(SP) {} | ||||||||
1104 | |||||||||
1105 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1106 | if (auto *PEO = dyn_cast<PossiblyExactOperator>(V)) | ||||||||
1107 | return PEO->isExact() && SubPattern.match(V); | ||||||||
1108 | return false; | ||||||||
1109 | } | ||||||||
1110 | }; | ||||||||
1111 | |||||||||
1112 | template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) { | ||||||||
1113 | return SubPattern; | ||||||||
1114 | } | ||||||||
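
m_Exact wraps another pattern rather than taking operands; a sketch:

  // "sdiv exact X, Y": the division is known to leave no remainder, so
  // a caller could fold "mul (sdiv exact X, Y), Y" back to X.
  Value *X, *Y;
  bool NoRemainder = match(V, m_Exact(m_SDiv(m_Value(X), m_Value(Y))));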
1115 | |||||||||
1116 | //===----------------------------------------------------------------------===// | ||||||||
1117 | // Matchers for CmpInst classes | ||||||||
1118 | // | ||||||||
1119 | |||||||||
1120 | template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy, | ||||||||
1121 | bool Commutable = false> | ||||||||
1122 | struct CmpClass_match { | ||||||||
1123 | PredicateTy &Predicate; | ||||||||
1124 | LHS_t L; | ||||||||
1125 | RHS_t R; | ||||||||
1126 | |||||||||
1127 | // The evaluation order is always stable, regardless of Commutability. | ||||||||
1128 | // The LHS is always matched first. | ||||||||
1129 | CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS) | ||||||||
1130 | : Predicate(Pred), L(LHS), R(RHS) {} | ||||||||
1131 | |||||||||
1132 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1133 | if (auto *I = dyn_cast<Class>(V)) | ||||||||
1134 | if ((L.match(I->getOperand(0)) && R.match(I->getOperand(1))) || | ||||||||
1135 | (Commutable && L.match(I->getOperand(1)) && | ||||||||
1136 | R.match(I->getOperand(0)))) { | ||||||||
1137 | Predicate = I->getPredicate(); | ||||||||
1138 | return true; | ||||||||
1139 | } | ||||||||
1140 | return false; | ||||||||
1141 | } | ||||||||
1142 | }; | ||||||||
1143 | |||||||||
1144 | template <typename LHS, typename RHS> | ||||||||
1145 | inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate> | ||||||||
1146 | m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) { | ||||||||
1147 | return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R); | ||||||||
1148 | } | ||||||||
1149 | |||||||||
1150 | template <typename LHS, typename RHS> | ||||||||
1151 | inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate> | ||||||||
1152 | m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) { | ||||||||
1153 | return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R); | ||||||||
1154 | } | ||||||||
1155 | |||||||||
1156 | template <typename LHS, typename RHS> | ||||||||
1157 | inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate> | ||||||||
1158 | m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) { | ||||||||
1159 | return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R); | ||||||||
1160 | } | ||||||||
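
The compare matchers bind the predicate as an output parameter; a sketch:

  // Capture an integer compare and its predicate in one step.
  ICmpInst::Predicate Pred;
  Value *A, *B;
  if (match(V, m_ICmp(Pred, m_Value(A), m_Value(B))) &&
      ICmpInst::isEquality(Pred)) {
    // V is "icmp eq A, B" or "icmp ne A, B".
  }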
1161 | |||||||||
1162 | //===----------------------------------------------------------------------===// | ||||||||
1163 | // Matchers for instructions with a given opcode and number of operands. | ||||||||
1164 | // | ||||||||
1165 | |||||||||
1166 | /// Matches instructions with Opcode and one operand. | ||||||||
1167 | template <typename T0, unsigned Opcode> struct OneOps_match { | ||||||||
1168 | T0 Op1; | ||||||||
1169 | |||||||||
1170 | OneOps_match(const T0 &Op1) : Op1(Op1) {} | ||||||||
1171 | |||||||||
1172 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1173 | if (V->getValueID() == Value::InstructionVal + Opcode) { | ||||||||
1174 | auto *I = cast<Instruction>(V); | ||||||||
1175 | return Op1.match(I->getOperand(0)); | ||||||||
1176 | } | ||||||||
1177 | return false; | ||||||||
1178 | } | ||||||||
1179 | }; | ||||||||
1180 | |||||||||
1181 | /// Matches instructions with Opcode and two operands. | ||||||||
1182 | template <typename T0, typename T1, unsigned Opcode> struct TwoOps_match { | ||||||||
1183 | T0 Op1; | ||||||||
1184 | T1 Op2; | ||||||||
1185 | |||||||||
1186 | TwoOps_match(const T0 &Op1, const T1 &Op2) : Op1(Op1), Op2(Op2) {} | ||||||||
1187 | |||||||||
1188 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1189 | if (V->getValueID() == Value::InstructionVal + Opcode) { | ||||||||
1190 | auto *I = cast<Instruction>(V); | ||||||||
1191 | return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)); | ||||||||
1192 | } | ||||||||
1193 | return false; | ||||||||
1194 | } | ||||||||
1195 | }; | ||||||||
1196 | |||||||||
1197 | /// Matches instructions with Opcode and three operands. | ||||||||
1198 | template <typename T0, typename T1, typename T2, unsigned Opcode> | ||||||||
1199 | struct ThreeOps_match { | ||||||||
1200 | T0 Op1; | ||||||||
1201 | T1 Op2; | ||||||||
1202 | T2 Op3; | ||||||||
1203 | |||||||||
1204 | ThreeOps_match(const T0 &Op1, const T1 &Op2, const T2 &Op3) | ||||||||
1205 | : Op1(Op1), Op2(Op2), Op3(Op3) {} | ||||||||
1206 | |||||||||
1207 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1208 | if (V->getValueID() == Value::InstructionVal + Opcode) { | ||||||||
1209 | auto *I = cast<Instruction>(V); | ||||||||
1210 | return Op1.match(I->getOperand(0)) && Op2.match(I->getOperand(1)) && | ||||||||
1211 | Op3.match(I->getOperand(2)); | ||||||||
1212 | } | ||||||||
1213 | return false; | ||||||||
1214 | } | ||||||||
1215 | }; | ||||||||
1216 | |||||||||
1217 | /// Matches SelectInst. | ||||||||
1218 | template <typename Cond, typename LHS, typename RHS> | ||||||||
1219 | inline ThreeOps_match<Cond, LHS, RHS, Instruction::Select> | ||||||||
1220 | m_Select(const Cond &C, const LHS &L, const RHS &R) { | ||||||||
1221 | return ThreeOps_match<Cond, LHS, RHS, Instruction::Select>(C, L, R); | ||||||||
1222 | } | ||||||||
1223 | |||||||||
1224 | /// This matches a select of two constants, e.g.: | ||||||||
1225 | /// m_SelectCst<-1, 0>(m_Value(V)) | ||||||||
1226 | template <int64_t L, int64_t R, typename Cond> | ||||||||
1227 | inline ThreeOps_match<Cond, constantint_match<L>, constantint_match<R>, | ||||||||
1228 | Instruction::Select> | ||||||||
1229 | m_SelectCst(const Cond &C) { | ||||||||
1230 | return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>()); | ||||||||
1231 | } | ||||||||
1232 | |||||||||
1233 | /// Matches InsertElementInst. | ||||||||
1234 | template <typename Val_t, typename Elt_t, typename Idx_t> | ||||||||
1235 | inline ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement> | ||||||||
1236 | m_InsertElement(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx) { | ||||||||
1237 | return ThreeOps_match<Val_t, Elt_t, Idx_t, Instruction::InsertElement>( | ||||||||
1238 | Val, Elt, Idx); | ||||||||
1239 | } | ||||||||
1240 | |||||||||
1241 | /// Matches ExtractElementInst. | ||||||||
1242 | template <typename Val_t, typename Idx_t> | ||||||||
1243 | inline TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement> | ||||||||
1244 | m_ExtractElement(const Val_t &Val, const Idx_t &Idx) { | ||||||||
1245 | return TwoOps_match<Val_t, Idx_t, Instruction::ExtractElement>(Val, Idx); | ||||||||
1246 | } | ||||||||
1247 | |||||||||
1248 | /// Matches ShuffleVectorInst. | ||||||||
1249 | template <typename V1_t, typename V2_t, typename Mask_t> | ||||||||
1250 | inline ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector> | ||||||||
1251 | m_ShuffleVector(const V1_t &v1, const V2_t &v2, const Mask_t &m) { | ||||||||
1252 | return ThreeOps_match<V1_t, V2_t, Mask_t, Instruction::ShuffleVector>(v1, v2, | ||||||||
1253 | m); | ||||||||
1254 | } | ||||||||
1255 | |||||||||
1256 | /// Matches LoadInst. | ||||||||
1257 | template <typename OpTy> | ||||||||
1258 | inline OneOps_match<OpTy, Instruction::Load> m_Load(const OpTy &Op) { | ||||||||
1259 | return OneOps_match<OpTy, Instruction::Load>(Op); | ||||||||
1260 | } | ||||||||
1261 | |||||||||
1262 | /// Matches StoreInst. | ||||||||
1263 | template <typename ValueOpTy, typename PointerOpTy> | ||||||||
1264 | inline TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store> | ||||||||
1265 | m_Store(const ValueOpTy &ValueOp, const PointerOpTy &PointerOp) { | ||||||||
1266 | return TwoOps_match<ValueOpTy, PointerOpTy, Instruction::Store>(ValueOp, | ||||||||
1267 | PointerOp); | ||||||||
1268 | } | ||||||||
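
A sketch combining the fixed-arity matchers above:

  // "select Cond, X, 0": X gated by a condition.
  Value *Cond, *X;
  bool Gated = match(V, m_Select(m_Value(Cond), m_Value(X), m_ZeroInt()));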
1269 | |||||||||
1270 | //===----------------------------------------------------------------------===// | ||||||||
1271 | // Matchers for CastInst classes | ||||||||
1272 | // | ||||||||
1273 | |||||||||
1274 | template <typename Op_t, unsigned Opcode> struct CastClass_match { | ||||||||
1275 | Op_t Op; | ||||||||
1276 | |||||||||
1277 | CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {} | ||||||||
1278 | |||||||||
1279 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1280 | if (auto *O = dyn_cast<Operator>(V)) | ||||||||
1281 | return O->getOpcode() == Opcode && Op.match(O->getOperand(0)); | ||||||||
1282 | return false; | ||||||||
1283 | } | ||||||||
1284 | }; | ||||||||
1285 | |||||||||
1286 | /// Matches BitCast. | ||||||||
1287 | template <typename OpTy> | ||||||||
1288 | inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) { | ||||||||
1289 | return CastClass_match<OpTy, Instruction::BitCast>(Op); | ||||||||
1290 | } | ||||||||
1291 | |||||||||
1292 | /// Matches PtrToInt. | ||||||||
1293 | template <typename OpTy> | ||||||||
1294 | inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) { | ||||||||
1295 | return CastClass_match<OpTy, Instruction::PtrToInt>(Op); | ||||||||
1296 | } | ||||||||
1297 | |||||||||
1298 | /// Matches Trunc. | ||||||||
1299 | template <typename OpTy> | ||||||||
1300 | inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) { | ||||||||
1301 | return CastClass_match<OpTy, Instruction::Trunc>(Op); | ||||||||
1302 | } | ||||||||
1303 | |||||||||
1304 | template <typename OpTy> | ||||||||
1305 | inline match_combine_or<CastClass_match<OpTy, Instruction::Trunc>, OpTy> | ||||||||
1306 | m_TruncOrSelf(const OpTy &Op) { | ||||||||
1307 | return m_CombineOr(m_Trunc(Op), Op); | ||||||||
1308 | } | ||||||||
1309 | |||||||||
1310 | /// Matches SExt. | ||||||||
1311 | template <typename OpTy> | ||||||||
1312 | inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) { | ||||||||
1313 | return CastClass_match<OpTy, Instruction::SExt>(Op); | ||||||||
1314 | } | ||||||||
1315 | |||||||||
1316 | /// Matches ZExt. | ||||||||
1317 | template <typename OpTy> | ||||||||
1318 | inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) { | ||||||||
1319 | return CastClass_match<OpTy, Instruction::ZExt>(Op); | ||||||||
1320 | } | ||||||||
1321 | |||||||||
1322 | template <typename OpTy> | ||||||||
1323 | inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, OpTy> | ||||||||
1324 | m_ZExtOrSelf(const OpTy &Op) { | ||||||||
1325 | return m_CombineOr(m_ZExt(Op), Op); | ||||||||
1326 | } | ||||||||
1327 | |||||||||
1328 | template <typename OpTy> | ||||||||
1329 | inline match_combine_or<CastClass_match<OpTy, Instruction::SExt>, OpTy> | ||||||||
1330 | m_SExtOrSelf(const OpTy &Op) { | ||||||||
1331 | return m_CombineOr(m_SExt(Op), Op); | ||||||||
1332 | } | ||||||||
1333 | |||||||||
1334 | template <typename OpTy> | ||||||||
1335 | inline match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, | ||||||||
1336 | CastClass_match<OpTy, Instruction::SExt>> | ||||||||
1337 | m_ZExtOrSExt(const OpTy &Op) { | ||||||||
1338 | return m_CombineOr(m_ZExt(Op), m_SExt(Op)); | ||||||||
1339 | } | ||||||||
1340 | |||||||||
1341 | template <typename OpTy> | ||||||||
1342 | inline match_combine_or< | ||||||||
1343 | match_combine_or<CastClass_match<OpTy, Instruction::ZExt>, | ||||||||
1344 | CastClass_match<OpTy, Instruction::SExt>>, | ||||||||
1345 | OpTy> | ||||||||
1346 | m_ZExtOrSExtOrSelf(const OpTy &Op) { | ||||||||
1347 | return m_CombineOr(m_ZExtOrSExt(Op), Op); | ||||||||
1348 | } | ||||||||
1349 | |||||||||
1350 | /// Matches UIToFP. | ||||||||
1351 | template <typename OpTy> | ||||||||
1352 | inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) { | ||||||||
1353 | return CastClass_match<OpTy, Instruction::UIToFP>(Op); | ||||||||
1354 | } | ||||||||
1355 | |||||||||
1356 | /// Matches SIToFP. | ||||||||
1357 | template <typename OpTy> | ||||||||
1358 | inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) { | ||||||||
1359 | return CastClass_match<OpTy, Instruction::SIToFP>(Op); | ||||||||
1360 | } | ||||||||
1361 | |||||||||
1362 | /// Matches FPTrunc | ||||||||
1363 | template <typename OpTy> | ||||||||
1364 | inline CastClass_match<OpTy, Instruction::FPTrunc> m_FPTrunc(const OpTy &Op) { | ||||||||
1365 | return CastClass_match<OpTy, Instruction::FPTrunc>(Op); | ||||||||
1366 | } | ||||||||
1367 | |||||||||
1368 | /// Matches FPExt | ||||||||
1369 | template <typename OpTy> | ||||||||
1370 | inline CastClass_match<OpTy, Instruction::FPExt> m_FPExt(const OpTy &Op) { | ||||||||
1371 | return CastClass_match<OpTy, Instruction::FPExt>(Op); | ||||||||
1372 | } | ||||||||
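
A sketch of the look-through combinators:

  // Accept X widened by zext or sext, or X used directly.
  Value *X;
  bool SeenThroughExt = match(V, m_ZExtOrSExtOrSelf(m_Value(X)));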
1373 | |||||||||
1374 | //===----------------------------------------------------------------------===// | ||||||||
1375 | // Matchers for control flow. | ||||||||
1376 | // | ||||||||
1377 | |||||||||
1378 | struct br_match { | ||||||||
1379 | BasicBlock *&Succ; | ||||||||
1380 | |||||||||
1381 | br_match(BasicBlock *&Succ) : Succ(Succ) {} | ||||||||
1382 | |||||||||
1383 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1384 | if (auto *BI = dyn_cast<BranchInst>(V)) | ||||||||
1385 | if (BI->isUnconditional()) { | ||||||||
1386 | Succ = BI->getSuccessor(0); | ||||||||
1387 | return true; | ||||||||
1388 | } | ||||||||
1389 | return false; | ||||||||
1390 | } | ||||||||
1391 | }; | ||||||||
1392 | |||||||||
1393 | inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); } | ||||||||
1394 | |||||||||
1395 | template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t> | ||||||||
1396 | struct brc_match { | ||||||||
1397 | Cond_t Cond; | ||||||||
1398 | TrueBlock_t T; | ||||||||
1399 | FalseBlock_t F; | ||||||||
1400 | |||||||||
1401 | brc_match(const Cond_t &C, const TrueBlock_t &t, const FalseBlock_t &f) | ||||||||
1402 | : Cond(C), T(t), F(f) {} | ||||||||
1403 | |||||||||
1404 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1405 | if (auto *BI = dyn_cast<BranchInst>(V)) | ||||||||
1406 | if (BI->isConditional() && Cond.match(BI->getCondition())) | ||||||||
1407 | return T.match(BI->getSuccessor(0)) && F.match(BI->getSuccessor(1)); | ||||||||
1408 | return false; | ||||||||
1409 | } | ||||||||
1410 | }; | ||||||||
1411 | |||||||||
1412 | template <typename Cond_t> | ||||||||
1413 | inline brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>> | ||||||||
1414 | m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) { | ||||||||
1415 | return brc_match<Cond_t, bind_ty<BasicBlock>, bind_ty<BasicBlock>>( | ||||||||
1416 | C, m_BasicBlock(T), m_BasicBlock(F)); | ||||||||
1417 | } | ||||||||
1418 | |||||||||
1419 | template <typename Cond_t, typename TrueBlock_t, typename FalseBlock_t> | ||||||||
1420 | inline brc_match<Cond_t, TrueBlock_t, FalseBlock_t> | ||||||||
1421 | m_Br(const Cond_t &C, const TrueBlock_t &T, const FalseBlock_t &F) { | ||||||||
1422 | return brc_match<Cond_t, TrueBlock_t, FalseBlock_t>(C, T, F); | ||||||||
1423 | } | ||||||||
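
A sketch of the branch matcher (Term is a hypothetical block terminator supplied by the caller):

  // A conditional branch on an integer compare, capturing the predicate,
  // the compared values, and both successors.
  ICmpInst::Predicate Pred;
  Value *A, *B;
  BasicBlock *TrueBB, *FalseBB;
  bool M = match(Term, m_Br(m_ICmp(Pred, m_Value(A), m_Value(B)),
                            TrueBB, FalseBB));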
1424 | |||||||||
1425 | //===----------------------------------------------------------------------===// | ||||||||
1426 | // Matchers for max/min idioms, eg: "select (sgt x, y), x, y" -> smax(x,y). | ||||||||
1427 | // | ||||||||
1428 | |||||||||
1429 | template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t, | ||||||||
1430 | bool Commutable = false> | ||||||||
1431 | struct MaxMin_match { | ||||||||
1432 | LHS_t L; | ||||||||
1433 | RHS_t R; | ||||||||
1434 | |||||||||
1435 | // The evaluation order is always stable, regardless of Commutability. | ||||||||
1436 | // The LHS is always matched first. | ||||||||
1437 | MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {} | ||||||||
1438 | |||||||||
1439 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1440 | // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x". | ||||||||
1441 | auto *SI = dyn_cast<SelectInst>(V); | ||||||||
1442 | if (!SI) | ||||||||
1443 | return false; | ||||||||
1444 | auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition()); | ||||||||
1445 | if (!Cmp) | ||||||||
1446 | return false; | ||||||||
1447 | // At this point we have a select conditioned on a comparison. Check that | ||||||||
1448 | // it is the values returned by the select that are being compared. | ||||||||
1449 | Value *TrueVal = SI->getTrueValue(); | ||||||||
1450 | Value *FalseVal = SI->getFalseValue(); | ||||||||
1451 | Value *LHS = Cmp->getOperand(0); | ||||||||
1452 | Value *RHS = Cmp->getOperand(1); | ||||||||
1453 | if ((TrueVal != LHS || FalseVal != RHS) && | ||||||||
1454 | (TrueVal != RHS || FalseVal != LHS)) | ||||||||
1455 | return false; | ||||||||
1456 | typename CmpInst_t::Predicate Pred = | ||||||||
1457 | LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate(); | ||||||||
1458 | // Does "(x pred y) ? x : y" represent the desired max/min operation? | ||||||||
1459 | if (!Pred_t::match(Pred)) | ||||||||
1460 | return false; | ||||||||
1461 | // It does! Bind the operands. | ||||||||
1462 | return (L.match(LHS) && R.match(RHS)) || | ||||||||
1463 | (Commutable && L.match(RHS) && R.match(LHS)); | ||||||||
1464 | } | ||||||||
1465 | }; | ||||||||
1466 | |||||||||
1467 | /// Helper class for identifying signed max predicates. | ||||||||
1468 | struct smax_pred_ty { | ||||||||
1469 | static bool match(ICmpInst::Predicate Pred) { | ||||||||
1470 | return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE; | ||||||||
1471 | } | ||||||||
1472 | }; | ||||||||
1473 | |||||||||
1474 | /// Helper class for identifying signed min predicates. | ||||||||
1475 | struct smin_pred_ty { | ||||||||
1476 | static bool match(ICmpInst::Predicate Pred) { | ||||||||
1477 | return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE; | ||||||||
1478 | } | ||||||||
1479 | }; | ||||||||
1480 | |||||||||
1481 | /// Helper class for identifying unsigned max predicates. | ||||||||
1482 | struct umax_pred_ty { | ||||||||
1483 | static bool match(ICmpInst::Predicate Pred) { | ||||||||
1484 | return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE; | ||||||||
1485 | } | ||||||||
1486 | }; | ||||||||
1487 | |||||||||
1488 | /// Helper class for identifying unsigned min predicates. | ||||||||
1489 | struct umin_pred_ty { | ||||||||
1490 | static bool match(ICmpInst::Predicate Pred) { | ||||||||
1491 | return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE; | ||||||||
1492 | } | ||||||||
1493 | }; | ||||||||
1494 | |||||||||
1495 | /// Helper class for identifying ordered max predicates. | ||||||||
1496 | struct ofmax_pred_ty { | ||||||||
1497 | static bool match(FCmpInst::Predicate Pred) { | ||||||||
1498 | return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE; | ||||||||
1499 | } | ||||||||
1500 | }; | ||||||||
1501 | |||||||||
1502 | /// Helper class for identifying ordered min predicates. | ||||||||
1503 | struct ofmin_pred_ty { | ||||||||
1504 | static bool match(FCmpInst::Predicate Pred) { | ||||||||
1505 | return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE; | ||||||||
1506 | } | ||||||||
1507 | }; | ||||||||
1508 | |||||||||
1509 | /// Helper class for identifying unordered max predicates. | ||||||||
1510 | struct ufmax_pred_ty { | ||||||||
1511 | static bool match(FCmpInst::Predicate Pred) { | ||||||||
1512 | return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE; | ||||||||
1513 | } | ||||||||
1514 | }; | ||||||||
1515 | |||||||||
1516 | /// Helper class for identifying unordered min predicates. | ||||||||
1517 | struct ufmin_pred_ty { | ||||||||
1518 | static bool match(FCmpInst::Predicate Pred) { | ||||||||
1519 | return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE; | ||||||||
1520 | } | ||||||||
1521 | }; | ||||||||
1522 | |||||||||
1523 | template <typename LHS, typename RHS> | ||||||||
1524 | inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L, | ||||||||
1525 | const RHS &R) { | ||||||||
1526 | return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R); | ||||||||
1527 | } | ||||||||
1528 | |||||||||
1529 | template <typename LHS, typename RHS> | ||||||||
1530 | inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L, | ||||||||
1531 | const RHS &R) { | ||||||||
1532 | return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R); | ||||||||
1533 | } | ||||||||
1534 | |||||||||
1535 | template <typename LHS, typename RHS> | ||||||||
1536 | inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L, | ||||||||
1537 | const RHS &R) { | ||||||||
1538 | return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R); | ||||||||
1539 | } | ||||||||
1540 | |||||||||
1541 | template <typename LHS, typename RHS> | ||||||||
1542 | inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L, | ||||||||
1543 | const RHS &R) { | ||||||||
1544 | return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R); | ||||||||
1545 | } | ||||||||
1546 | |||||||||
1547 | /// Match an 'ordered' floating point maximum function. | ||||||||
1548 | /// Floating point has one special value 'NaN'. Therefore, there is no total | ||||||||
1549 | /// order. However, if we can ignore the 'NaN' value (for example, because of a | ||||||||
1550 | /// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum' | ||||||||
1551 | /// semantics. In the presence of 'NaN' we have to preserve the original | ||||||||
1552 | /// select(fcmp(ogt/ge, L, R), L, R) semantics matched by this predicate. | ||||||||
1553 | /// | ||||||||
1554 | /// max(L, R) iff L and R are not NaN | ||||||||
1555 | /// m_OrdFMax(L, R) = R iff L or R are NaN | ||||||||
1556 | template <typename LHS, typename RHS> | ||||||||
1557 | inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L, | ||||||||
1558 | const RHS &R) { | ||||||||
1559 | return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R); | ||||||||
1560 | } | ||||||||
1561 | |||||||||
1562 | /// Match an 'ordered' floating point minimum function. | ||||||||
1563 | /// Floating point has one special value 'NaN'. Therefore, there is no total | ||||||||
1564 | /// order. However, if we can ignore the 'NaN' value (for example, because of a | ||||||||
1565 | /// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum' | ||||||||
1566 | /// semantics. In the presence of 'NaN' we have to preserve the original | ||||||||
1567 | /// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate. | ||||||||
1568 | /// | ||||||||
1569 | /// min(L, R) iff L and R are not NaN | ||||||||
1570 | /// m_OrdFMin(L, R) = R iff L or R are NaN | ||||||||
1571 | template <typename LHS, typename RHS> | ||||||||
1572 | inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L, | ||||||||
1573 | const RHS &R) { | ||||||||
1574 | return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R); | ||||||||
1575 | } | ||||||||
1576 | |||||||||
1577 | /// Match an 'unordered' floating point maximum function. | ||||||||
1578 | /// Floating point has one special value 'NaN'. Therefore, there is no total | ||||||||
1579 | /// order. However, if we can ignore the 'NaN' value (for example, because of a | ||||||||
1580 | /// 'no-nans-float-math' flag) a combination of a fcmp and select has 'maximum' | ||||||||
1581 | /// semantics. In the presence of 'NaN' we have to preserve the original | ||||||||
1582 | /// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate. | ||||||||
1583 | /// | ||||||||
1584 | /// max(L, R) iff L and R are not NaN | ||||||||
1585 | /// m_UnordFMax(L, R) = L iff L or R are NaN | ||||||||
1586 | template <typename LHS, typename RHS> | ||||||||
1587 | inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty> | ||||||||
1588 | m_UnordFMax(const LHS &L, const RHS &R) { | ||||||||
1589 | return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R); | ||||||||
1590 | } | ||||||||
1591 | |||||||||
1592 | /// Match an 'unordered' floating point minimum function. | ||||||||
1593 | /// Floating point has one special value 'NaN'. Therefore, there is no total | ||||||||
1594 | /// order. However, if we can ignore the 'NaN' value (for example, because of a | ||||||||
1595 | /// 'no-nans-float-math' flag), a combination of an fcmp and a select has 'minimum' | ||||||||
1596 | /// semantics. In the presence of 'NaN' we have to preserve the original | ||||||||
1597 | /// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate. | ||||||||
1598 | /// | ||||||||
1599 | ///   m_UnordFMin(L, R) = min(L, R)  iff L and R are not NaN | ||||||||
1600 | ///   m_UnordFMin(L, R) = L          iff L or R are NaN | ||||||||
1601 | template <typename LHS, typename RHS> | ||||||||
1602 | inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty> | ||||||||
1603 | m_UnordFMin(const LHS &L, const RHS &R) { | ||||||||
1604 | return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R); | ||||||||
1605 | } | ||||||||
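An editorial sketch of how a client might tell the two NaN behaviors apart (hypothetical helper; same assumptions as the sketch above):

static bool matchFMinIdiom(llvm::Value *V, llvm::Value *&A, llvm::Value *&B,
                           bool &IsOrdered) {
  using namespace llvm::PatternMatch;
  // Ordered form: selects B when an input is NaN. Unordered form: selects A.
  if (match(V, m_OrdFMin(m_Value(A), m_Value(B)))) {
    IsOrdered = true;
    return true;
  }
  IsOrdered = false;
  return match(V, m_UnordFMin(m_Value(A), m_Value(B)));
}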
1606 | |||||||||
1607 | //===----------------------------------------------------------------------===// | ||||||||
1608 | // Matchers for overflow check patterns: e.g. (a + b) u< a | ||||||||
1609 | // | ||||||||
1610 | |||||||||
1611 | template <typename LHS_t, typename RHS_t, typename Sum_t> | ||||||||
1612 | struct UAddWithOverflow_match { | ||||||||
1613 | LHS_t L; | ||||||||
1614 | RHS_t R; | ||||||||
1615 | Sum_t S; | ||||||||
1616 | |||||||||
1617 | UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S) | ||||||||
1618 | : L(L), R(R), S(S) {} | ||||||||
1619 | |||||||||
1620 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1621 | Value *ICmpLHS, *ICmpRHS; | ||||||||
1622 | ICmpInst::Predicate Pred; | ||||||||
1623 | if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V)) | ||||||||
1624 | return false; | ||||||||
1625 | |||||||||
1626 | Value *AddLHS, *AddRHS; | ||||||||
1627 | auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS)); | ||||||||
1628 | |||||||||
1629 | // (a + b) u< a, (a + b) u< b | ||||||||
1630 | if (Pred == ICmpInst::ICMP_ULT) | ||||||||
1631 | if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS)) | ||||||||
1632 | return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS); | ||||||||
1633 | |||||||||
1634 | // a >u (a + b), b >u (a + b) | ||||||||
1635 | if (Pred == ICmpInst::ICMP_UGT) | ||||||||
1636 | if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS)) | ||||||||
1637 | return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS); | ||||||||
1638 | |||||||||
1639 | // Match special-case for increment-by-1. | ||||||||
1640 | if (Pred == ICmpInst::ICMP_EQ) { | ||||||||
1641 | // (a + 1) == 0 | ||||||||
1642 | // (1 + a) == 0 | ||||||||
1643 | if (AddExpr.match(ICmpLHS) && m_ZeroInt().match(ICmpRHS) && | ||||||||
1644 | (m_One().match(AddLHS) || m_One().match(AddRHS))) | ||||||||
1645 | return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS); | ||||||||
1646 | // 0 == (a + 1) | ||||||||
1647 | // 0 == (1 + a) | ||||||||
1648 | if (m_ZeroInt().match(ICmpLHS) && AddExpr.match(ICmpRHS) && | ||||||||
1649 | (m_One().match(AddLHS) || m_One().match(AddRHS))) | ||||||||
1650 | return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS); | ||||||||
1651 | } | ||||||||
1652 | |||||||||
1653 | return false; | ||||||||
1654 | } | ||||||||
1655 | }; | ||||||||
1656 | |||||||||
1657 | /// Match an icmp instruction checking for unsigned overflow on addition. | ||||||||
1658 | /// | ||||||||
1659 | /// S is matched to the addition whose result is being checked for overflow, and | ||||||||
1660 | /// L and R are matched to the LHS and RHS of S. | ||||||||
1661 | template <typename LHS_t, typename RHS_t, typename Sum_t> | ||||||||
1662 | UAddWithOverflow_match<LHS_t, RHS_t, Sum_t> | ||||||||
1663 | m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) { | ||||||||
1664 | return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S); | ||||||||
1665 | } | ||||||||
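An editorial usage sketch (hypothetical helper): one query recognizes every overflow-check shape handled by the matcher above and recovers the pieces.

static bool matchUAddOverflowCheck(llvm::Value *Cond, llvm::Value *&A,
                                   llvm::Value *&B, llvm::Value *&Sum) {
  using namespace llvm::PatternMatch;
  // Covers "(a + b) u< a", "a >u (a + b)", and the "(a + 1) == 0" forms.
  return match(Cond, m_UAddWithOverflow(m_Value(A), m_Value(B), m_Value(Sum)));
}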
1666 | |||||||||
1667 | template <typename Opnd_t> struct Argument_match { | ||||||||
1668 | unsigned OpI; | ||||||||
1669 | Opnd_t Val; | ||||||||
1670 | |||||||||
1671 | Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {} | ||||||||
1672 | |||||||||
1673 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1674 | // FIXME: Should likely be switched to use `CallBase`. | ||||||||
1675 | if (const auto *CI = dyn_cast<CallInst>(V)) | ||||||||
1676 | return Val.match(CI->getArgOperand(OpI)); | ||||||||
1677 | return false; | ||||||||
1678 | } | ||||||||
1679 | }; | ||||||||
1680 | |||||||||
1681 | /// Match an argument. | ||||||||
1682 | template <unsigned OpI, typename Opnd_t> | ||||||||
1683 | inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) { | ||||||||
1684 | return Argument_match<Opnd_t>(OpI, Op); | ||||||||
1685 | } | ||||||||
1686 | |||||||||
1687 | /// Intrinsic matchers. | ||||||||
1688 | struct IntrinsicID_match { | ||||||||
1689 | unsigned ID; | ||||||||
1690 | |||||||||
1691 | IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {} | ||||||||
1692 | |||||||||
1693 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1694 | if (const auto *CI = dyn_cast<CallInst>(V)) | ||||||||
1695 | if (const auto *F = CI->getCalledFunction()) | ||||||||
1696 | return F->getIntrinsicID() == ID; | ||||||||
1697 | return false; | ||||||||
1698 | } | ||||||||
1699 | }; | ||||||||
1700 | |||||||||
1701 | /// Intrinsic matchers are combinations of ID matchers and argument | ||||||||
1702 | /// matchers. Higher-arity matchers are defined recursively in terms of and-ing | ||||||||
1703 | /// them with lower-arity matchers. Here are some convenient typedefs for up to | ||||||||
1704 | /// several arguments; more can be added as needed. | ||||||||
1705 | template <typename T0 = void, typename T1 = void, typename T2 = void, | ||||||||
1706 | typename T3 = void, typename T4 = void, typename T5 = void, | ||||||||
1707 | typename T6 = void, typename T7 = void, typename T8 = void, | ||||||||
1708 | typename T9 = void, typename T10 = void> | ||||||||
1709 | struct m_Intrinsic_Ty; | ||||||||
1710 | template <typename T0> struct m_Intrinsic_Ty<T0> { | ||||||||
1711 | using Ty = match_combine_and<IntrinsicID_match, Argument_match<T0>>; | ||||||||
1712 | }; | ||||||||
1713 | template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> { | ||||||||
1714 | using Ty = | ||||||||
1715 | match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>; | ||||||||
1716 | }; | ||||||||
1717 | template <typename T0, typename T1, typename T2> | ||||||||
1718 | struct m_Intrinsic_Ty<T0, T1, T2> { | ||||||||
1719 | using Ty = | ||||||||
1720 | match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty, | ||||||||
1721 | Argument_match<T2>>; | ||||||||
1722 | }; | ||||||||
1723 | template <typename T0, typename T1, typename T2, typename T3> | ||||||||
1724 | struct m_Intrinsic_Ty<T0, T1, T2, T3> { | ||||||||
1725 | using Ty = | ||||||||
1726 | match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty, | ||||||||
1727 | Argument_match<T3>>; | ||||||||
1728 | }; | ||||||||
1729 | |||||||||
1730 | /// Match intrinsic calls like this: | ||||||||
1731 | /// m_Intrinsic<Intrinsic::fabs>(m_Value(X)) | ||||||||
1732 | template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() { | ||||||||
1733 | return IntrinsicID_match(IntrID); | ||||||||
1734 | } | ||||||||
1735 | |||||||||
1736 | template <Intrinsic::ID IntrID, typename T0> | ||||||||
1737 | inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) { | ||||||||
1738 | return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0)); | ||||||||
1739 | } | ||||||||
1740 | |||||||||
1741 | template <Intrinsic::ID IntrID, typename T0, typename T1> | ||||||||
1742 | inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0, | ||||||||
1743 | const T1 &Op1) { | ||||||||
1744 | return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1)); | ||||||||
1745 | } | ||||||||
1746 | |||||||||
1747 | template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2> | ||||||||
1748 | inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty | ||||||||
1749 | m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) { | ||||||||
1750 | return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2)); | ||||||||
1751 | } | ||||||||
1752 | |||||||||
1753 | template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2, | ||||||||
1754 | typename T3> | ||||||||
1755 | inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty | ||||||||
1756 | m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) { | ||||||||
1757 | return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3)); | ||||||||
1758 | } | ||||||||
1759 | |||||||||
1760 | // Helper intrinsic matching specializations. | ||||||||
1761 | template <typename Opnd0> | ||||||||
1762 | inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BitReverse(const Opnd0 &Op0) { | ||||||||
1763 | return m_Intrinsic<Intrinsic::bitreverse>(Op0); | ||||||||
1764 | } | ||||||||
1765 | |||||||||
1766 | template <typename Opnd0> | ||||||||
1767 | inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) { | ||||||||
1768 | return m_Intrinsic<Intrinsic::bswap>(Op0); | ||||||||
1769 | } | ||||||||
1770 | |||||||||
1771 | template <typename Opnd0> | ||||||||
1772 | inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FAbs(const Opnd0 &Op0) { | ||||||||
1773 | return m_Intrinsic<Intrinsic::fabs>(Op0); | ||||||||
1774 | } | ||||||||
1775 | |||||||||
1776 | template <typename Opnd0> | ||||||||
1777 | inline typename m_Intrinsic_Ty<Opnd0>::Ty m_FCanonicalize(const Opnd0 &Op0) { | ||||||||
1778 | return m_Intrinsic<Intrinsic::canonicalize>(Op0); | ||||||||
1779 | } | ||||||||
1780 | |||||||||
1781 | template <typename Opnd0, typename Opnd1> | ||||||||
1782 | inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0, | ||||||||
1783 | const Opnd1 &Op1) { | ||||||||
1784 | return m_Intrinsic<Intrinsic::minnum>(Op0, Op1); | ||||||||
1785 | } | ||||||||
1786 | |||||||||
1787 | template <typename Opnd0, typename Opnd1> | ||||||||
1788 | inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0, | ||||||||
1789 | const Opnd1 &Op1) { | ||||||||
1790 | return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1); | ||||||||
1791 | } | ||||||||
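An editorial usage sketch (hypothetical helper): m_Intrinsic and the helpers above nest like any other matcher, so a composed pattern is still a single query.

static bool matchRedundantFAbs(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  // Matchers nest, so fabs(fabs(X)) is recognized in a single query.
  return match(V, m_FAbs(m_FAbs(m_Value(X))));
}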
1792 | |||||||||
1793 | //===----------------------------------------------------------------------===// | ||||||||
1794 | // Matchers for two-operand operators with the operands in either order | ||||||||
1795 | // | ||||||||
1796 | |||||||||
1797 | /// Matches a BinaryOperator with LHS and RHS in either order. | ||||||||
1798 | template <typename LHS, typename RHS> | ||||||||
1799 | inline AnyBinaryOp_match<LHS, RHS, true> m_c_BinOp(const LHS &L, const RHS &R) { | ||||||||
1800 | return AnyBinaryOp_match<LHS, RHS, true>(L, R); | ||||||||
1801 | } | ||||||||
1802 | |||||||||
1803 | /// Matches an ICmp with a predicate over LHS and RHS in either order. | ||||||||
1804 | /// Does not swap the predicate. | ||||||||
1805 | template <typename LHS, typename RHS> | ||||||||
1806 | inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true> | ||||||||
1807 | m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) { | ||||||||
1808 | return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate, true>(Pred, L, | ||||||||
1809 | R); | ||||||||
1810 | } | ||||||||
1811 | |||||||||
1812 | /// Matches an Add with LHS and RHS in either order. | ||||||||
1813 | template <typename LHS, typename RHS> | ||||||||
1814 | inline BinaryOp_match<LHS, RHS, Instruction::Add, true> m_c_Add(const LHS &L, | ||||||||
1815 | const RHS &R) { | ||||||||
1816 | return BinaryOp_match<LHS, RHS, Instruction::Add, true>(L, R); | ||||||||
1817 | } | ||||||||
1818 | |||||||||
1819 | /// Matches a Mul with LHS and RHS in either order. | ||||||||
1820 | template <typename LHS, typename RHS> | ||||||||
1821 | inline BinaryOp_match<LHS, RHS, Instruction::Mul, true> m_c_Mul(const LHS &L, | ||||||||
1822 | const RHS &R) { | ||||||||
1823 | return BinaryOp_match<LHS, RHS, Instruction::Mul, true>(L, R); | ||||||||
1824 | } | ||||||||
1825 | |||||||||
1826 | /// Matches an And with LHS and RHS in either order. | ||||||||
1827 | template <typename LHS, typename RHS> | ||||||||
1828 | inline BinaryOp_match<LHS, RHS, Instruction::And, true> m_c_And(const LHS &L, | ||||||||
1829 | const RHS &R) { | ||||||||
1830 | return BinaryOp_match<LHS, RHS, Instruction::And, true>(L, R); | ||||||||
1831 | } | ||||||||
1832 | |||||||||
1833 | /// Matches an Or with LHS and RHS in either order. | ||||||||
1834 | template <typename LHS, typename RHS> | ||||||||
1835 | inline BinaryOp_match<LHS, RHS, Instruction::Or, true> m_c_Or(const LHS &L, | ||||||||
1836 | const RHS &R) { | ||||||||
1837 | return BinaryOp_match<LHS, RHS, Instruction::Or, true>(L, R); | ||||||||
1838 | } | ||||||||
1839 | |||||||||
1840 | /// Matches an Xor with LHS and RHS in either order. | ||||||||
1841 | template <typename LHS, typename RHS> | ||||||||
1842 | inline BinaryOp_match<LHS, RHS, Instruction::Xor, true> m_c_Xor(const LHS &L, | ||||||||
1843 | const RHS &R) { | ||||||||
1844 | return BinaryOp_match<LHS, RHS, Instruction::Xor, true>(L, R); | ||||||||
1845 | } | ||||||||
1846 | |||||||||
1847 | /// Matches a 'Neg' as 'sub 0, V'. | ||||||||
1848 | template <typename ValTy> | ||||||||
1849 | inline BinaryOp_match<cst_pred_ty<is_zero_int>, ValTy, Instruction::Sub> | ||||||||
1850 | m_Neg(const ValTy &V) { | ||||||||
1851 | return m_Sub(m_ZeroInt(), V); | ||||||||
1852 | } | ||||||||
1853 | |||||||||
1854 | /// Matches a 'Not' as 'xor V, -1' or 'xor -1, V'. | ||||||||
1855 | template <typename ValTy> | ||||||||
1856 | inline BinaryOp_match<ValTy, cst_pred_ty<is_all_ones>, Instruction::Xor, true> | ||||||||
1857 | m_Not(const ValTy &V) { | ||||||||
1858 | return m_c_Xor(V, m_AllOnes()); | ||||||||
1859 | } | ||||||||
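An editorial sketch of composing the commutative helpers (hypothetical helper name):

static bool matchNotOfAnd(llvm::Value *V, llvm::Value *&A, llvm::Value *&B) {
  using namespace llvm::PatternMatch;
  // m_Not is commutative in the xor operands, so one query covers both
  // "xor (and A, B), -1" and "xor -1, (and A, B)".
  return match(V, m_Not(m_And(m_Value(A), m_Value(B))));
}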
1860 | |||||||||
1861 | /// Matches an SMin with LHS and RHS in either order. | ||||||||
1862 | template <typename LHS, typename RHS> | ||||||||
1863 | inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true> | ||||||||
1864 | m_c_SMin(const LHS &L, const RHS &R) { | ||||||||
1865 | return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty, true>(L, R); | ||||||||
1866 | } | ||||||||
1867 | /// Matches an SMax with LHS and RHS in either order. | ||||||||
1868 | template <typename LHS, typename RHS> | ||||||||
1869 | inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true> | ||||||||
1870 | m_c_SMax(const LHS &L, const RHS &R) { | ||||||||
1871 | return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty, true>(L, R); | ||||||||
1872 | } | ||||||||
1873 | /// Matches a UMin with LHS and RHS in either order. | ||||||||
1874 | template <typename LHS, typename RHS> | ||||||||
1875 | inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true> | ||||||||
1876 | m_c_UMin(const LHS &L, const RHS &R) { | ||||||||
1877 | return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty, true>(L, R); | ||||||||
1878 | } | ||||||||
1879 | /// Matches a UMax with LHS and RHS in either order. | ||||||||
1880 | template <typename LHS, typename RHS> | ||||||||
1881 | inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true> | ||||||||
1882 | m_c_UMax(const LHS &L, const RHS &R) { | ||||||||
1883 | return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty, true>(L, R); | ||||||||
1884 | } | ||||||||
1885 | |||||||||
1886 | /// Matches FAdd with LHS and RHS in either order. | ||||||||
1887 | template <typename LHS, typename RHS> | ||||||||
1888 | inline BinaryOp_match<LHS, RHS, Instruction::FAdd, true> | ||||||||
1889 | m_c_FAdd(const LHS &L, const RHS &R) { | ||||||||
1890 | return BinaryOp_match<LHS, RHS, Instruction::FAdd, true>(L, R); | ||||||||
1891 | } | ||||||||
1892 | |||||||||
1893 | /// Matches FMul with LHS and RHS in either order. | ||||||||
1894 | template <typename LHS, typename RHS> | ||||||||
1895 | inline BinaryOp_match<LHS, RHS, Instruction::FMul, true> | ||||||||
1896 | m_c_FMul(const LHS &L, const RHS &R) { | ||||||||
1897 | return BinaryOp_match<LHS, RHS, Instruction::FMul, true>(L, R); | ||||||||
1898 | } | ||||||||
1899 | |||||||||
1900 | template <typename Opnd_t> struct Signum_match { | ||||||||
1901 | Opnd_t Val; | ||||||||
1902 | Signum_match(const Opnd_t &V) : Val(V) {} | ||||||||
1903 | |||||||||
1904 | template <typename OpTy> bool match(OpTy *V) { | ||||||||
1905 | unsigned TypeSize = V->getType()->getScalarSizeInBits(); | ||||||||
1906 | if (TypeSize == 0) | ||||||||
1907 | return false; | ||||||||
1908 | |||||||||
1909 | unsigned ShiftWidth = TypeSize - 1; | ||||||||
1910 | Value *OpL = nullptr, *OpR = nullptr; | ||||||||
1911 | |||||||||
1912 | // This is the representation of signum we match: | ||||||||
1913 | // | ||||||||
1914 | // signum(x) == (x >> 63) | (-x >>u 63) | ||||||||
1915 | // | ||||||||
1916 | // An i1 value is its own signum, so it's correct to match | ||||||||
1917 | // | ||||||||
1918 | // signum(x) == (x >> 0) | (-x >>u 0) | ||||||||
1919 | // | ||||||||
1920 | // for i1 values. | ||||||||
1921 | |||||||||
1922 | auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth)); | ||||||||
1923 | auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth)); | ||||||||
1924 | auto Signum = m_Or(LHS, RHS); | ||||||||
1925 | |||||||||
1926 | return Signum.match(V) && OpL == OpR && Val.match(OpL); | ||||||||
1927 | } | ||||||||
1928 | }; | ||||||||
1929 | |||||||||
1930 | /// Matches a signum pattern. | ||||||||
1931 | /// | ||||||||
1932 | /// signum(x) = | ||||||||
1933 | /// x > 0 -> 1 | ||||||||
1934 | /// x == 0 -> 0 | ||||||||
1935 | /// x < 0 -> -1 | ||||||||
1936 | template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) { | ||||||||
1937 | return Signum_match<Val_t>(V); | ||||||||
1938 | } | ||||||||
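An editorial usage sketch (hypothetical helper):

static bool matchSignum(llvm::Value *V, llvm::Value *&X) {
  using namespace llvm::PatternMatch;
  // Recovers X from "(X >> bw-1) | (-X >>u bw-1)", where bw is the bit width.
  return match(V, m_Signum(m_Value(X)));
}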
1939 | |||||||||
1940 | } // end namespace PatternMatch | ||||||||
1941 | } // end namespace llvm | ||||||||
1942 | |||||||||
1943 | #endif // LLVM_IR_PATTERNMATCH_H |
1 | //===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the generic AliasAnalysis interface, which serves as the
10 | // common interface used by all clients of alias analysis information, and |
11 | // implemented by all alias analysis implementations. Mod/Ref information is |
12 | // also captured by this interface. |
13 | // |
14 | // Implementations of this interface must implement the various virtual
15 | // methods; doing so automatically provides functionality for the entire
16 | // suite of client APIs.
17 | // |
18 | // This API identifies memory regions with the MemoryLocation class. The pointer |
19 | // component specifies the base memory address of the region. The Size specifies |
20 | // the maximum size (in address units) of the memory region, or |
21 | // MemoryLocation::UnknownSize if the size is not known. The TBAA tag |
22 | // identifies the "type" of the memory reference; see the |
23 | // TypeBasedAliasAnalysis class for details. |
24 | // |
25 | // Some non-obvious details include: |
26 | // - Pointers that point to two completely different objects in memory never |
27 | // alias, regardless of the value of the Size component. |
28 | // - NoAlias doesn't imply unequal pointers. The most obvious example of this
29 | // is two pointers to constant memory. Even if they are equal, constant |
30 | // memory is never stored to, so there will never be any dependencies. |
31 | // In this and other situations, the pointers may be both NoAlias and |
32 | // MustAlias at the same time. The current API can only return one result, |
33 | // though this is rarely a problem in practice. |
34 | // |
35 | //===----------------------------------------------------------------------===// |
36 | |
37 | #ifndef LLVM_ANALYSIS_ALIASANALYSIS_H |
38 | #define LLVM_ANALYSIS_ALIASANALYSIS_H |
39 | |
40 | #include "llvm/ADT/DenseMap.h" |
41 | #include "llvm/ADT/None.h" |
42 | #include "llvm/ADT/Optional.h" |
43 | #include "llvm/ADT/SmallVector.h" |
44 | #include "llvm/Analysis/MemoryLocation.h" |
45 | #include "llvm/Analysis/TargetLibraryInfo.h" |
46 | #include "llvm/IR/Function.h" |
47 | #include "llvm/IR/Instruction.h" |
48 | #include "llvm/IR/Instructions.h" |
49 | #include "llvm/IR/PassManager.h" |
50 | #include "llvm/Pass.h" |
51 | #include <cstdint> |
52 | #include <functional> |
53 | #include <memory> |
54 | #include <vector> |
55 | |
56 | namespace llvm { |
57 | |
58 | class AnalysisUsage; |
59 | class BasicAAResult; |
60 | class BasicBlock; |
61 | class DominatorTree; |
62 | class OrderedBasicBlock; |
63 | class Value; |
64 | |
65 | /// The possible results of an alias query. |
66 | /// |
67 | /// These results are always computed between two MemoryLocation objects as |
68 | /// a query to some alias analysis. |
69 | /// |
70 | /// Note that these are unscoped enumerations because we would like to support |
71 | /// implicitly testing a result for the existence of any possible aliasing with |
72 | /// a conversion to bool, but an "enum class" doesn't support this. The |
73 | /// canonical names from the literature are suffixed and unique anyway, and so
74 | /// they serve as global constants in LLVM for these results. |
75 | /// |
76 | /// See docs/AliasAnalysis.html for more information on the specific meanings |
77 | /// of these values. |
78 | enum AliasResult : uint8_t { |
79 | /// The two locations do not alias at all. |
80 | /// |
81 | /// This value is arranged to convert to false, while all other values |
82 | /// convert to true. This allows a boolean context to convert the result to |
83 | /// a binary flag indicating whether there is the possibility of aliasing. |
84 | NoAlias = 0, |
85 | /// The two locations may or may not alias. This is the least precise result. |
86 | MayAlias, |
87 | /// The two locations alias, but only due to a partial overlap. |
88 | PartialAlias, |
89 | /// The two locations precisely alias each other. |
90 | MustAlias, |
91 | }; |
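An editorial sketch (hypothetical helper) of the boolean-conversion property the comment above describes:

inline const char *describeAliasResult(llvm::AliasResult AR) {
  // NoAlias is 0, so a bare boolean test asks "is any aliasing possible?".
  if (!AR)
    return "provably disjoint";
  return AR == llvm::MustAlias ? "exactly the same location"
                               : "possible overlap";
}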
92 | |
93 | /// << operator for AliasResult. |
94 | raw_ostream &operator<<(raw_ostream &OS, AliasResult AR); |
95 | |
96 | /// Flags indicating whether a memory access modifies or references memory. |
97 | /// |
98 | /// This is no access at all, a modification, a reference, or both |
99 | /// a modification and a reference. These are specifically structured such that |
100 | /// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must' |
101 | /// work with any of the possible values. |
102 | enum class ModRefInfo : uint8_t { |
103 | /// Must is provided for completeness, but no routines will return only |
104 | /// Must today. See definition of Must below. |
105 | Must = 0, |
106 | /// The access may reference the value stored in memory, |
107 | /// a mustAlias relation was found, and no mayAlias or partialAlias found. |
108 | MustRef = 1, |
109 | /// The access may modify the value stored in memory, |
110 | /// a mustAlias relation was found, and no mayAlias or partialAlias found. |
111 | MustMod = 2, |
112 | /// The access may reference, modify or both the value stored in memory, |
113 | /// a mustAlias relation was found, and no mayAlias or partialAlias found. |
114 | MustModRef = MustRef | MustMod, |
115 | /// The access neither references nor modifies the value stored in memory. |
116 | NoModRef = 4, |
117 | /// The access may reference the value stored in memory. |
118 | Ref = NoModRef | MustRef, |
119 | /// The access may modify the value stored in memory. |
120 | Mod = NoModRef | MustMod, |
121 | /// The access may reference and may modify the value stored in memory. |
122 | ModRef = Ref | Mod, |
123 | |
124 | /// About Must: |
125 | /// Must is set in a best effort manner. |
126 | /// We usually do not try our best to infer Must, instead it is merely |
127 | /// another piece of "free" information that is presented when available. |
128 | /// Must set means there was certainly a MustAlias found. For calls, where
129 | /// multiple arguments are checked (argmemonly), this translates to only
130 | /// MustAlias or NoAlias results being found.
131 | /// Must is not set for RAR accesses, even if the two locations must
132 | /// alias. The reason is that two read accesses translate to an early return
133 | /// of NoModRef. An additional alias check to set Must may be
134 | /// expensive. Other cases may also not set Must (e.g. callCapturesBefore).
135 | /// We refer to Must being *set* when the most significant bit is *cleared*.
136 | /// Conversely, we *clear* Must information by *setting* the Must bit to 1.
137 | }; |
138 | |
139 | LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
140 | return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) == |
141 | static_cast<int>(ModRefInfo::Must); |
142 | } |
143 | LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
144 | return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef); |
145 | } |
146 | LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
147 | return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) == |
148 | static_cast<int>(ModRefInfo::MustModRef); |
149 | } |
150 | LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
151 | return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustMod); |
152 | } |
153 | LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
154 | return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustRef); |
155 | } |
156 | LLVM_NODISCARD inline bool isMustSet(const ModRefInfo MRI) {
157 | return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef)); |
158 | } |
159 | |
160 | LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
161 | return ModRefInfo(static_cast<int>(MRI) | |
162 | static_cast<int>(ModRefInfo::MustMod)); |
163 | } |
164 | LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
165 | return ModRefInfo(static_cast<int>(MRI) | |
166 | static_cast<int>(ModRefInfo::MustRef)); |
167 | } |
168 | LLVM_NODISCARD inline ModRefInfo setMust(const ModRefInfo MRI) {
169 | return ModRefInfo(static_cast<int>(MRI) & |
170 | static_cast<int>(ModRefInfo::MustModRef)); |
171 | } |
172 | LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
173 | return ModRefInfo(static_cast<int>(MRI) | |
174 | static_cast<int>(ModRefInfo::MustModRef)); |
175 | } |
176 | LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
177 | return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref)); |
178 | } |
179 | LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
180 | return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod)); |
181 | } |
182 | LLVM_NODISCARD inline ModRefInfo clearMust(const ModRefInfo MRI) {
183 | return ModRefInfo(static_cast<int>(MRI) | |
184 | static_cast<int>(ModRefInfo::NoModRef)); |
185 | } |
186 | LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
187 | const ModRefInfo MRI2) { |
188 | return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2)); |
189 | } |
190 | LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
191 | const ModRefInfo MRI2) { |
192 | return ModRefInfo(static_cast<int>(MRI1) & static_cast<int>(MRI2)); |
193 | } |
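A short editorial sketch of how these helpers combine (the predicate below is hypothetical):

inline bool isExactClobber(llvm::ModRefInfo MRI) {
  // Mod bit set, and the NoModRef bit clear: the access certainly writes
  // the queried location via a MustAlias pointer.
  return llvm::isModSet(MRI) && llvm::isMustSet(MRI);
}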
194 | |
195 | /// The locations at which a function might access memory. |
196 | /// |
197 | /// These are primarily used in conjunction with the \c AccessKind bits to |
198 | /// describe both the nature of access and the locations of access for a |
199 | /// function call. |
200 | enum FunctionModRefLocation { |
201 | /// Base case is no access to memory. |
202 | FMRL_Nowhere = 0, |
203 | /// Access to memory via argument pointers. |
204 | FMRL_ArgumentPointees = 8, |
205 | /// Memory that is inaccessible via LLVM IR. |
206 | FMRL_InaccessibleMem = 16, |
207 | /// Access to any memory. |
208 | FMRL_Anywhere = 32 | FMRL_InaccessibleMem | FMRL_ArgumentPointees |
209 | }; |
210 | |
211 | /// Summary of how a function affects memory in the program. |
212 | /// |
213 | /// Loads from constant globals are not considered memory accesses for this |
214 | /// interface. Also, functions may freely modify stack space local to their |
215 | /// invocation without having to report it through these interfaces. |
216 | enum FunctionModRefBehavior { |
217 | /// This function does not perform any non-local loads or stores to memory. |
218 | /// |
219 | /// This property corresponds to the GCC 'const' attribute. |
220 | /// This property corresponds to the LLVM IR 'readnone' attribute. |
221 | /// This property corresponds to the IntrNoMem LLVM intrinsic flag. |
222 | FMRB_DoesNotAccessMemory = |
223 | FMRL_Nowhere | static_cast<int>(ModRefInfo::NoModRef), |
224 | |
225 | /// The only memory references in this function (if it has any) are |
226 | /// non-volatile loads from objects pointed to by its pointer-typed |
227 | /// arguments, with arbitrary offsets. |
228 | /// |
229 | /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag. |
230 | FMRB_OnlyReadsArgumentPointees = |
231 | FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::Ref), |
232 | |
233 | /// The only memory references in this function (if it has any) are |
234 | /// non-volatile loads and stores from objects pointed to by its |
235 | /// pointer-typed arguments, with arbitrary offsets. |
236 | /// |
237 | /// This property corresponds to the IntrArgMemOnly LLVM intrinsic flag. |
238 | FMRB_OnlyAccessesArgumentPointees = |
239 | FMRL_ArgumentPointees | static_cast<int>(ModRefInfo::ModRef), |
240 | |
241 | /// The only memory references in this function (if it has any) are |
242 | /// references of memory that is otherwise inaccessible via LLVM IR. |
243 | /// |
244 | /// This property corresponds to the LLVM IR inaccessiblememonly attribute. |
245 | FMRB_OnlyAccessesInaccessibleMem = |
246 | FMRL_InaccessibleMem | static_cast<int>(ModRefInfo::ModRef), |
247 | |
248 | /// The function may perform non-volatile loads and stores of objects |
249 | /// pointed to by its pointer-typed arguments, with arbitrary offsets, and |
250 | /// it may also perform loads and stores of memory that is otherwise |
251 | /// inaccessible via LLVM IR. |
252 | /// |
253 | /// This property corresponds to the LLVM IR |
254 | /// inaccessiblemem_or_argmemonly attribute. |
255 | FMRB_OnlyAccessesInaccessibleOrArgMem = FMRL_InaccessibleMem | |
256 | FMRL_ArgumentPointees | |
257 | static_cast<int>(ModRefInfo::ModRef), |
258 | |
259 | /// This function does not perform any non-local stores or volatile loads, |
260 | /// but may read from any memory location. |
261 | /// |
262 | /// This property corresponds to the GCC 'pure' attribute. |
263 | /// This property corresponds to the LLVM IR 'readonly' attribute. |
264 | /// This property corresponds to the IntrReadMem LLVM intrinsic flag. |
265 | FMRB_OnlyReadsMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Ref), |
266 | |
267 | /// This function does not read from memory anywhere, but may write to any
268 | /// memory location.
269 | ///
270 | /// This property corresponds to the LLVM IR 'writeonly' attribute.
271 | /// This property corresponds to the IntrWriteMem LLVM intrinsic flag.
272 | FMRB_DoesNotReadMemory = FMRL_Anywhere | static_cast<int>(ModRefInfo::Mod), |
273 | |
274 | /// This indicates that the function could not be classified into one of the |
275 | /// behaviors above. |
276 | FMRB_UnknownModRefBehavior = |
277 | FMRL_Anywhere | static_cast<int>(ModRefInfo::ModRef) |
278 | }; |
279 | |
280 | // Wrapper method strips bits significant only in FunctionModRefBehavior, |
281 | // to obtain a valid ModRefInfo. The benefit of using the wrapper is that if |
282 | // ModRefInfo enum changes, the wrapper can be updated to & with the new enum |
283 | // entry with all bits set to 1. |
284 | LLVM_NODISCARD inline ModRefInfo
285 | createModRefInfo(const FunctionModRefBehavior FMRB) { |
286 | return ModRefInfo(FMRB & static_cast<int>(ModRefInfo::ModRef)); |
287 | } |
288 | |
289 | /// This class stores info we want to provide to or retain within an alias |
290 | /// query. By default, the root query is stateless and starts with a freshly |
291 | /// constructed info object. Specific alias analyses can use this query info to |
292 | /// store per-query state that is important for recursive or nested queries to |
293 | /// avoid recomputing. To enable preserving this state across multiple queries |
294 | /// where safe (due to the IR not changing), use a `BatchAAResults` wrapper. |
295 | /// The information stored in an `AAQueryInfo` is currently limitted to the |
296 | /// caches used by BasicAA, but can further be extended to fit other AA needs. |
297 | class AAQueryInfo { |
298 | public: |
299 | using LocPair = std::pair<MemoryLocation, MemoryLocation>; |
300 | using AliasCacheT = SmallDenseMap<LocPair, AliasResult, 8>; |
301 | AliasCacheT AliasCache; |
302 | |
303 | using IsCapturedCacheT = SmallDenseMap<const Value *, bool, 8>; |
304 | IsCapturedCacheT IsCapturedCache; |
305 | |
306 | AAQueryInfo() : AliasCache(), IsCapturedCache() {} |
307 | }; |
308 | |
309 | class BatchAAResults; |
310 | |
311 | class AAResults { |
312 | public: |
313 | // Make these results default constructible and movable. We have to spell
314 | // these out because MSVC won't synthesize them. |
315 | AAResults(const TargetLibraryInfo &TLI) : TLI(TLI) {} |
316 | AAResults(AAResults &&Arg); |
317 | ~AAResults(); |
318 | |
319 | /// Register a specific AA result. |
320 | template <typename AAResultT> void addAAResult(AAResultT &AAResult) { |
321 | // FIXME: We should use a much lighter weight system than the usual |
322 | // polymorphic pattern because we don't own AAResult. It should |
323 | // ideally involve two pointers and no separate allocation. |
324 | AAs.emplace_back(new Model<AAResultT>(AAResult, *this)); |
325 | } |
326 | |
327 | /// Register a function analysis ID that the results aggregation depends on. |
328 | /// |
329 | /// This is used in the new pass manager to implement the invalidation logic |
330 | /// where we must invalidate the results aggregation if any of our component |
331 | /// analyses become invalid. |
332 | void addAADependencyID(AnalysisKey *ID) { AADeps.push_back(ID); } |
333 | |
334 | /// Handle invalidation events in the new pass manager. |
335 | /// |
336 | /// The aggregation is invalidated if any of the underlying analyses is |
337 | /// invalidated. |
338 | bool invalidate(Function &F, const PreservedAnalyses &PA, |
339 | FunctionAnalysisManager::Invalidator &Inv); |
340 | |
341 | //===--------------------------------------------------------------------===// |
342 | /// \name Alias Queries |
343 | /// @{ |
344 | |
345 | /// The main low level interface to the alias analysis implementation. |
346 | /// Returns an AliasResult indicating whether the two pointers are aliased to |
347 | /// each other. This is the interface that must be implemented by specific |
348 | /// alias analysis implementations. |
349 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB); |
350 | |
351 | /// A convenience wrapper around the primary \c alias interface. |
352 | AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2, |
353 | LocationSize V2Size) { |
354 | return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size)); |
355 | } |
356 | |
357 | /// A convenience wrapper around the primary \c alias interface. |
358 | AliasResult alias(const Value *V1, const Value *V2) { |
359 | return alias(V1, LocationSize::unknown(), V2, LocationSize::unknown()); |
360 | } |
361 | |
362 | /// A trivial helper function to check to see if the specified pointers are |
363 | /// no-alias. |
364 | bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
365 | return alias(LocA, LocB) == NoAlias; |
366 | } |
367 | |
368 | /// A convenience wrapper around the \c isNoAlias helper interface. |
369 | bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2, |
370 | LocationSize V2Size) { |
371 | return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size)); |
372 | } |
373 | |
374 | /// A convenience wrapper around the \c isNoAlias helper interface. |
375 | bool isNoAlias(const Value *V1, const Value *V2) { |
376 | return isNoAlias(MemoryLocation(V1), MemoryLocation(V2)); |
377 | } |
378 | |
379 | /// A trivial helper function to check to see if the specified pointers are |
380 | /// must-alias. |
381 | bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
382 | return alias(LocA, LocB) == MustAlias; |
383 | } |
384 | |
385 | /// A convenience wrapper around the \c isMustAlias helper interface. |
386 | bool isMustAlias(const Value *V1, const Value *V2) { |
387 | return alias(V1, LocationSize::precise(1), V2, LocationSize::precise(1)) == |
388 | MustAlias; |
389 | } |
390 | |
391 | /// Checks whether the given location points to constant memory, or if |
392 | /// \p OrLocal is true whether it points to a local alloca. |
393 | bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false); |
394 | |
395 | /// A convenience wrapper around the primary \c pointsToConstantMemory |
396 | /// interface. |
397 | bool pointsToConstantMemory(const Value *P, bool OrLocal = false) { |
398 | return pointsToConstantMemory(MemoryLocation(P), OrLocal); |
399 | } |
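An editorial usage sketch (hypothetical helper; relies on llvm/Analysis/MemoryLocation.h, already included above):

inline bool isLoadFromConstantMemory(llvm::AAResults &AA,
                                     const llvm::LoadInst &LI) {
  // Loads from provably constant memory can never be clobbered, so clients
  // may skip dependence checking for them entirely.
  return AA.pointsToConstantMemory(llvm::MemoryLocation::get(&LI));
}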
400 | |
401 | /// @} |
402 | //===--------------------------------------------------------------------===// |
403 | /// \name Simple mod/ref information |
404 | /// @{ |
405 | |
406 | /// Get the ModRef info associated with a pointer argument of a call. The |
407 | /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note |
408 | /// that these bits do not necessarily account for the overall behavior of |
409 | /// the function, but rather only provide additional per-argument |
410 | /// information. This never sets ModRefInfo::Must. |
411 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx); |
412 | |
413 | /// Return the behavior of the given call site. |
414 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call); |
415 | |
416 | /// Return the behavior when calling the given function. |
417 | FunctionModRefBehavior getModRefBehavior(const Function *F); |
418 | |
419 | /// Checks if the specified call is known to never read or write memory. |
420 | /// |
421 | /// Note that if the call only reads from known-constant memory, it is also |
422 | /// legal to return true. Also, calls that unwind the stack are legal for |
423 | /// this predicate. |
424 | /// |
425 | /// Many optimizations (such as CSE and LICM) can be performed on such calls |
426 | /// without worrying about aliasing properties, and many calls have this |
427 | /// property (e.g. calls to 'sin' and 'cos'). |
428 | /// |
429 | /// This property corresponds to the GCC 'const' attribute. |
430 | bool doesNotAccessMemory(const CallBase *Call) { |
431 | return getModRefBehavior(Call) == FMRB_DoesNotAccessMemory; |
432 | } |
433 | |
434 | /// Checks if the specified function is known to never read or write memory. |
435 | /// |
436 | /// Note that if the function only reads from known-constant memory, it is |
437 | /// also legal to return true. Also, functions that unwind the stack are legal
438 | /// for this predicate. |
439 | /// |
440 | /// Many optimizations (such as CSE and LICM) can be performed on calls
441 | /// to such functions without worrying about aliasing properties, and many
442 | /// functions have this property (e.g. 'sin' and 'cos'). |
443 | /// |
444 | /// This property corresponds to the GCC 'const' attribute. |
445 | bool doesNotAccessMemory(const Function *F) { |
446 | return getModRefBehavior(F) == FMRB_DoesNotAccessMemory; |
447 | } |
448 | |
449 | /// Checks if the specified call is known to only read from non-volatile |
450 | /// memory (or not access memory at all). |
451 | /// |
452 | /// Calls that unwind the stack are legal for this predicate. |
453 | /// |
454 | /// This property allows many common optimizations to be performed in the |
455 | /// absence of interfering store instructions, such as CSE of strlen calls. |
456 | /// |
457 | /// This property corresponds to the GCC 'pure' attribute. |
458 | bool onlyReadsMemory(const CallBase *Call) { |
459 | return onlyReadsMemory(getModRefBehavior(Call)); |
460 | } |
461 | |
462 | /// Checks if the specified function is known to only read from non-volatile |
463 | /// memory (or not access memory at all). |
464 | /// |
465 | /// Functions that unwind the stack are legal for this predicate. |
466 | /// |
467 | /// This property allows many common optimizations to be performed in the |
468 | /// absence of interfering store instructions, such as CSE of strlen calls. |
469 | /// |
470 | /// This property corresponds to the GCC 'pure' attribute. |
471 | bool onlyReadsMemory(const Function *F) { |
472 | return onlyReadsMemory(getModRefBehavior(F)); |
473 | } |
474 | |
475 | /// Checks if functions with the specified behavior are known to only read |
476 | /// from non-volatile memory (or not access memory at all). |
477 | static bool onlyReadsMemory(FunctionModRefBehavior MRB) { |
478 | return !isModSet(createModRefInfo(MRB)); |
479 | } |
480 | |
481 | /// Checks if functions with the specified behavior are known to only write |
482 | /// memory (or not access memory at all). |
483 | static bool doesNotReadMemory(FunctionModRefBehavior MRB) { |
484 | return !isRefSet(createModRefInfo(MRB)); |
485 | } |
486 | |
487 | /// Checks if functions with the specified behavior are known to read and |
488 | /// write at most from objects pointed to by their pointer-typed arguments |
489 | /// (with arbitrary offsets). |
490 | static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) { |
491 | return !(MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees); |
492 | } |
493 | |
494 | /// Checks if functions with the specified behavior are known to potentially |
495 | /// read or write from objects pointed to by their pointer-typed arguments
496 | /// (with arbitrary offsets). |
497 | static bool doesAccessArgPointees(FunctionModRefBehavior MRB) { |
498 | return isModOrRefSet(createModRefInfo(MRB)) && |
499 | (MRB & FMRL_ArgumentPointees); |
500 | } |
501 | |
502 | /// Checks if functions with the specified behavior are known to read and |
503 | /// write at most from memory that is inaccessible from LLVM IR. |
504 | static bool onlyAccessesInaccessibleMem(FunctionModRefBehavior MRB) { |
505 | return !(MRB & FMRL_Anywhere & ~FMRL_InaccessibleMem); |
506 | } |
507 | |
508 | /// Checks if functions with the specified behavior are known to potentially |
509 | /// read or write from memory that is inaccessible from LLVM IR. |
510 | static bool doesAccessInaccessibleMem(FunctionModRefBehavior MRB) { |
511 | return isModOrRefSet(createModRefInfo(MRB)) && (MRB & FMRL_InaccessibleMem); |
512 | } |
513 | |
514 | /// Checks if functions with the specified behavior are known to read and |
515 | /// write at most from memory that is inaccessible from LLVM IR or objects |
516 | /// pointed to by their pointer-typed arguments (with arbitrary offsets). |
517 | static bool onlyAccessesInaccessibleOrArgMem(FunctionModRefBehavior MRB) { |
518 | return !(MRB & FMRL_Anywhere & |
519 | ~(FMRL_InaccessibleMem | FMRL_ArgumentPointees)); |
520 | } |
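An editorial sketch (hypothetical helper): the FMRL_* bits reduce these behavior checks to a single mask test.

inline bool callOnlyTouchesArguments(llvm::AAResults &AA,
                                     const llvm::CallBase *Call) {
  // Treat an argmemonly-style call specially: it can only touch memory
  // reachable from its pointer arguments.
  return llvm::AAResults::onlyAccessesArgPointees(AA.getModRefBehavior(Call));
}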
521 | |
522 | /// getModRefInfo (for call sites) - Return information about whether |
523 | /// a particular call site modifies or reads the specified memory location. |
524 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc); |
525 | |
526 | /// getModRefInfo (for call sites) - A convenience wrapper. |
527 | ModRefInfo getModRefInfo(const CallBase *Call, const Value *P, |
528 | LocationSize Size) { |
529 | return getModRefInfo(Call, MemoryLocation(P, Size)); |
530 | } |
531 | |
532 | /// getModRefInfo (for loads) - Return information about whether |
533 | /// a particular load modifies or reads the specified memory location. |
534 | ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc); |
535 | |
536 | /// getModRefInfo (for loads) - A convenience wrapper. |
537 | ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, |
538 | LocationSize Size) { |
539 | return getModRefInfo(L, MemoryLocation(P, Size)); |
540 | } |
541 | |
542 | /// getModRefInfo (for stores) - Return information about whether |
543 | /// a particular store modifies or reads the specified memory location. |
544 | ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc); |
545 | |
546 | /// getModRefInfo (for stores) - A convenience wrapper. |
547 | ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, |
548 | LocationSize Size) { |
549 | return getModRefInfo(S, MemoryLocation(P, Size)); |
550 | } |
551 | |
552 | /// getModRefInfo (for fences) - Return information about whether |
553 | /// a particular store modifies or reads the specified memory location. |
554 | ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc); |
555 | |
556 | /// getModRefInfo (for fences) - A convenience wrapper. |
557 | ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, |
558 | LocationSize Size) { |
559 | return getModRefInfo(S, MemoryLocation(P, Size)); |
560 | } |
561 | |
562 | /// getModRefInfo (for cmpxchges) - Return information about whether |
563 | /// a particular cmpxchg modifies or reads the specified memory location. |
564 | ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, |
565 | const MemoryLocation &Loc); |
566 | |
567 | /// getModRefInfo (for cmpxchges) - A convenience wrapper. |
568 | ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P, |
569 | LocationSize Size) { |
570 | return getModRefInfo(CX, MemoryLocation(P, Size)); |
571 | } |
572 | |
573 | /// getModRefInfo (for atomicrmws) - Return information about whether |
574 | /// a particular atomicrmw modifies or reads the specified memory location. |
575 | ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc); |
576 | |
577 | /// getModRefInfo (for atomicrmws) - A convenience wrapper. |
578 | ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P, |
579 | LocationSize Size) { |
580 | return getModRefInfo(RMW, MemoryLocation(P, Size)); |
581 | } |
582 | |
583 | /// getModRefInfo (for va_args) - Return information about whether |
584 | /// a particular va_arg modifies or reads the specified memory location. |
585 | ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc); |
586 | |
587 | /// getModRefInfo (for va_args) - A convenience wrapper. |
588 | ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, |
589 | LocationSize Size) { |
590 | return getModRefInfo(I, MemoryLocation(P, Size)); |
591 | } |
592 | |
593 | /// getModRefInfo (for catchpads) - Return information about whether |
594 | /// a particular catchpad modifies or reads the specified memory location. |
595 | ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc); |
596 | |
597 | /// getModRefInfo (for catchpads) - A convenience wrapper. |
598 | ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P, |
599 | LocationSize Size) { |
600 | return getModRefInfo(I, MemoryLocation(P, Size)); |
601 | } |
602 | |
603 | /// getModRefInfo (for catchrets) - Return information about whether |
604 | /// a particular catchret modifies or reads the specified memory location. |
605 | ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc); |
606 | |
607 | /// getModRefInfo (for catchrets) - A convenience wrapper. |
608 | ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P, |
609 | LocationSize Size) { |
610 | return getModRefInfo(I, MemoryLocation(P, Size)); |
611 | } |
612 | |
613 | /// Check whether or not an instruction may read or write the optionally |
614 | /// specified memory location. |
615 | /// |
616 | /// |
617 | /// An instruction that doesn't read or write memory may, for example, be
618 | /// trivially LICM'd.
619 | /// |
620 | /// For function calls, this delegates to the alias-analysis specific |
621 | /// call-site mod-ref behavior queries. Otherwise it delegates to the specific |
622 | /// helpers above. |
623 | ModRefInfo getModRefInfo(const Instruction *I, |
624 | const Optional<MemoryLocation> &OptLoc) { |
625 | AAQueryInfo AAQIP; |
626 | return getModRefInfo(I, OptLoc, AAQIP); |
627 | } |
628 | |
629 | /// A convenience wrapper for constructing the memory location. |
630 | ModRefInfo getModRefInfo(const Instruction *I, const Value *P, |
631 | LocationSize Size) { |
632 | return getModRefInfo(I, MemoryLocation(P, Size)); |
633 | } |
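An editorial usage sketch (hypothetical helper): the generic instruction overload is how hoisting-style clients typically phrase a clobber query.

inline bool mayWriteTo(llvm::AAResults &AA, const llvm::Instruction *I,
                       const llvm::MemoryLocation &Loc) {
  // May instruction I write the queried location? (Mod bit of the result.)
  return llvm::isModSet(AA.getModRefInfo(I, Loc));
}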
634 | |
635 | /// Return information about whether a call and an instruction may refer to |
636 | /// the same memory locations. |
637 | ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call); |
638 | |
639 | /// Return information about whether two call sites may refer to the same set |
640 | /// of memory locations. See the AA documentation for details: |
641 | /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo |
642 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2); |
643 | |
644 | /// Return information about whether a particular call site modifies |
645 | /// or reads the specified memory location \p MemLoc before instruction \p I |
646 | /// in a BasicBlock. An ordered basic block \p OBB can be used to speed up |
647 | /// instruction ordering queries inside the BasicBlock containing \p I. |
648 | /// Early exits in callCapturesBefore may lead to ModRefInfo::Must not being |
649 | /// set. |
650 | ModRefInfo callCapturesBefore(const Instruction *I, |
651 | const MemoryLocation &MemLoc, DominatorTree *DT, |
652 | OrderedBasicBlock *OBB = nullptr); |
653 | |
654 | /// A convenience wrapper to synthesize a memory location. |
655 | ModRefInfo callCapturesBefore(const Instruction *I, const Value *P, |
656 | LocationSize Size, DominatorTree *DT, |
657 | OrderedBasicBlock *OBB = nullptr) { |
658 | return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB); |
659 | } |
660 | |
661 | /// @} |
662 | //===--------------------------------------------------------------------===// |
663 | /// \name Higher level methods for querying mod/ref information. |
664 | /// @{ |
665 | |
666 | /// Check if it is possible for execution of the specified basic block to |
667 | /// modify the location Loc. |
668 | bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc); |
669 | |
670 | /// A convenience wrapper synthesizing a memory location. |
671 | bool canBasicBlockModify(const BasicBlock &BB, const Value *P, |
672 | LocationSize Size) { |
673 | return canBasicBlockModify(BB, MemoryLocation(P, Size)); |
674 | } |
675 | |
676 | /// Check if it is possible for the execution of the specified instructions |
677 | /// to mod/ref (according to the mode) the location Loc.
678 | /// |
679 | /// The instructions to consider are all of the instructions in the range of |
680 | /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block. |
681 | bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2, |
682 | const MemoryLocation &Loc, |
683 | const ModRefInfo Mode); |
684 | |
685 | /// A convenience wrapper synthesizing a memory location. |
686 | bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2, |
687 | const Value *Ptr, LocationSize Size, |
688 | const ModRefInfo Mode) { |
689 | return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode); |
690 | } |
691 | |
692 | private: |
693 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
694 | AAQueryInfo &AAQI); |
695 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
696 | bool OrLocal = false); |
697 | ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2, |
698 | AAQueryInfo &AAQIP); |
699 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
700 | AAQueryInfo &AAQI); |
701 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
702 | AAQueryInfo &AAQI); |
703 | ModRefInfo getModRefInfo(const VAArgInst *V, const MemoryLocation &Loc, |
704 | AAQueryInfo &AAQI); |
705 | ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc, |
706 | AAQueryInfo &AAQI); |
707 | ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc, |
708 | AAQueryInfo &AAQI); |
709 | ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc, |
710 | AAQueryInfo &AAQI); |
711 | ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, |
712 | const MemoryLocation &Loc, AAQueryInfo &AAQI); |
713 | ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc, |
714 | AAQueryInfo &AAQI); |
715 | ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc, |
716 | AAQueryInfo &AAQI); |
717 | ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc, |
718 | AAQueryInfo &AAQI); |
719 | ModRefInfo getModRefInfo(const Instruction *I, |
720 | const Optional<MemoryLocation> &OptLoc, |
721 | AAQueryInfo &AAQIP) { |
722 | if (OptLoc == None) { |
723 | if (const auto *Call = dyn_cast<CallBase>(I)) { |
724 | return createModRefInfo(getModRefBehavior(Call)); |
725 | } |
726 | } |
727 | |
728 | const MemoryLocation &Loc = OptLoc.getValueOr(MemoryLocation()); |
729 | |
730 | switch (I->getOpcode()) { |
731 | case Instruction::VAArg: |
732 | return getModRefInfo((const VAArgInst *)I, Loc, AAQIP); |
733 | case Instruction::Load: |
734 | return getModRefInfo((const LoadInst *)I, Loc, AAQIP); |
735 | case Instruction::Store: |
736 | return getModRefInfo((const StoreInst *)I, Loc, AAQIP); |
737 | case Instruction::Fence: |
738 | return getModRefInfo((const FenceInst *)I, Loc, AAQIP); |
739 | case Instruction::AtomicCmpXchg: |
740 | return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP); |
741 | case Instruction::AtomicRMW: |
742 | return getModRefInfo((const AtomicRMWInst *)I, Loc, AAQIP); |
743 | case Instruction::Call: |
744 | return getModRefInfo((const CallInst *)I, Loc, AAQIP); |
745 | case Instruction::Invoke: |
746 | return getModRefInfo((const InvokeInst *)I, Loc, AAQIP); |
747 | case Instruction::CatchPad: |
748 | return getModRefInfo((const CatchPadInst *)I, Loc, AAQIP); |
749 | case Instruction::CatchRet: |
750 | return getModRefInfo((const CatchReturnInst *)I, Loc, AAQIP); |
751 | default: |
752 | return ModRefInfo::NoModRef; |
753 | } |
754 | } |
755 | |
756 | class Concept; |
757 | |
758 | template <typename T> class Model; |
759 | |
760 | template <typename T> friend class AAResultBase; |
761 | |
762 | const TargetLibraryInfo &TLI; |
763 | |
764 | std::vector<std::unique_ptr<Concept>> AAs; |
765 | |
766 | std::vector<AnalysisKey *> AADeps; |
767 | |
768 | friend class BatchAAResults; |
769 | }; |
770 | |
771 | /// This class is a wrapper over an AAResults, and it is intended to be used |
772 | /// only when there are no IR changes in between queries. BatchAAResults
773 | /// reuses the same `AAQueryInfo` to preserve the state across queries,
774 | /// essentially making AA work in "batch mode". The internal state cannot be
775 | /// cleared, so to go "out of batch mode" the user must either use AAResults,
776 | /// or create a new BatchAAResults.
777 | class BatchAAResults { |
778 | AAResults &AA; |
779 | AAQueryInfo AAQI; |
780 | |
781 | public: |
782 | BatchAAResults(AAResults &AAR) : AA(AAR), AAQI() {} |
783 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) { |
784 | return AA.alias(LocA, LocB, AAQI); |
785 | } |
786 | bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false) { |
787 | return AA.pointsToConstantMemory(Loc, AAQI, OrLocal); |
788 | } |
789 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc) { |
790 | return AA.getModRefInfo(Call, Loc, AAQI); |
791 | } |
792 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2) { |
793 | return AA.getModRefInfo(Call1, Call2, AAQI); |
794 | } |
795 | ModRefInfo getModRefInfo(const Instruction *I, |
796 | const Optional<MemoryLocation> &OptLoc) { |
797 | return AA.getModRefInfo(I, OptLoc, AAQI); |
798 | } |
799 | ModRefInfo getModRefInfo(Instruction *I, const CallBase *Call2) { |
800 | return AA.getModRefInfo(I, Call2, AAQI); |
801 | } |
802 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { |
803 | return AA.getArgModRefInfo(Call, ArgIdx); |
804 | } |
805 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { |
806 | return AA.getModRefBehavior(Call); |
807 | } |
808 | }; |
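// --- Illustrative usage sketch (not part of the original header) ---
// Batch mode in action: one BatchAAResults, and therefore one shared
// AAQueryInfo cache, serves a whole run of queries over unchanging IR.
// `anyPairMayAlias` is a hypothetical helper.
inline bool anyPairMayAlias(AAResults &AA, const MemoryLocation *Locs,
                            unsigned NumLocs) {
  BatchAAResults BatchAA(AA);
  for (unsigned I = 0; I != NumLocs; ++I)
    for (unsigned J = I + 1; J != NumLocs; ++J)
      // Intermediate results cached in the AAQueryInfo are reused across
      // iterations, which is the whole point of batch mode.
      if (BatchAA.alias(Locs[I], Locs[J]) != NoAlias)
        return true;
  return false;
}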
809 | |
810 | /// Temporary typedef for legacy code that uses a generic \c AliasAnalysis |
811 | /// pointer or reference. |
812 | using AliasAnalysis = AAResults; |
813 | |
814 | /// A private abstract base class describing the concept of an individual alias |
815 | /// analysis implementation. |
816 | /// |
817 | /// This interface is implemented by any \c Model instantiation. It is also the |
818 | /// interface which a type used to instantiate the model must provide. |
819 | /// |
820 | /// All of these methods model the methods of the same name in the \c |
821 | /// AAResults class. Only the differences and the specifics of how the |
822 | /// implementations are called are documented here. |
823 | class AAResults::Concept { |
824 | public: |
825 | virtual ~Concept() = 0; |
826 | |
827 | /// An update API used internally by the AAResults to provide |
828 | /// a handle back to the top level aggregation. |
829 | virtual void setAAResults(AAResults *NewAAR) = 0; |
830 | |
831 | //===--------------------------------------------------------------------===// |
832 | /// \name Alias Queries |
833 | /// @{ |
834 | |
835 | /// The main low level interface to the alias analysis implementation. |
836 | /// Returns an AliasResult indicating whether the two pointers are aliased to |
837 | /// each other. This is the interface that must be implemented by specific |
838 | /// alias analysis implementations. |
839 | virtual AliasResult alias(const MemoryLocation &LocA, |
840 | const MemoryLocation &LocB, AAQueryInfo &AAQI) = 0; |
841 | |
842 | /// Checks whether the given location points to constant memory, or if |
843 | /// \p OrLocal is true whether it points to a local alloca. |
844 | virtual bool pointsToConstantMemory(const MemoryLocation &Loc, |
845 | AAQueryInfo &AAQI, bool OrLocal) = 0; |
846 | |
847 | /// @} |
848 | //===--------------------------------------------------------------------===// |
849 | /// \name Simple mod/ref information |
850 | /// @{ |
851 | |
852 | /// Get the ModRef info associated with a pointer argument of a callsite. The |
853 | /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note |
854 | /// that these bits do not necessarily account for the overall behavior of |
855 | /// the function, but rather only provide additional per-argument |
856 | /// information. |
857 | virtual ModRefInfo getArgModRefInfo(const CallBase *Call, |
858 | unsigned ArgIdx) = 0; |
859 | |
860 | /// Return the behavior of the given call site. |
861 | virtual FunctionModRefBehavior getModRefBehavior(const CallBase *Call) = 0; |
862 | |
863 | /// Return the behavior when calling the given function. |
864 | virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0; |
865 | |
866 | /// getModRefInfo (for call sites) - Return information about whether |
867 | /// a particular call site modifies or reads the specified memory location. |
868 | virtual ModRefInfo getModRefInfo(const CallBase *Call, |
869 | const MemoryLocation &Loc, |
870 | AAQueryInfo &AAQI) = 0; |
871 | |
872 | /// Return information about whether two call sites may refer to the same set |
873 | /// of memory locations. See the AA documentation for details: |
874 | /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo |
875 | virtual ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
876 | AAQueryInfo &AAQI) = 0; |
877 | |
878 | /// @} |
879 | }; |
880 | |
881 | /// A private class template which derives from \c Concept and wraps some other |
882 | /// type. |
883 | /// |
884 | /// This models the concept by directly forwarding each interface point to the |
885 | /// wrapped type which must implement a compatible interface. This provides |
886 | /// a type erased binding. |
887 | template <typename AAResultT> class AAResults::Model final : public Concept { |
888 | AAResultT &Result; |
889 | |
890 | public: |
891 | explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) { |
892 | Result.setAAResults(&AAR); |
893 | } |
894 | ~Model() override = default; |
895 | |
896 | void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); } |
897 | |
898 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
899 | AAQueryInfo &AAQI) override { |
900 | return Result.alias(LocA, LocB, AAQI); |
901 | } |
902 | |
903 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
904 | bool OrLocal) override { |
905 | return Result.pointsToConstantMemory(Loc, AAQI, OrLocal); |
906 | } |
907 | |
908 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) override { |
909 | return Result.getArgModRefInfo(Call, ArgIdx); |
910 | } |
911 | |
912 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) override { |
913 | return Result.getModRefBehavior(Call); |
914 | } |
915 | |
916 | FunctionModRefBehavior getModRefBehavior(const Function *F) override { |
917 | return Result.getModRefBehavior(F); |
918 | } |
919 | |
920 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
921 | AAQueryInfo &AAQI) override { |
922 | return Result.getModRefInfo(Call, Loc, AAQI); |
923 | } |
924 | |
925 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
926 | AAQueryInfo &AAQI) override { |
927 | return Result.getModRefInfo(Call1, Call2, AAQI); |
928 | } |
929 | }; |
930 | |
931 | /// A CRTP-driven "mixin" base class to help implement the function alias |
932 | /// analysis results concept. |
933 | /// |
934 | /// Because of the nature of many alias analysis implementations, they often |
935 | /// only implement a subset of the interface. This base class will attempt to |
936 | /// implement the remaining portions of the interface in terms of simpler forms |
937 | /// of the interface where possible, and otherwise provide conservatively |
938 | /// correct fallback implementations. |
939 | /// |
940 | /// Implementors of an alias analysis should derive from this CRTP, and then |
941 | /// override specific methods that they wish to customize. There is no need to |
942 | /// use virtual anywhere; the CRTP base class does static dispatch to the |
943 | /// derived type passed into it. (See the illustrative sketch after the class.) |
944 | template <typename DerivedT> class AAResultBase { |
945 | // Expose some parts of the interface only to the AAResults::Model |
946 | // for wrapping. Specifically, this allows the model to call our |
947 | // setAAResults method without exposing it as a fully public API. |
948 | friend class AAResults::Model<DerivedT>; |
949 | |
950 | /// A pointer to the AAResults object that this AAResult is |
951 | /// aggregated within. May be null if not aggregated. |
952 | AAResults *AAR = nullptr; |
953 | |
954 | /// Helper to dispatch calls back through the derived type. |
955 | DerivedT &derived() { return static_cast<DerivedT &>(*this); } |
956 | |
957 | /// A setter for the AAResults pointer, which is used to satisfy the |
958 | /// AAResults::Model contract. |
959 | void setAAResults(AAResults *NewAAR) { AAR = NewAAR; } |
960 | |
961 | protected: |
962 | /// This proxy class models a common pattern where we delegate to either the |
963 | /// top-level \c AAResults aggregation if one is registered, or to the |
964 | /// current result if none are registered. |
965 | class AAResultsProxy { |
966 | AAResults *AAR; |
967 | DerivedT &CurrentResult; |
968 | |
969 | public: |
970 | AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult) |
971 | : AAR(AAR), CurrentResult(CurrentResult) {} |
972 | |
973 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
974 | AAQueryInfo &AAQI) { |
975 | return AAR ? AAR->alias(LocA, LocB, AAQI) |
976 | : CurrentResult.alias(LocA, LocB, AAQI); |
977 | } |
978 | |
979 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
980 | bool OrLocal) { |
981 | return AAR ? AAR->pointsToConstantMemory(Loc, AAQI, OrLocal) |
982 | : CurrentResult.pointsToConstantMemory(Loc, AAQI, OrLocal); |
983 | } |
984 | |
985 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { |
986 | return AAR ? AAR->getArgModRefInfo(Call, ArgIdx) |
987 | : CurrentResult.getArgModRefInfo(Call, ArgIdx); |
988 | } |
989 | |
990 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { |
991 | return AAR ? AAR->getModRefBehavior(Call) |
992 | : CurrentResult.getModRefBehavior(Call); |
993 | } |
994 | |
995 | FunctionModRefBehavior getModRefBehavior(const Function *F) { |
996 | return AAR ? AAR->getModRefBehavior(F) : CurrentResult.getModRefBehavior(F); |
997 | } |
998 | |
999 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
1000 | AAQueryInfo &AAQI) { |
1001 | return AAR ? AAR->getModRefInfo(Call, Loc, AAQI) |
1002 | : CurrentResult.getModRefInfo(Call, Loc, AAQI); |
1003 | } |
1004 | |
1005 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
1006 | AAQueryInfo &AAQI) { |
1007 | return AAR ? AAR->getModRefInfo(Call1, Call2, AAQI) |
1008 | : CurrentResult.getModRefInfo(Call1, Call2, AAQI); |
1009 | } |
1010 | }; |
1011 | |
1012 | explicit AAResultBase() = default; |
1013 | |
1014 | // Provide all the copy and move constructors so that derived types aren't |
1015 | // constrained. |
1016 | AAResultBase(const AAResultBase &Arg) {} |
1017 | AAResultBase(AAResultBase &&Arg) {} |
1018 | |
1019 | /// Get a proxy for the best AA result set to query at this time. |
1020 | /// |
1021 | /// When this result is part of a larger aggregation, this will proxy to that |
1022 | /// aggregation. When this result is used in isolation, it will just delegate |
1023 | /// back to the derived class's implementation. |
1024 | /// |
1025 | /// Note that callers of this need to take considerable care not to cause |
1026 | /// performance problems when they use this routine: in the case of a large |
1027 | /// number of alias analyses being aggregated, it can be expensive to walk |
1028 | /// back across the chain. |
1029 | AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); } |
1030 | |
1031 | public: |
1032 | AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB, |
1033 | AAQueryInfo &AAQI) { |
1034 | return MayAlias; |
1035 | } |
1036 | |
1037 | bool pointsToConstantMemory(const MemoryLocation &Loc, AAQueryInfo &AAQI, |
1038 | bool OrLocal) { |
1039 | return false; |
1040 | } |
1041 | |
1042 | ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx) { |
1043 | return ModRefInfo::ModRef; |
1044 | } |
1045 | |
1046 | FunctionModRefBehavior getModRefBehavior(const CallBase *Call) { |
1047 | return FMRB_UnknownModRefBehavior; |
1048 | } |
1049 | |
1050 | FunctionModRefBehavior getModRefBehavior(const Function *F) { |
1051 | return FMRB_UnknownModRefBehavior; |
1052 | } |
1053 | |
1054 | ModRefInfo getModRefInfo(const CallBase *Call, const MemoryLocation &Loc, |
1055 | AAQueryInfo &AAQI) { |
1056 | return ModRefInfo::ModRef; |
1057 | } |
1058 | |
1059 | ModRefInfo getModRefInfo(const CallBase *Call1, const CallBase *Call2, |
1060 | AAQueryInfo &AAQI) { |
1061 | return ModRefInfo::ModRef; |
1062 | } |
1063 | }; |
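// --- Illustrative sketch (not part of the original header) ---
// A hypothetical minimal analysis built on the CRTP mixin above. Only
// `alias` is overridden (shadowed and statically dispatched); every other
// query inherits the conservative defaults from AAResultBase.
class ExampleAAResult : public AAResultBase<ExampleAAResult> {
  friend AAResultBase<ExampleAAResult>;

public:
  AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB,
                    AAQueryInfo &AAQI) {
    // An identical pointer and size trivially describe the same location.
    if (LocA.Ptr == LocB.Ptr && LocA.Size == LocB.Size)
      return MustAlias;
    // Otherwise defer to the conservative base implementation (MayAlias).
    return AAResultBase::alias(LocA, LocB, AAQI);
  }
};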
1064 | |
1065 | /// Return true if this pointer is returned by a noalias function. |
1066 | bool isNoAliasCall(const Value *V); |
1067 | |
1068 | /// Return true if this is an argument with the noalias attribute. |
1069 | bool isNoAliasArgument(const Value *V); |
1070 | |
1071 | /// Return true if this pointer refers to a distinct and identifiable object. |
1072 | /// This returns true for: |
1073 | /// Global Variables and Functions (but not Global Aliases) |
1074 | /// Allocas |
1075 | /// ByVal and NoAlias Arguments |
1076 | /// NoAlias returns (e.g. calls to malloc) |
1077 | /// |
1078 | bool isIdentifiedObject(const Value *V); |
1079 | |
1080 | /// Return true if V is unambiguously identified at the function level. |
1081 | /// Different IdentifiedFunctionLocals can't alias. |
1082 | /// Further, an IdentifiedFunctionLocal cannot alias with any function |
1083 | /// arguments other than itself, which is not necessarily true for |
1084 | /// IdentifiedObjects. |
1085 | bool isIdentifiedFunctionLocal(const Value *V); |
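// --- Illustrative sketch (not part of the original header) ---
// The classic use of identified objects: two *distinct* identified objects
// cannot alias. `underlyingObjectsCantAlias` is a hypothetical helper and
// assumes both values are already stripped to their underlying objects.
inline bool underlyingObjectsCantAlias(const Value *A, const Value *B) {
  return A != B && isIdentifiedObject(A) && isIdentifiedObject(B);
}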
1086 | |
1087 | /// A manager for alias analyses. |
1088 | /// |
1089 | /// This class can have analyses registered with it and when run, it will run |
1090 | /// all of them and aggregate their results into single AA results interface |
1091 | /// that dispatches across all of the alias analysis results available. |
1092 | /// |
1093 | /// Note that the order in which analyses are registered is very significant. |
1094 | /// That is the order in which the results will be aggregated and queried. |
1095 | /// |
1096 | /// This manager effectively wraps the AnalysisManager for registering alias |
1097 | /// analyses. When you register your alias analysis with this manager, it will |
1098 | /// ensure the analysis itself is registered with its AnalysisManager. |
1099 | /// |
1100 | /// The result of this analysis is only invalidated if one of the particular |
1101 | /// aggregated AA results ends up being invalidated. This removes the need to |
1102 | /// explicitly preserve the results of `AAManager`. Note that analyses should no |
1103 | /// longer be registered once the `AAManager` is run. |
1104 | class AAManager : public AnalysisInfoMixin<AAManager> { |
1105 | public: |
1106 | using Result = AAResults; |
1107 | |
1108 | /// Register a specific AA result. |
1109 | template <typename AnalysisT> void registerFunctionAnalysis() { |
1110 | ResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>); |
1111 | } |
1112 | |
1113 | /// Register a specific AA result. |
1114 | template <typename AnalysisT> void registerModuleAnalysis() { |
1115 | ResultGetters.push_back(&getModuleAAResultImpl<AnalysisT>); |
1116 | } |
1117 | |
1118 | Result run(Function &F, FunctionAnalysisManager &AM) { |
1119 | Result R(AM.getResult<TargetLibraryAnalysis>(F)); |
1120 | for (auto &Getter : ResultGetters) |
1121 | (*Getter)(F, AM, R); |
1122 | return R; |
1123 | } |
1124 | |
1125 | private: |
1126 | friend AnalysisInfoMixin<AAManager>; |
1127 | |
1128 | static AnalysisKey Key; |
1129 | |
1130 | SmallVector<void (*)(Function &F, FunctionAnalysisManager &AM, |
1131 | AAResults &AAResults), |
1132 | 4> ResultGetters; |
1133 | |
1134 | template <typename AnalysisT> |
1135 | static void getFunctionAAResultImpl(Function &F, |
1136 | FunctionAnalysisManager &AM, |
1137 | AAResults &AAResults) { |
1138 | AAResults.addAAResult(AM.template getResult<AnalysisT>(F)); |
1139 | AAResults.addAADependencyID(AnalysisT::ID()); |
1140 | } |
1141 | |
1142 | template <typename AnalysisT> |
1143 | static void getModuleAAResultImpl(Function &F, FunctionAnalysisManager &AM, |
1144 | AAResults &AAResults) { |
1145 | auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F); |
1146 | auto &MAM = MAMProxy.getManager(); |
1147 | if (auto *R = MAM.template getCachedResult<AnalysisT>(*F.getParent())) { |
1148 | AAResults.addAAResult(*R); |
1149 | MAMProxy |
1150 | .template registerOuterAnalysisInvalidation<AnalysisT, AAManager>(); |
1151 | } |
1152 | } |
1153 | }; |
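// --- Illustrative usage sketch (not part of the original header) ---
// Building an aggregation with AAManager. Registration order is query order.
// Assumes BasicAA is visible (llvm/Analysis/BasicAliasAnalysis.h); the
// helper name `makeExampleAAManager` is hypothetical.
inline AAManager makeExampleAAManager() {
  AAManager AA;
  AA.registerFunctionAnalysis<BasicAA>();
  // Further analyses (e.g. module-level ones via registerModuleAnalysis)
  // would be registered here before AA.run(F, FAM) builds the AAResults.
  return AA;
}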
1154 | |
1155 | /// A wrapper pass to provide the legacy pass manager access to a suitably |
1156 | /// prepared AAResults object. |
1157 | class AAResultsWrapperPass : public FunctionPass { |
1158 | std::unique_ptr<AAResults> AAR; |
1159 | |
1160 | public: |
1161 | static char ID; |
1162 | |
1163 | AAResultsWrapperPass(); |
1164 | |
1165 | AAResults &getAAResults() { return *AAR; } |
1166 | const AAResults &getAAResults() const { return *AAR; } |
1167 | |
1168 | bool runOnFunction(Function &F) override; |
1169 | |
1170 | void getAnalysisUsage(AnalysisUsage &AU) const override; |
1171 | }; |
1172 | |
1173 | /// A wrapper pass for external alias analyses. This just squirrels away the |
1174 | /// callback used to run any analyses and register their results. |
1175 | struct ExternalAAWrapperPass : ImmutablePass { |
1176 | using CallbackT = std::function<void(Pass &, Function &, AAResults &)>; |
1177 | |
1178 | CallbackT CB; |
1179 | |
1180 | static char ID; |
1181 | |
1182 | ExternalAAWrapperPass() : ImmutablePass(ID) { |
1183 | initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry()); |
1184 | } |
1185 | |
1186 | explicit ExternalAAWrapperPass(CallbackT CB) |
1187 | : ImmutablePass(ID), CB(std::move(CB)) { |
1188 | initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry()); |
1189 | } |
1190 | |
1191 | void getAnalysisUsage(AnalysisUsage &AU) const override { |
1192 | AU.setPreservesAll(); |
1193 | } |
1194 | }; |
1195 | |
1196 | FunctionPass *createAAResultsWrapperPass(); |
1197 | |
1198 | /// A wrapper pass around a callback which can be used to populate the |
1199 | /// AAResults in the AAResultsWrapperPass from an external AA. |
1200 | /// |
1201 | /// The callback provided here will be used each time we prepare an AAResults |
1202 | /// object, and will receive a reference to the function wrapper pass, the |
1203 | /// function, and the AAResults object to populate. This should be used when |
1204 | /// setting up a custom pass pipeline to inject a hook into the AA results. |
1205 | ImmutablePass *createExternalAAWrapperPass( |
1206 | std::function<void(Pass &, Function &, AAResults &)> Callback); |
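// --- Illustrative usage sketch (not part of the original header) ---
// Hooking a custom analysis into every legacy-PM AAResults via the external
// wrapper. ExampleAAResult is the hypothetical AAResultBase subclass
// sketched earlier; `createExampleExternalAAPass` is likewise hypothetical.
inline ImmutablePass *createExampleExternalAAPass() {
  return createExternalAAWrapperPass([](Pass &, Function &, AAResults &AAR) {
    // A real pass would fetch its result from an analysis wrapper; a
    // function-local static merely keeps the sketch self-contained.
    static ExampleAAResult EAR;
    AAR.addAAResult(EAR);
  });
}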
1207 | |
1208 | /// A helper for the legacy pass manager to create an \c AAResults |
1209 | /// object populated to the best of our ability for a particular function when |
1210 | /// inside of a \c ModulePass or a \c CallGraphSCCPass. |
1211 | /// |
1212 | /// If a \c ModulePass or a \c CallGraphSCCPass calls \p |
1213 | /// createLegacyPMAAResults, it also needs to call \p addUsedAAAnalyses in \p |
1214 | /// getAnalysisUsage. |
1215 | AAResults createLegacyPMAAResults(Pass &P, Function &F, BasicAAResult &BAR); |
1216 | |
1217 | /// A helper for the legacy pass manager to populate \p AU to add uses to make |
1218 | /// sure the analyses required by \p createLegacyPMAAResults are available. |
1219 | void getAAResultsAnalysisUsage(AnalysisUsage &AU); |
1220 | |
1221 | } // end namespace llvm |
1222 | |
1223 | #endif // LLVM_ANALYSIS_ALIASANALYSIS_H |
1 | //===- llvm/Type.h - Classes for handling data types ------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the declaration of the Type class. For more "Type" |
10 | // stuff, look in DerivedTypes.h. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_IR_TYPE_H |
15 | #define LLVM_IR_TYPE_H |
16 | |
17 | #include "llvm/ADT/APFloat.h" |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/SmallPtrSet.h" |
20 | #include "llvm/Support/CBindingWrapping.h" |
21 | #include "llvm/Support/Casting.h" |
22 | #include "llvm/Support/Compiler.h" |
23 | #include "llvm/Support/ErrorHandling.h" |
24 | #include "llvm/Support/TypeSize.h" |
25 | #include <cassert> |
26 | #include <cstdint> |
27 | #include <iterator> |
28 | |
29 | namespace llvm { |
30 | |
31 | template<class GraphType> struct GraphTraits; |
32 | class IntegerType; |
33 | class LLVMContext; |
34 | class PointerType; |
35 | class raw_ostream; |
36 | class StringRef; |
37 | |
38 | /// The instances of the Type class are immutable: once they are created, |
39 | /// they are never changed. Also note that only one instance of a particular |
40 | /// type is ever created. Thus seeing if two types are equal is a matter of |
41 | /// doing a trivial pointer comparison. To enforce that no two equal instances |
42 | /// are created, Type instances can only be created via static factory methods |
43 | /// in class Type and in derived classes. Once allocated, Types are never |
44 | /// free'd. |
45 | /// |
46 | class Type { |
47 | public: |
48 | //===--------------------------------------------------------------------===// |
49 | /// Definitions of all of the base types for the Type system. Based on this |
50 | /// value, you can cast to a class defined in DerivedTypes.h. |
51 | /// Note: If you add an element to this, you need to add an element to the |
52 | /// Type::getPrimitiveType function, or else things will break! |
53 | /// Also update LLVMTypeKind and LLVMGetTypeKind() in the C binding. |
54 | /// |
55 | enum TypeID { |
56 | // PrimitiveTypes - make sure LastPrimitiveTyID stays up to date. |
57 | VoidTyID = 0, ///< 0: type with no size |
58 | HalfTyID, ///< 1: 16-bit floating point type |
59 | FloatTyID, ///< 2: 32-bit floating point type |
60 | DoubleTyID, ///< 3: 64-bit floating point type |
61 | X86_FP80TyID, ///< 4: 80-bit floating point type (X87) |
62 | FP128TyID, ///< 5: 128-bit floating point type (112-bit mantissa) |
63 | PPC_FP128TyID, ///< 6: 128-bit floating point type (two 64-bits, PowerPC) |
64 | LabelTyID, ///< 7: Labels |
65 | MetadataTyID, ///< 8: Metadata |
66 | X86_MMXTyID, ///< 9: MMX vectors (64 bits, X86 specific) |
67 | TokenTyID, ///< 10: Tokens |
68 | |
69 | // Derived types... see DerivedTypes.h file. |
70 | // Make sure FirstDerivedTyID stays up to date! |
71 | IntegerTyID, ///< 11: Arbitrary bit width integers |
72 | FunctionTyID, ///< 12: Functions |
73 | StructTyID, ///< 13: Structures |
74 | ArrayTyID, ///< 14: Arrays |
75 | PointerTyID, ///< 15: Pointers |
76 | VectorTyID ///< 16: SIMD 'packed' format, or other vector type |
77 | }; |
78 | |
79 | private: |
80 | /// This refers to the LLVMContext in which this type was uniqued. |
81 | LLVMContext &Context; |
82 | |
83 | TypeID ID : 8; // The current base type of this type. |
84 | unsigned SubclassData : 24; // Space for subclasses to store data. |
85 | // Note that this should be synchronized with |
86 | // MAX_INT_BITS value in IntegerType class. |
87 | |
88 | protected: |
89 | friend class LLVMContextImpl; |
90 | |
91 | explicit Type(LLVMContext &C, TypeID tid) |
92 | : Context(C), ID(tid), SubclassData(0) {} |
93 | ~Type() = default; |
94 | |
95 | unsigned getSubclassData() const { return SubclassData; } |
96 | |
97 | void setSubclassData(unsigned val) { |
98 | SubclassData = val; |
99 | // Ensure we don't have any accidental truncation. |
100 | assert(getSubclassData() == val && "Subclass data too large for field"); |
101 | } |
102 | |
103 | /// Keeps track of how many Type*'s there are in the ContainedTys list. |
104 | unsigned NumContainedTys = 0; |
105 | |
106 | /// A pointer to the array of Types contained by this Type. For example, this |
107 | /// includes the arguments of a function type, the elements of a structure, |
108 | /// the pointee of a pointer, the element type of an array, etc. This pointer |
109 | /// may be 0 for types that don't contain other types (Integer, Double, |
110 | /// Float). |
111 | Type * const *ContainedTys = nullptr; |
112 | |
113 | static bool isSequentialType(TypeID TyID) { |
114 | return TyID == ArrayTyID || TyID == VectorTyID; |
115 | } |
116 | |
117 | public: |
118 | /// Print the current type. |
119 | /// Omit the type details if \p NoDetails == true. |
120 | /// E.g., let %st = type { i32, i16 } |
121 | /// When \p NoDetails is true, we only print %st. |
122 | /// Put differently, \p NoDetails prints the type as if |
123 | /// inlined with the operands when printing an instruction. |
124 | void print(raw_ostream &O, bool IsForDebug = false, |
125 | bool NoDetails = false) const; |
126 | |
127 | void dump() const; |
128 | |
129 | /// Return the LLVMContext in which this type was uniqued. |
130 | LLVMContext &getContext() const { return Context; } |
131 | |
132 | //===--------------------------------------------------------------------===// |
133 | // Accessors for working with types. |
134 | // |
135 | |
136 | /// Return the type id for the type. This will return one of the TypeID enum |
137 | /// elements defined above. |
138 | TypeID getTypeID() const { return ID; } |
139 | |
140 | /// Return true if this is 'void'. |
141 | bool isVoidTy() const { return getTypeID() == VoidTyID; } |
142 | |
143 | /// Return true if this is 'half', a 16-bit IEEE fp type. |
144 | bool isHalfTy() const { return getTypeID() == HalfTyID; } |
145 | |
146 | /// Return true if this is 'float', a 32-bit IEEE fp type. |
147 | bool isFloatTy() const { return getTypeID() == FloatTyID; } |
148 | |
149 | /// Return true if this is 'double', a 64-bit IEEE fp type. |
150 | bool isDoubleTy() const { return getTypeID() == DoubleTyID; } |
151 | |
152 | /// Return true if this is x86 long double. |
153 | bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; } |
154 | |
155 | /// Return true if this is 'fp128'. |
156 | bool isFP128Ty() const { return getTypeID() == FP128TyID; } |
157 | |
158 | /// Return true if this is powerpc long double. |
159 | bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; } |
160 | |
161 | /// Return true if this is one of the six floating-point types. |
162 | bool isFloatingPointTy() const { |
163 | return getTypeID() == HalfTyID || getTypeID() == FloatTyID || |
164 | getTypeID() == DoubleTyID || |
165 | getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID || |
166 | getTypeID() == PPC_FP128TyID; |
167 | } |
168 | |
169 | const fltSemantics &getFltSemantics() const { |
170 | switch (getTypeID()) { |
171 | case HalfTyID: return APFloat::IEEEhalf(); |
172 | case FloatTyID: return APFloat::IEEEsingle(); |
173 | case DoubleTyID: return APFloat::IEEEdouble(); |
174 | case X86_FP80TyID: return APFloat::x87DoubleExtended(); |
175 | case FP128TyID: return APFloat::IEEEquad(); |
176 | case PPC_FP128TyID: return APFloat::PPCDoubleDouble(); |
177 | default: llvm_unreachable("Invalid floating type"); |
178 | } |
179 | } |
180 | |
181 | /// Return true if this is X86 MMX. |
182 | bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; } |
183 | |
184 | /// Return true if this is a FP type or a vector of FP. |
185 | bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); } |
186 | |
187 | /// Return true if this is 'label'. |
188 | bool isLabelTy() const { return getTypeID() == LabelTyID; } |
189 | |
190 | /// Return true if this is 'metadata'. |
191 | bool isMetadataTy() const { return getTypeID() == MetadataTyID; } |
192 | |
193 | /// Return true if this is 'token'. |
194 | bool isTokenTy() const { return getTypeID() == TokenTyID; } |
195 | |
196 | /// True if this is an instance of IntegerType. |
197 | bool isIntegerTy() const { return getTypeID() == IntegerTyID; } |
198 | |
199 | /// Return true if this is an IntegerType of the given width. |
200 | bool isIntegerTy(unsigned Bitwidth) const; |
201 | |
202 | /// Return true if this is an integer type or a vector of integer types. |
203 | bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); } |
204 | |
205 | /// Return true if this is an integer type or a vector of integer types of |
206 | /// the given width. |
207 | bool isIntOrIntVectorTy(unsigned BitWidth) const { |
208 | return getScalarType()->isIntegerTy(BitWidth); |
209 | } |
210 | |
211 | /// Return true if this is an integer type or a pointer type. |
212 | bool isIntOrPtrTy() const { return isIntegerTy() || isPointerTy(); } |
213 | |
214 | /// True if this is an instance of FunctionType. |
215 | bool isFunctionTy() const { return getTypeID() == FunctionTyID; } |
216 | |
217 | /// True if this is an instance of StructType. |
218 | bool isStructTy() const { return getTypeID() == StructTyID; } |
219 | |
220 | /// True if this is an instance of ArrayType. |
221 | bool isArrayTy() const { return getTypeID() == ArrayTyID; } |
222 | |
223 | /// True if this is an instance of PointerType. |
224 | bool isPointerTy() const { return getTypeID() == PointerTyID; } |
225 | |
226 | /// Return true if this is a pointer type or a vector of pointer types. |
227 | bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); } |
228 | |
229 | /// True if this is an instance of VectorType. |
230 | bool isVectorTy() const { return getTypeID() == VectorTyID; } |
231 | |
232 | /// Return true if this type could be converted with a lossless BitCast to |
233 | /// type 'Ty'. For example, i8* to i32*. BitCasts are valid for types of the |
234 | /// same size only where no re-interpretation of the bits is done; that is, |
235 | /// determine whether this type could be losslessly bitcast to 'Ty'. |
236 | bool canLosslesslyBitCastTo(Type *Ty) const; |
237 | |
238 | /// Return true if this type is empty, that is, it has no elements or all of |
239 | /// its elements are empty. |
240 | bool isEmptyTy() const; |
241 | |
242 | /// Return true if the type is "first class", meaning it is a valid type for a |
243 | /// Value. |
244 | bool isFirstClassType() const { |
245 | return getTypeID() != FunctionTyID && getTypeID() != VoidTyID; |
246 | } |
247 | |
248 | /// Return true if the type is a valid type for a register in codegen. This |
249 | /// includes all first-class types except struct and array types. |
250 | bool isSingleValueType() const { |
251 | return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() || |
252 | isPointerTy() || isVectorTy(); |
253 | } |
254 | |
255 | /// Return true if the type is an aggregate type. This means it is valid as |
256 | /// the first operand of an insertvalue or extractvalue instruction. This |
257 | /// includes struct and array types, but does not include vector types. |
258 | bool isAggregateType() const { |
259 | return getTypeID() == StructTyID || getTypeID() == ArrayTyID; |
260 | } |
261 | |
262 | /// Return true if it makes sense to take the size of this type. To get the |
263 | /// actual size for a particular target, it is reasonable to use the |
264 | /// DataLayout subsystem to do this. |
265 | bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const { |
266 | // If it's a primitive, it is always sized. |
267 | if (getTypeID() == IntegerTyID || isFloatingPointTy() || |
268 | getTypeID() == PointerTyID || |
269 | getTypeID() == X86_MMXTyID) |
270 | return true; |
271 | // If it is not something that can have a size (e.g. a function or label), |
272 | // it doesn't have a size. |
273 | if (getTypeID() != StructTyID && getTypeID() != ArrayTyID && |
274 | getTypeID() != VectorTyID) |
275 | return false; |
276 | // Otherwise we have to try harder to decide. |
277 | return isSizedDerivedType(Visited); |
278 | } |
279 | |
280 | /// Return the basic size of this type if it is a primitive type. These are |
281 | /// fixed by LLVM and are not target-dependent. |
282 | /// This will return zero if the type does not have a size or is not a |
283 | /// primitive type. |
284 | /// |
285 | /// If this is a scalable vector type, the scalable property will be set and |
286 | /// the runtime size will be a positive integer multiple of the base size. |
287 | /// |
288 | /// Note that this may not reflect the size of memory allocated for an |
289 | /// instance of the type or the number of bytes that are written when an |
290 | /// instance of the type is stored to memory. The DataLayout class provides |
291 | /// additional query functions to provide this information. |
292 | /// |
293 | TypeSize getPrimitiveSizeInBits() const LLVM_READONLY; |
294 | |
295 | /// If this is a vector type, return the getPrimitiveSizeInBits value for the |
296 | /// element type. Otherwise return the getPrimitiveSizeInBits value for this |
297 | /// type. |
298 | unsigned getScalarSizeInBits() const LLVM_READONLY; |
299 | |
300 | /// Return the width of the mantissa of this type. This is only valid on |
301 | /// floating-point types. If the FP type does not have a stable mantissa (e.g. |
302 | /// ppc long double), this method returns -1. |
303 | int getFPMantissaWidth() const; |
304 | |
305 | /// If this is a vector type, return the element type, otherwise return |
306 | /// 'this'. |
307 | Type *getScalarType() const { |
308 | if (isVectorTy()) |
309 | return getVectorElementType(); |
310 | return const_cast<Type*>(this); |
311 | } |
312 | |
313 | //===--------------------------------------------------------------------===// |
314 | // Type Iteration support. |
315 | // |
316 | using subtype_iterator = Type * const *; |
317 | |
318 | subtype_iterator subtype_begin() const { return ContainedTys; } |
319 | subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];} |
320 | ArrayRef<Type*> subtypes() const { |
321 | return makeArrayRef(subtype_begin(), subtype_end()); |
322 | } |
323 | |
324 | using subtype_reverse_iterator = std::reverse_iterator<subtype_iterator>; |
325 | |
326 | subtype_reverse_iterator subtype_rbegin() const { |
327 | return subtype_reverse_iterator(subtype_end()); |
328 | } |
329 | subtype_reverse_iterator subtype_rend() const { |
330 | return subtype_reverse_iterator(subtype_begin()); |
331 | } |
332 | |
333 | /// This method is used to implement the type iterator (defined at the end of |
334 | /// the file). For derived types, this returns the types 'contained' in the |
335 | /// derived type. |
336 | Type *getContainedType(unsigned i) const { |
337 | assert(i < NumContainedTys && "Index out of range!"); |
338 | return ContainedTys[i]; |
339 | } |
340 | |
341 | /// Return the number of types in the derived type. |
342 | unsigned getNumContainedTypes() const { return NumContainedTys; } |
343 | |
344 | //===--------------------------------------------------------------------===// |
345 | // Helper methods corresponding to subclass methods. This forces a cast to |
346 | // the specified subclass and calls its accessor. "getVectorNumElements" (for |
347 | // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is |
348 | // only intended to cover the core methods that are frequently used, helper |
349 | // methods should not be added here. |
350 | |
351 | inline unsigned getIntegerBitWidth() const; |
352 | |
353 | inline Type *getFunctionParamType(unsigned i) const; |
354 | inline unsigned getFunctionNumParams() const; |
355 | inline bool isFunctionVarArg() const; |
356 | |
357 | inline StringRef getStructName() const; |
358 | inline unsigned getStructNumElements() const; |
359 | inline Type *getStructElementType(unsigned N) const; |
360 | |
361 | inline Type *getSequentialElementType() const { |
362 | assert(isSequentialType(getTypeID()) && "Not a sequential type!"); |
363 | return ContainedTys[0]; |
364 | } |
365 | |
366 | inline uint64_t getArrayNumElements() const; |
367 | |
368 | Type *getArrayElementType() const { |
369 | assert(getTypeID() == ArrayTyID); |
370 | return ContainedTys[0]; |
371 | } |
372 | |
373 | inline bool getVectorIsScalable() const; |
374 | inline unsigned getVectorNumElements() const; |
375 | Type *getVectorElementType() const { |
376 | assert(getTypeID() == VectorTyID); |
377 | return ContainedTys[0]; |
378 | } |
379 | |
380 | Type *getPointerElementType() const { |
381 | assert(getTypeID() == PointerTyID); |
382 | return ContainedTys[0]; |
383 | } |
384 | |
385 | /// Given a scalar/vector integer type, returns a type with elements twice as |
386 | /// wide as in the original type. For vectors, preserves element count. |
387 | inline Type *getExtendedType() const; |
388 | |
389 | /// Get the address space of this pointer or pointer vector type. |
390 | inline unsigned getPointerAddressSpace() const; |
391 | |
392 | //===--------------------------------------------------------------------===// |
393 | // Static members exported by the Type class itself. Useful for getting |
394 | // instances of Type. |
395 | // |
396 | |
397 | /// Return a type based on an identifier. |
398 | static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber); |
399 | |
400 | //===--------------------------------------------------------------------===// |
401 | // These are the builtin types that are always available. |
402 | // |
403 | static Type *getVoidTy(LLVMContext &C); |
404 | static Type *getLabelTy(LLVMContext &C); |
405 | static Type *getHalfTy(LLVMContext &C); |
406 | static Type *getFloatTy(LLVMContext &C); |
407 | static Type *getDoubleTy(LLVMContext &C); |
408 | static Type *getMetadataTy(LLVMContext &C); |
409 | static Type *getX86_FP80Ty(LLVMContext &C); |
410 | static Type *getFP128Ty(LLVMContext &C); |
411 | static Type *getPPC_FP128Ty(LLVMContext &C); |
412 | static Type *getX86_MMXTy(LLVMContext &C); |
413 | static Type *getTokenTy(LLVMContext &C); |
414 | static IntegerType *getIntNTy(LLVMContext &C, unsigned N); |
415 | static IntegerType *getInt1Ty(LLVMContext &C); |
416 | static IntegerType *getInt8Ty(LLVMContext &C); |
417 | static IntegerType *getInt16Ty(LLVMContext &C); |
418 | static IntegerType *getInt32Ty(LLVMContext &C); |
419 | static IntegerType *getInt64Ty(LLVMContext &C); |
420 | static IntegerType *getInt128Ty(LLVMContext &C); |
421 | template <typename ScalarTy> static Type *getScalarTy(LLVMContext &C) { |
422 | int noOfBits = sizeof(ScalarTy) * CHAR_BIT; |
423 | if (std::is_integral<ScalarTy>::value) { |
424 | return (Type*) Type::getIntNTy(C, noOfBits); |
425 | } else if (std::is_floating_point<ScalarTy>::value) { |
426 | switch (noOfBits) { |
427 | case 32: |
428 | return Type::getFloatTy(C); |
429 | case 64: |
430 | return Type::getDoubleTy(C); |
431 | } |
432 | } |
433 | llvm_unreachable("Unsupported type in Type::getScalarTy"); |
434 | } |
435 | |
436 | //===--------------------------------------------------------------------===// |
437 | // Convenience methods for getting pointer types with one of the above builtin |
438 | // types as pointee. |
439 | // |
440 | static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0); |
441 | static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0); |
442 | static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0); |
443 | static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0); |
444 | static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0); |
445 | static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0); |
446 | static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0); |
447 | static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0); |
448 | static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0); |
449 | static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0); |
450 | static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0); |
451 | static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0); |
452 | static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0); |
453 | |
454 | /// Return a pointer to the current type. This is equivalent to |
455 | /// PointerType::get(Foo, AddrSpace). |
456 | PointerType *getPointerTo(unsigned AddrSpace = 0) const; |
457 | |
458 | private: |
459 | /// Derived types like structures and arrays are sized iff all of the members |
460 | /// of the type are sized as well. Since asking for their size is relatively |
461 | /// uncommon, move this operation out-of-line. |
462 | bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const; |
463 | }; |
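// --- Illustrative sketch (not part of the original header) ---
// Uniquing makes type equality a pointer comparison. The helper name
// `typesAreUniqued` is hypothetical.
inline bool typesAreUniqued(LLVMContext &Ctx) {
  Type *A = Type::getInt32Ty(Ctx);         // the context's unique i32
  Type *B = Type::getIntNTy(Ctx, 32);      // same object, fetched another way
  Type *F = Type::getScalarTy<float>(Ctx); // maps C++ float to LLVM 'float'
  return A == B && F->isFloatTy();         // both always true
}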
464 | |
465 | // Printing of types. |
466 | inline raw_ostream &operator<<(raw_ostream &OS, const Type &T) { |
467 | T.print(OS); |
468 | return OS; |
469 | } |
470 | |
471 | // allow isa<PointerType>(x) to work without DerivedTypes.h included. |
472 | template <> struct isa_impl<PointerType, Type> { |
473 | static inline bool doit(const Type &Ty) { |
474 | return Ty.getTypeID() == Type::PointerTyID; |
475 | } |
476 | }; |
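// --- Illustrative sketch (not part of the original header) ---
// Thanks to the specialization above, pointer-ness can be tested without
// including DerivedTypes.h. `pointsToSizedType` is a hypothetical helper.
inline bool pointsToSizedType(Type *T) {
  return isa<PointerType>(T) && T->getPointerElementType()->isSized();
}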
477 | |
478 | // Create wrappers for C Binding types (see CBindingWrapping.h). |
479 | DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef) |
480 | |
481 | /* Specialized opaque type conversions. |
482 | */ |
483 | inline Type **unwrap(LLVMTypeRef* Tys) { |
484 | return reinterpret_cast<Type**>(Tys); |
485 | } |
486 | |
487 | inline LLVMTypeRef *wrap(Type **Tys) { |
488 | return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys)); |
489 | } |
490 | |
491 | } // end namespace llvm |
492 | |
493 | #endif // LLVM_IR_TYPE_H |