File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/CodeGen/CodeGenPrepare.cpp
Warning: line 2386, column 10: Called C++ object pointer is null
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of select created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
                          cl::init(false),
                          cl::desc("Disable select to branch conversion."));

static cl::opt<bool>
    AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
                      cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool>
    EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
153 | cl::desc("Enable sinkinig and/cmp into branches.")); | ||||

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<bool> ProfileUnknownInSpecialSection(
    "profile-unknown-in-special-section", cl::Hidden,
    cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
             "profile, we cannot tell the function is cold for sure because "
             "it may be a function newly added without ever being sampled. "
             "With the flag enabled, compiler can put such profile unknown "
             "functions into a special section, so runtime system can choose "
             "to handle it in a different way than .text section, to save "
             "RAM for example. "));

static cl::opt<bool> BBSectionsGuidedSectionPrefix(
    "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
    cl::desc("Use the basic-block-sections profile to determine the text "
             "section prefix for hot functions. Functions with "
             "basic-block-sections profile will be placed in `.text.hot` "
             "regardless of their FDO profile info. Other functions won't be "
             "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
             "profiles."));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool> EnableTypePromotionMerge(
    "cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."),
    cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool> AddrSinkNewSelects(
    "addr-sink-new-select", cl::Hidden, cl::init(true),
    cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

static cl::opt<bool> EnableICMP_EQToICMP_ST(
    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));

static cl::opt<bool>
    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
                     cl::desc("Enable BFI update verification for "
                              "CodeGenPrepare."));

static cl::opt<bool>
    OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(false),
                     cl::desc("Enable converting phi types in CodeGenPrepare"));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};
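// Illustrative note (added for clarity, not in the original source): if a
// value was promoted after seeing "zext i8 %v to i32" (recording
// ZeroExtension) and a "sext i8 %v to i32" of the same value is seen later,
// the recorded state flips to BothExtension and the cached original-type
// information can no longer be trusted.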

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  ProfileSummaryInfo *PSI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types the relevant instructions had before their
  /// promotion, for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<AssertingVH<Value>,
            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of the new GEP bases created after splitting GEPs with large
  /// offsets.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map large-offset GEPs to their serial numbers.
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the function has the OptSize attribute.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

  /// Building the dominator tree can be expensive, so we only build it
  /// lazily and update it when required.
  std::unique_ptr<DominatorTree> DT;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addUsedIfAvailable<BasicBlockSectionsProfileReader>();
  }

private:
  template <typename F>
  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
    // Substituting can cause recursive simplifications, which can invalidate
    // our iterator. Use a WeakTrackingVH to hold onto it in case this
    // happens.
    Value *CurValue = &*CurInstIterator;
    WeakTrackingVH IterHandle(CurValue);

    f();

    // If the iterator instruction was recursively deleted, start over at the
    // start of the block.
    if (IterHandle != CurValue) {
      CurInstIterator = BB->begin();
      SunkAddrs.clear();
    }
  }

  // Get the DominatorTree, building if necessary.
  DominatorTree &getDT(Function &F) {
    if (!DT)
      DT = std::make_unique<DominatorTree>(F);
    return *DT;
  }

  void removeAllAssertingVHReferences(Value *V);
  bool eliminateAssumptions(Function &F);
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool makeBitReverse(Instruction &I);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
                          unsigned AddrSpace);
  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeShiftInst(BinaryOperator *BO);
  bool optimizeFunnelShift(IntrinsicInst *Fsh);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchType(SwitchInst *SI);
  bool optimizeSwitchPhiConstants(SwitchInst *SI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
  bool fixupDbgValue(Instruction *I);
  bool placeDbgValues(Function &F);
  bool placePseudoProbes(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
  bool optimizePhiTypes(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F, bool &ModifiedDT);
  bool simplifyOffsetableRelocate(GCStatepointInst &I);

  bool tryToSinkFreeOperands(Instruction *I);
  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
                                   CmpInst *Cmp, Intrinsic::ID IID);
  bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
  void verifyBFIUpdates(Function &F);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReader)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, "Optimize for code generation",
                    false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  SubtargetInfo = TM->getSubtargetImpl(F);
  TLI = SubtargetInfo->getTargetLowering();
  TRI = SubtargetInfo->getRegisterInfo();
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  BBSectionsProfileReader =
      getAnalysisIfAvailable<BasicBlockSectionsProfileReader>();
  OptSize = F.hasOptSize();
  // Use the basic-block-sections profile to promote hot functions to .text.hot
  // if requested.
  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
      BBSectionsProfileReader->isFunctionHot(F.getName())) {
    F.setSectionPrefix("hot");
  } else if (ProfileGuidedSectionPrefix) {
    // The hot attribute overrides profile-count-based hotness, while
    // profile-count-based hotness overrides the cold attribute.
    // This is conservative behavior.
    if (F.hasFnAttribute(Attribute::Hot) ||
        PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix("hot");
    // If PSI shows this function is not hot, we place the function into the
    // unlikely section if (1) PSI shows this is a cold function, or (2) the
    // function has the cold attribute.
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
             F.hasFnAttribute(Attribute::Cold))
      F.setSectionPrefix("unlikely");
    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
             PSI->isFunctionHotnessUnknown(F))
      F.setSectionPrefix("unknown");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      // F.hasOptSize is already checked in the outer if statement.
      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }
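  // Illustrative sketch (added for clarity; hypothetical widths, not from
  // this file) of what bypassSlowDivision produces when a 64-bit divide may
  // be bypassed by a 32-bit one:
  //   %d = udiv i64 %a, %b
  // becomes, roughly:
  //   %or   = or i64 %a, %b
  //   %fits = icmp ult i64 %or, 4294967296  ; do both operands fit in 32 bits?
  //   br i1 %fits, label %fast, label %slow ; fast: 32-bit udiv + zext,
  //                                         ; slow: original 64-bit udiv,
  //                                         ; then merge the results via phi.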

  // Get rid of @llvm.assume builtins before attempting to eliminate empty
  // blocks, since there might be blocks that only contain @llvm.assume calls
  // (plus arguments that we can get rid of).
  EverMadeChange |= eliminateAssumptions(F);

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  bool ModifiedDT = false;
  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F, ModifiedDT);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |=
      SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    DT.reset();
    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();
    MadeChange |= optimizePhiTypes(F);

    if (MadeChange)
      eliminateFallThrough(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  NewGEPBases.clear();
  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange)
        continue;

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(successors(BB));

      DeleteDeadBlock(BB);

      for (BasicBlock *Succ : Successors)
        if (pred_empty(Succ))
          WorkList.insert(Succ);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<GCStatepointInst *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
          Statepoints.push_back(SP);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);
  EverMadeChange |= placePseudoProbes(F);

#ifndef NDEBUG
  if (VerifyBFIUpdates)
    verifyBFIUpdates(F);
#endif

  return EverMadeChange;
}

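// Illustrative note (added for clarity, not in the original source): a block
// holding only
//   %c = icmp ult i32 %x, 10
//   call void @llvm.assume(i1 %c)
//   br label %next
// becomes mostly empty once the assume and the then-trivially-dead %c are
// erased below, which lets eliminateMostlyEmptyBlocks remove it.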
bool CodeGenPrepare::eliminateAssumptions(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    CurInstIterator = BB.begin();
    while (CurInstIterator != BB.end()) {
      Instruction *I = &*(CurInstIterator++);
      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
        MadeChange = true;
        Value *Operand = Assume->getOperand(0);
        Assume->eraseFromParent();

        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
        });
      }
    }
  }
  return MadeChange;
}

/// An instruction is about to be deleted, so remove all references to it in our
/// GEP-tracking data structures.
void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
  LargeOffsetGEPMap.erase(V);
  NewGEPBases.erase(V);

  auto GEP = dyn_cast<GetElementPtrInst>(V);
  if (!GEP)
    return;

  LargeOffsetGEPID.erase(GEP);

  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
  if (VecI == LargeOffsetGEPMap.end())
    return;

  auto &GEPVector = VecI->second;
  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });

  if (GEPVector.empty())
    LargeOffsetGEPMap.erase(VecI);
}

// Verify BFI has been updated correctly by recomputing BFI and comparing them.
void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
  DominatorTree NewDT(F);
  LoopInfo NewLI(NewDT);
  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
  NewBFI.verifyMatch(*BFI);
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
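/// For example (illustrative IR, not from this file):
///   bb1: ... ; ends in "br label %bb2"
///   bb2: ... ; bb1 is its only predecessor
/// collapses into a single block, provided bb2's address is not taken.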
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid the iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  SmallSet<WeakTrackingVH, 16> Preds;
  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
      continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB);
      Preds.insert(SinglePred);
    }
  }

  // (Repeatedly) merging blocks into their predecessors can create redundant
  // debug intrinsics.
  for (const auto &Pred : Preds)
    if (auto *BB = cast_or_null<BasicBlock>(Pred))
      RemoveRedundantDbgInstrs(BB);

  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
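/// Illustrative shape of such a block (hypothetical IR, not from this file):
///   bb:
///     %p = phi i32 [ 0, %a ], [ 1, %b ]
///     br label %dest
/// Only PHIs and debug intrinsics may appear before the unconditional branch.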
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    llvm::append_range(LoopList, *L);
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::drop_begin(F))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Skip merging if the block's successor is also a successor to any callbr
  // that leads to this block.
  // FIXME: Is this really needed? Is this a correctness issue?
  for (BasicBlock *Pred : predecessors(BB)) {
    if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
        if (DestBB == CBI->getSuccessor(i))
          return false;
  }

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
                 isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if the
  // cost of skipping is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), this simplifies to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
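  // Worked example with made-up frequencies (added for clarity): if
  // Freq(Pred) = 600 and Freq(BB) = 200, the ratio is 3 > FreqRatioToSkipMerge
  // (default 2), so the function returns false and BB is kept; ISel then
  // places the COPYs in the rarely executed BB rather than in the hot Pred.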

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In that case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto *SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
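/// Illustrative conflict (hypothetical CFG, added for clarity): if some block
/// P is a predecessor of both BB and DestBB, a phi in DestBB could receive
/// one value along the direct edge P->DestBB and a different one along
/// P->BB->DestBB; merging would lose that distinction, so such blocks are
/// rejected below.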
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If the user is a PHINode inside DestBB, check its incoming values.
      // If an incoming value is not from BB, then this is a complex condition
      // (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN)
    return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2)
          return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (BasicBlock *Pred : predecessors(BB))
          PN.addIncoming(InVal, Pred);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
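// For example (hypothetical indices, added for clarity): given relocates
// keyed (0,0) and (0,1) from the same statepoint, the map sends the (0,0)
// base relocate to a list containing the (0,1) derived relocate; a derived
// relocate with no matching base entry is simply skipped.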
1027 | static void computeBaseDerivedRelocateMap( | ||||
1028 | const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, | ||||
1029 | DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> | ||||
1030 | &RelocateInstMap) { | ||||
1031 | // Collect information in two maps: one primarily for locating the base object | ||||
1032 | // while filling the second map; the second map is the final structure holding | ||||
1033 | // a mapping between Base and corresponding Derived relocate calls | ||||
1034 | DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; | ||||
1035 | for (auto *ThisRelocate : AllRelocateCalls) { | ||||
1036 | auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), | ||||
1037 | ThisRelocate->getDerivedPtrIndex()); | ||||
1038 | RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); | ||||
1039 | } | ||||
1040 | for (auto &Item : RelocateIdxMap) { | ||||
1041 | std::pair<unsigned, unsigned> Key = Item.first; | ||||
1042 | if (Key.first == Key.second) | ||||
1043 | // Base relocation: nothing to insert | ||||
1044 | continue; | ||||
1045 | |||||
1046 | GCRelocateInst *I = Item.second; | ||||
1047 | auto BaseKey = std::make_pair(Key.first, Key.first); | ||||
1048 | |||||
1049 | // We're iterating over RelocateIdxMap so we cannot modify it. | ||||
1050 | auto MaybeBase = RelocateIdxMap.find(BaseKey); | ||||
1051 | if (MaybeBase == RelocateIdxMap.end()) | ||||
1052 | // TODO: We might want to insert a new base object relocate and gep off | ||||
1053 | // that, if there are enough derived object relocates. | ||||
1054 | continue; | ||||
1055 | |||||
1056 | RelocateInstMap[MaybeBase->second].push_back(I); | ||||
1057 | } | ||||
1058 | } | ||||
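| |||||
| // For example (operand indices are illustrative): given a statepoint whose | ||||
| // gc arguments hold %base at index 7 and %derived at index 8, the calls | ||||
| //   %b = relocate(%tok, i32 7, i32 7) ; key (7,7), a base relocate | ||||
| //   %d = relocate(%tok, i32 7, i32 8) ; key (7,8), a derived relocate | ||||
| // yield RelocateInstMap = { %b -> [%d] }. | ||||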
1059 | |||||
1060 | // Accepts a GEP and extracts the operands into a vector provided they're all | ||||
1061 | // small integer constants. | ||||
1062 | static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, | ||||
1063 | SmallVectorImpl<Value *> &OffsetV) { | ||||
1064 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) { | ||||
1065 | // Only accept small constant integer operands | ||||
1066 | auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); | ||||
1067 | if (!Op || Op->getZExtValue() > 20) | ||||
1068 | return false; | ||||
1069 | } | ||||
1070 | |||||
1071 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) | ||||
1072 | OffsetV.push_back(GEP->getOperand(i)); | ||||
1073 | return true; | ||||
1074 | } | ||||
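| |||||
| // For example (illustrative): for | ||||
| //   %d = getelementptr i32, i32 addrspace(1)* %base, i64 15 | ||||
| // OffsetV becomes {15} and the function returns true. An index of 21, or a | ||||
| // non-constant index, would make it return false. | ||||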
1075 | |||||
1076 | // Takes a RelocatedBase (base pointer relocation instruction) and Targets to | ||||
1077 | // replace, computes a replacement, and applies it. | ||||
1078 | static bool | ||||
1079 | simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, | ||||
1080 | const SmallVectorImpl<GCRelocateInst *> &Targets) { | ||||
1081 | bool MadeChange = false; | ||||
1082 | // We must ensure that the relocation of the derived pointer is defined | ||||
1083 | // after the relocation of the base pointer. If we find a relocation | ||||
1084 | // corresponding to the base that is defined earlier than the base's own | ||||
1085 | // relocation, we move the base's relocation right before it. We consider | ||||
1086 | // only relocations in the same basic block as the relocation of the base; | ||||
1087 | // relocations in other basic blocks are skipped by this optimization. | ||||
1088 | for (auto R = RelocatedBase->getParent()->getFirstInsertionPt(); | ||||
1089 | &*R != RelocatedBase; ++R) | ||||
1090 | if (auto *RI = dyn_cast<GCRelocateInst>(R)) | ||||
1091 | if (RI->getStatepoint() == RelocatedBase->getStatepoint()) | ||||
1092 | if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) { | ||||
1093 | RelocatedBase->moveBefore(RI); | ||||
1094 | break; | ||||
1095 | } | ||||
1096 | |||||
1097 | for (GCRelocateInst *ToReplace : Targets) { | ||||
1098 | assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() && | ||||
1099 | "Not relocating a derived object of the original base object"); | ||||
1100 | if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) { | ||||
1101 | // A duplicate relocate call. TODO: coalesce duplicates. | ||||
1102 | continue; | ||||
1103 | } | ||||
1104 | |||||
1105 | if (RelocatedBase->getParent() != ToReplace->getParent()) { | ||||
1106 | // Base and derived relocates are in different basic blocks. | ||||
1107 | // In this case transform is only valid when base dominates derived | ||||
1108 | // relocate. However it would be too expensive to check dominance | ||||
1109 | // for each such relocate, so we skip the whole transformation. | ||||
1110 | continue; | ||||
1111 | } | ||||
1112 | |||||
1113 | Value *Base = ToReplace->getBasePtr(); | ||||
1114 | auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr()); | ||||
1115 | if (!Derived || Derived->getPointerOperand() != Base) | ||||
1116 | continue; | ||||
1117 | |||||
1118 | SmallVector<Value *, 2> OffsetV; | ||||
1119 | if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) | ||||
1120 | continue; | ||||
1121 | |||||
1122 | // Create a Builder and replace the target callsite with a gep | ||||
1123 | assert(RelocatedBase->getNextNode() && | ||||
1124 | "Should always have one since it's not a terminator"); | ||||
1125 | |||||
1126 | // Insert after RelocatedBase | ||||
1127 | IRBuilder<> Builder(RelocatedBase->getNextNode()); | ||||
1128 | Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); | ||||
1129 | |||||
1130 | // If gc_relocate does not match the actual type, cast it to the right type. | ||||
1131 | // In theory, there must be a bitcast after gc_relocate if the type does not | ||||
1132 | // match, and we should reuse it to get the derived pointer. But it could be | ||||
1133 | // cases like this: | ||||
1134 | // bb1: | ||||
1135 | // ... | ||||
1136 | // %g1 = call coldcc i8 addrspace(1)* | ||||
1137 | // @llvm.experimental.gc.relocate.p1i8(...) br label %merge | ||||
1138 | // | ||||
1139 | // bb2: | ||||
1140 | // ... | ||||
1141 | // %g2 = call coldcc i8 addrspace(1)* | ||||
1142 | // @llvm.experimental.gc.relocate.p1i8(...) br label %merge | ||||
1143 | // | ||||
1144 | // merge: | ||||
1145 | // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] | ||||
1146 | // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)* | ||||
1147 | // | ||||
1148 | // In this case, we can no longer find the bitcast. So we insert a new | ||||
1149 | // bitcast whether one already exists or not. In this way, we can handle | ||||
1150 | // all cases, and the extra bitcast should be optimized away by later | ||||
1151 | // passes. | ||||
1152 | Value *ActualRelocatedBase = RelocatedBase; | ||||
1153 | if (RelocatedBase->getType() != Base->getType()) { | ||||
1154 | ActualRelocatedBase = | ||||
1155 | Builder.CreateBitCast(RelocatedBase, Base->getType()); | ||||
1156 | } | ||||
1157 | Value *Replacement = | ||||
1158 | Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase, | ||||
1159 | makeArrayRef(OffsetV)); | ||||
1160 | Replacement->takeName(ToReplace); | ||||
1161 | // If the newly generated derived pointer's type does not match the original | ||||
1162 | // derived pointer's type, cast the new derived pointer to match it. Same | ||||
1163 | // reasoning as above. | ||||
1164 | Value *ActualReplacement = Replacement; | ||||
1165 | if (Replacement->getType() != ToReplace->getType()) { | ||||
1166 | ActualReplacement = | ||||
1167 | Builder.CreateBitCast(Replacement, ToReplace->getType()); | ||||
1168 | } | ||||
1169 | ToReplace->replaceAllUsesWith(ActualReplacement); | ||||
1170 | ToReplace->eraseFromParent(); | ||||
1171 | |||||
1172 | MadeChange = true; | ||||
1173 | } | ||||
1174 | return MadeChange; | ||||
1175 | } | ||||
1176 | |||||
1177 | // Turns this: | ||||
1178 | // | ||||
1179 | // %base = ... | ||||
1180 | // %ptr = gep %base + 15 | ||||
1181 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | ||||
1182 | // %base' = relocate(%tok, i32 4, i32 4) | ||||
1183 | // %ptr' = relocate(%tok, i32 4, i32 5) | ||||
1184 | // %val = load %ptr' | ||||
1185 | // | ||||
1186 | // into this: | ||||
1187 | // | ||||
1188 | // %base = ... | ||||
1189 | // %ptr = gep %base + 15 | ||||
1190 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | ||||
1191 | // %base' = gc.relocate(%tok, i32 4, i32 4) | ||||
1192 | // %ptr' = gep %base' + 15 | ||||
1193 | // %val = load %ptr' | ||||
1194 | bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) { | ||||
1195 | bool MadeChange = false; | ||||
1196 | SmallVector<GCRelocateInst *, 2> AllRelocateCalls; | ||||
1197 | for (auto *U : I.users()) | ||||
1198 | if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) | ||||
1199 | // Collect all the relocate calls associated with a statepoint | ||||
1200 | AllRelocateCalls.push_back(Relocate); | ||||
1201 | |||||
1202 | // We need at least one base pointer relocation and one derived pointer | ||||
1203 | // relocation for this transformation to apply. | ||||
1204 | if (AllRelocateCalls.size() < 2) | ||||
1205 | return false; | ||||
1206 | |||||
1207 | // RelocateInstMap is a mapping from the base relocate instruction to the | ||||
1208 | // corresponding derived relocate instructions | ||||
1209 | DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; | ||||
1210 | computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); | ||||
1211 | if (RelocateInstMap.empty()) | ||||
1212 | return false; | ||||
1213 | |||||
1214 | for (auto &Item : RelocateInstMap) | ||||
1215 | // Item.first is the RelocatedBase to offset against | ||||
1216 | // Item.second is the vector of Targets to replace | ||||
1217 | MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second); | ||||
1218 | return MadeChange; | ||||
1219 | } | ||||
1220 | |||||
1221 | /// Sink the specified cast instruction into its user blocks. | ||||
1222 | static bool SinkCast(CastInst *CI) { | ||||
1223 | BasicBlock *DefBB = CI->getParent(); | ||||
1224 | |||||
1225 | /// InsertedCasts - Only insert a cast in each block once. | ||||
1226 | DenseMap<BasicBlock *, CastInst *> InsertedCasts; | ||||
1227 | |||||
1228 | bool MadeChange = false; | ||||
1229 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | ||||
1230 | UI != E;) { | ||||
1231 | Use &TheUse = UI.getUse(); | ||||
1232 | Instruction *User = cast<Instruction>(*UI); | ||||
1233 | |||||
1234 | // Figure out which BB this cast is used in. For PHI's this is the | ||||
1235 | // appropriate predecessor block. | ||||
1236 | BasicBlock *UserBB = User->getParent(); | ||||
1237 | if (PHINode *PN = dyn_cast<PHINode>(User)) { | ||||
1238 | UserBB = PN->getIncomingBlock(TheUse); | ||||
1239 | } | ||||
1240 | |||||
1241 | // Preincrement use iterator so we don't invalidate it. | ||||
1242 | ++UI; | ||||
1243 | |||||
1244 | // The first insertion point of a block containing an EH pad is after the | ||||
1245 | // pad. If the pad is the user, we cannot sink the cast past the pad. | ||||
1246 | if (User->isEHPad()) | ||||
1247 | continue; | ||||
1248 | |||||
1249 | // If the block selected to receive the cast is an EH pad that does not | ||||
1250 | // allow non-PHI instructions before the terminator, we can't sink the | ||||
1251 | // cast. | ||||
1252 | if (UserBB->getTerminator()->isEHPad()) | ||||
1253 | continue; | ||||
1254 | |||||
1255 | // If this user is in the same block as the cast, don't change the cast. | ||||
1256 | if (UserBB == DefBB) | ||||
1257 | continue; | ||||
1258 | |||||
1259 | // If we have already inserted a cast into this block, use it. | ||||
1260 | CastInst *&InsertedCast = InsertedCasts[UserBB]; | ||||
1261 | |||||
1262 | if (!InsertedCast) { | ||||
1263 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | ||||
1264 | assert(InsertPt != UserBB->end()); | ||||
1265 | InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), | ||||
1266 | CI->getType(), "", &*InsertPt); | ||||
1267 | InsertedCast->setDebugLoc(CI->getDebugLoc()); | ||||
1268 | } | ||||
1269 | |||||
1270 | // Replace a use of the cast with a use of the new cast. | ||||
1271 | TheUse = InsertedCast; | ||||
1272 | MadeChange = true; | ||||
1273 | ++NumCastUses; | ||||
1274 | } | ||||
1275 | |||||
1276 | // If we removed all uses, nuke the cast. | ||||
1277 | if (CI->use_empty()) { | ||||
1278 | salvageDebugInfo(*CI); | ||||
1279 | CI->eraseFromParent(); | ||||
1280 | MadeChange = true; | ||||
1281 | } | ||||
1282 | |||||
1283 | return MadeChange; | ||||
1284 | } | ||||
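| |||||
| // A minimal sketch of the effect (invented names): SinkCast rewrites | ||||
| //   bb1:  %c = zext i32 %x to i64 | ||||
| //   bb2:  %u = add i64 %c, 1 | ||||
| // into | ||||
| //   bb2:  %c1 = zext i32 %x to i64 | ||||
| //         %u = add i64 %c1, 1 | ||||
| // and then erases the original cast once it has no remaining uses. | ||||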
1285 | |||||
1286 | /// If the specified cast instruction is a noop copy (e.g. it's casting from | ||||
1287 | /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to | ||||
1288 | /// reduce the number of virtual registers that must be created and coalesced. | ||||
1289 | /// | ||||
1290 | /// Return true if any changes are made. | ||||
1291 | static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, | ||||
1292 | const DataLayout &DL) { | ||||
1293 | // Sink only "cheap" (or nop) address-space casts. This is a weaker condition | ||||
1294 | // than sinking only nop casts, but is helpful on some platforms. | ||||
1295 | if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { | ||||
1296 | if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(), | ||||
1297 | ASC->getDestAddressSpace())) | ||||
1298 | return false; | ||||
1299 | } | ||||
1300 | |||||
1301 | // Check whether this is a noop copy. | ||||
1302 | EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); | ||||
1303 | EVT DstVT = TLI.getValueType(DL, CI->getType()); | ||||
1304 | |||||
1305 | // Is this an fp<->int conversion? If so, it is not a noop. | ||||
1306 | if (SrcVT.isInteger() != DstVT.isInteger()) | ||||
1307 | return false; | ||||
1308 | |||||
1309 | // If this is an extension, it will be a zero or sign extension, which | ||||
1310 | // isn't a noop. | ||||
1311 | if (SrcVT.bitsLT(DstVT)) | ||||
1312 | return false; | ||||
1313 | |||||
1314 | // If these values will be promoted, find out what they will be promoted | ||||
1315 | // to. This helps us consider truncates on PPC as noop copies when they | ||||
1316 | // are. | ||||
1317 | if (TLI.getTypeAction(CI->getContext(), SrcVT) == | ||||
1318 | TargetLowering::TypePromoteInteger) | ||||
1319 | SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); | ||||
1320 | if (TLI.getTypeAction(CI->getContext(), DstVT) == | ||||
1321 | TargetLowering::TypePromoteInteger) | ||||
1322 | DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); | ||||
1323 | |||||
1324 | // If, after promotion, these are the same types, this is a noop copy. | ||||
1325 | if (SrcVT != DstVT) | ||||
1326 | return false; | ||||
1327 | |||||
1328 | return SinkCast(CI); | ||||
1329 | } | ||||
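| |||||
| // For example, on a target where i8 is promoted to i32 (as with the PPC | ||||
| // case mentioned above), a 'trunc i32 %x to i8' has SrcVT = i32 and | ||||
| // DstVT = i8; after promotion DstVT becomes i32 as well, so the trunc is a | ||||
| // noop copy and is sunk by SinkCast. | ||||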
1330 | |||||
1331 | // Match a simple increment by constant operation. Note that if a sub is | ||||
1332 | // matched, the step is negated (as if the step had been canonicalized to | ||||
1333 | // an add, even though we leave the instruction alone.) | ||||
1334 | bool matchIncrement(const Instruction *IVInc, Instruction *&LHS, | ||||
1335 | Constant *&Step) { | ||||
1336 | if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) || | ||||
1337 | match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>( | ||||
1338 | m_Instruction(LHS), m_Constant(Step))))) | ||||
1339 | return true; | ||||
1340 | if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) || | ||||
1341 | match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>( | ||||
1342 | m_Instruction(LHS), m_Constant(Step))))) { | ||||
1343 | Step = ConstantExpr::getNeg(Step); | ||||
1344 | return true; | ||||
1345 | } | ||||
1346 | return false; | ||||
1347 | } | ||||
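| |||||
| // For example (illustrative values): '%iv.next = add i32 %iv, 1' sets | ||||
| // LHS = %iv and Step = 1, while '%iv.next = sub i32 %iv, 2' sets LHS = %iv | ||||
| // and Step = -2, as if the sub had been canonicalized to an add. | ||||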
1348 | |||||
1349 | /// If the given \p PN is an induction variable with value IVInc coming from | ||||
1350 | /// the backedge, and on each iteration it gets increased by Step, return the | ||||
1351 | /// pair <IVInc, Step>. Otherwise, return None. | ||||
1352 | static Optional<std::pair<Instruction *, Constant *>> | ||||
1353 | getIVIncrement(const PHINode *PN, const LoopInfo *LI) { | ||||
1354 | const Loop *L = LI->getLoopFor(PN->getParent()); | ||||
1355 | if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch()) | ||||
1356 | return None; | ||||
1357 | auto *IVInc = | ||||
1358 | dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); | ||||
1359 | if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L) | ||||
1360 | return None; | ||||
1361 | Instruction *LHS = nullptr; | ||||
1362 | Constant *Step = nullptr; | ||||
1363 | if (matchIncrement(IVInc, LHS, Step) && LHS == PN) | ||||
1364 | return std::make_pair(IVInc, Step); | ||||
1365 | return None; | ||||
1366 | } | ||||
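| |||||
| // A sketch of the matched shape (illustrative loop): | ||||
| //   header: | ||||
| //     %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ] | ||||
| //   latch: | ||||
| //     %iv.next = add i32 %iv, 1 | ||||
| //     br label %header | ||||
| // Here getIVIncrement(%iv, LI) returns the pair <%iv.next, 1>. | ||||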
1367 | |||||
1368 | static bool isIVIncrement(const Value *V, const LoopInfo *LI) { | ||||
1369 | auto *I = dyn_cast<Instruction>(V); | ||||
1370 | if (!I) | ||||
1371 | return false; | ||||
1372 | Instruction *LHS = nullptr; | ||||
1373 | Constant *Step = nullptr; | ||||
1374 | if (!matchIncrement(I, LHS, Step)) | ||||
1375 | return false; | ||||
1376 | if (auto *PN = dyn_cast<PHINode>(LHS)) | ||||
1377 | if (auto IVInc = getIVIncrement(PN, LI)) | ||||
1378 | return IVInc->first == I; | ||||
1379 | return false; | ||||
1380 | } | ||||
1381 | |||||
1382 | bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO, | ||||
1383 | Value *Arg0, Value *Arg1, | ||||
1384 | CmpInst *Cmp, | ||||
1385 | Intrinsic::ID IID) { | ||||
1386 | auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) { | ||||
1387 | if (!isIVIncrement(BO, LI)) | ||||
1388 | return false; | ||||
1389 | const Loop *L = LI->getLoopFor(BO->getParent()); | ||||
1390 | assert(L && "L should not be null after isIVIncrement()"); | ||||
1391 | // Do not risk moving the increment into a child loop. | ||||
1392 | if (LI->getLoopFor(Cmp->getParent()) != L) | ||||
1393 | return false; | ||||
1394 | |||||
1395 | // Finally, we need to ensure that the insert point will dominate all | ||||
1396 | // existing uses of the increment. | ||||
1397 | |||||
1398 | auto &DT = getDT(*BO->getParent()->getParent()); | ||||
1399 | if (DT.dominates(Cmp->getParent(), BO->getParent())) | ||||
1400 | // If we're moving up the dom tree, all uses are trivially dominated. | ||||
1401 | // (This is the common case for code produced by LSR.) | ||||
1402 | return true; | ||||
1403 | |||||
1404 | // Otherwise, special case the single use in the phi recurrence. | ||||
1405 | return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch()); | ||||
1406 | }; | ||||
1407 | if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) { | ||||
1408 | // We used to use a dominator tree here to allow multi-block optimization. | ||||
1409 | // But that was problematic because: | ||||
1410 | // 1. It could cause a perf regression by hoisting the math op into the | ||||
1411 | // critical path. | ||||
1412 | // 2. It could cause a perf regression by creating a value that was live | ||||
1413 | // across multiple blocks and increasing register pressure. | ||||
1414 | // 3. Use of a dominator tree could cause large compile-time regression. | ||||
1415 | // This is because we recompute the DT on every change in the main CGP | ||||
1416 | // run-loop. The recomputing is probably unnecessary in many cases, so if | ||||
1417 | // that was fixed, using a DT here would be ok. | ||||
1418 | // | ||||
1419 | // There is one important particular case we still want to handle: when BO | ||||
1420 | // is the IV increment. Important properties that make it profitable: | ||||
1421 | // - We can speculate the IV increment anywhere in the loop (as long as the | ||||
1422 | //   indvar Phi is its only user); | ||||
1423 | // - Upon computing Cmp, we effectively compute something equivalent to the | ||||
1424 | //   IV increment (despite it looking different in the IR). So moving it up | ||||
1425 | //   to the cmp point does not really increase register pressure. | ||||
1426 | return false; | ||||
1427 | } | ||||
1428 | |||||
1429 | // We allow matching the canonical IR (add X, C) back to (usubo X, -C). | ||||
1430 | if (BO->getOpcode() == Instruction::Add && | ||||
1431 | IID == Intrinsic::usub_with_overflow) { | ||||
1432 | assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); | ||||
1433 | Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); | ||||
1434 | } | ||||
1435 | |||||
1436 | // Insert at the first instruction of the pair. | ||||
1437 | Instruction *InsertPt = nullptr; | ||||
1438 | for (Instruction &Iter : *Cmp->getParent()) { | ||||
1439 | // If BO is an XOR, it is not guaranteed that it comes after both inputs to | ||||
1440 | // the overflow intrinsic are defined. | ||||
1441 | if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) { | ||||
1442 | InsertPt = &Iter; | ||||
1443 | break; | ||||
1444 | } | ||||
1445 | } | ||||
1446 | assert(InsertPt != nullptr && "Parent block did not contain cmp or binop"); | ||||
1447 | |||||
1448 | IRBuilder<> Builder(InsertPt); | ||||
1449 | Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); | ||||
1450 | if (BO->getOpcode() != Instruction::Xor) { | ||||
1451 | Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); | ||||
1452 | BO->replaceAllUsesWith(Math); | ||||
1453 | } else | ||||
1454 | assert(BO->hasOneUse() && | ||||
1455 | "Patterns with XOr should use the BO only in the compare"); | ||||
1456 | Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); | ||||
1457 | Cmp->replaceAllUsesWith(OV); | ||||
1458 | Cmp->eraseFromParent(); | ||||
1459 | BO->eraseFromParent(); | ||||
1460 | return true; | ||||
1461 | } | ||||
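| |||||
| // Sketch of the rewrite above (illustrative, with IID = uadd_with_overflow): | ||||
| //   %add = add i32 %a, %b | ||||
| //   %c   = icmp ult i32 %add, %a | ||||
| // becomes | ||||
| //   %mov  = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b) | ||||
| //   %math = extractvalue { i32, i1 } %mov, 0 | ||||
| //   %ov   = extractvalue { i32, i1 } %mov, 1 | ||||
| // with %add replaced by %math and %c replaced by %ov. | ||||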
1462 | |||||
1463 | /// Match special-case patterns that check for unsigned add overflow. | ||||
1464 | static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp, | ||||
1465 | BinaryOperator *&Add) { | ||||
1466 | // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val) | ||||
1467 | // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero) | ||||
1468 | Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); | ||||
1469 | |||||
1470 | // We are not expecting non-canonical/degenerate code. Just bail out. | ||||
1471 | if (isa<Constant>(A)) | ||||
1472 | return false; | ||||
1473 | |||||
1474 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | ||||
1475 | if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes())) | ||||
1476 | B = ConstantInt::get(B->getType(), 1); | ||||
1477 | else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) | ||||
1478 | B = ConstantInt::get(B->getType(), -1); | ||||
1479 | else | ||||
1480 | return false; | ||||
1481 | |||||
1482 | // Check the users of the variable operand of the compare looking for an add | ||||
1483 | // with the adjusted constant. | ||||
1484 | for (User *U : A->users()) { | ||||
1485 | if (match(U, m_Add(m_Specific(A), m_Specific(B)))) { | ||||
1486 | Add = cast<BinaryOperator>(U); | ||||
1487 | return true; | ||||
1488 | } | ||||
1489 | } | ||||
1490 | return false; | ||||
1491 | } | ||||
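| |||||
| // For example (illustrative): in | ||||
| //   %add = add i32 %a, 1 | ||||
| //   %cmp = icmp eq i32 %a, -1 | ||||
| // the compare tests exactly the condition under which the add overflows | ||||
| // (%a == UINT_MAX), so Add is set to %add for the caller to combine. | ||||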
1492 | |||||
1493 | /// Try to combine the compare into a call to the llvm.uadd.with.overflow | ||||
1494 | /// intrinsic. Return true if any changes were made. | ||||
1495 | bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT) { | ||||
1496 | Value *A, *B; | ||||
1497 | BinaryOperator *Add; | ||||
1498 | if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) { | ||||
1499 | if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add)) | ||||
1500 | return false; | ||||
1501 | // Set A and B when we matched via matchUAddWithOverflowConstantEdgeCases. | ||||
1502 | A = Add->getOperand(0); | ||||
1503 | B = Add->getOperand(1); | ||||
1504 | } | ||||
1505 | |||||
1506 | if (!TLI->shouldFormOverflowOp(ISD::UADDO, | ||||
1507 | TLI->getValueType(*DL, Add->getType()), | ||||
1508 | Add->hasNUsesOrMore(2))) | ||||
1509 | return false; | ||||
1510 | |||||
1511 | // We don't want to move around uses of condition values this late, so we | ||||
1512 | // check if it is legal to create the call to the intrinsic in the basic | ||||
1513 | // block containing the icmp. | ||||
1514 | if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) | ||||
1515 | return false; | ||||
1516 | |||||
1517 | if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp, | ||||
1518 | Intrinsic::uadd_with_overflow)) | ||||
1519 | return false; | ||||
1520 | |||||
1521 | // Reset callers - do not crash by iterating over a dead instruction. | ||||
1522 | ModifiedDT = true; | ||||
1523 | return true; | ||||
1524 | } | ||||
1525 | |||||
1526 | bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT) { | ||||
1527 | // We are not expecting non-canonical/degenerate code. Just bail out. | ||||
1528 | Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); | ||||
1529 | if (isa<Constant>(A) && isa<Constant>(B)) | ||||
1530 | return false; | ||||
1531 | |||||
1532 | // Convert (A u> B) to (A u< B) to simplify pattern matching. | ||||
1533 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | ||||
1534 | if (Pred == ICmpInst::ICMP_UGT) { | ||||
1535 | std::swap(A, B); | ||||
1536 | Pred = ICmpInst::ICMP_ULT; | ||||
1537 | } | ||||
1538 | // Convert special-case: (A == 0) is the same as (A u< 1). | ||||
1539 | if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { | ||||
1540 | B = ConstantInt::get(B->getType(), 1); | ||||
1541 | Pred = ICmpInst::ICMP_ULT; | ||||
1542 | } | ||||
1543 | // Convert special-case: (A != 0) is the same as (0 u< A). | ||||
1544 | if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { | ||||
1545 | std::swap(A, B); | ||||
1546 | Pred = ICmpInst::ICMP_ULT; | ||||
1547 | } | ||||
1548 | if (Pred != ICmpInst::ICMP_ULT) | ||||
1549 | return false; | ||||
1550 | |||||
1551 | // Walk the users of a variable operand of a compare looking for a subtract or | ||||
1552 | // add with that same operand. Also match the 2nd operand of the compare to | ||||
1553 | // the add/sub, but that may be a negated constant operand of an add. | ||||
1554 | Value *CmpVariableOperand = isa<Constant>(A) ? B : A; | ||||
1555 | BinaryOperator *Sub = nullptr; | ||||
1556 | for (User *U : CmpVariableOperand->users()) { | ||||
1557 | // A - B, A u< B --> usubo(A, B) | ||||
1558 | if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { | ||||
1559 | Sub = cast<BinaryOperator>(U); | ||||
1560 | break; | ||||
1561 | } | ||||
1562 | |||||
1563 | // A + (-C), A u< C (canonicalized form of (sub A, C)) | ||||
1564 | const APInt *CmpC, *AddC; | ||||
1565 | if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && | ||||
1566 | match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { | ||||
1567 | Sub = cast<BinaryOperator>(U); | ||||
1568 | break; | ||||
1569 | } | ||||
1570 | } | ||||
1571 | if (!Sub) | ||||
1572 | return false; | ||||
1573 | |||||
1574 | if (!TLI->shouldFormOverflowOp(ISD::USUBO, | ||||
1575 | TLI->getValueType(*DL, Sub->getType()), | ||||
1576 | Sub->hasNUsesOrMore(2))) | ||||
1577 | return false; | ||||
1578 | |||||
1579 | if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), | ||||
1580 | Cmp, Intrinsic::usub_with_overflow)) | ||||
1581 | return false; | ||||
1582 | |||||
1583 | // Reset callers - do not crash by iterating over a dead instruction. | ||||
1584 | ModifiedDT = true; | ||||
1585 | return true; | ||||
1586 | } | ||||
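| |||||
| // Sketch of a matched case (illustrative): in | ||||
| //   %sub = sub i32 %a, %b | ||||
| //   %cmp = icmp ult i32 %a, %b | ||||
| // the compare computes exactly the borrow of the subtraction, so both are | ||||
| // replaced by a single @llvm.usub.with.overflow.i32(i32 %a, i32 %b) call. | ||||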
1587 | |||||
1588 | /// Sink the given CmpInst into user blocks to reduce the number of virtual | ||||
1589 | /// registers that must be created and coalesced. This is a clear win except on | ||||
1590 | /// targets with multiple condition code registers (PowerPC), where it might | ||||
1591 | /// lose; some adjustment may be wanted there. | ||||
1592 | /// | ||||
1593 | /// Return true if any changes are made. | ||||
1594 | static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { | ||||
1595 | if (TLI.hasMultipleConditionRegisters()) | ||||
1596 | return false; | ||||
1597 | |||||
1598 | // Avoid sinking soft-FP comparisons, since this can move them into a loop. | ||||
1599 | if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) | ||||
1600 | return false; | ||||
1601 | |||||
1602 | // Only insert a cmp in each block once. | ||||
1603 | DenseMap<BasicBlock *, CmpInst *> InsertedCmps; | ||||
1604 | |||||
1605 | bool MadeChange = false; | ||||
1606 | for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); | ||||
1607 | UI != E;) { | ||||
1608 | Use &TheUse = UI.getUse(); | ||||
1609 | Instruction *User = cast<Instruction>(*UI); | ||||
1610 | |||||
1611 | // Preincrement use iterator so we don't invalidate it. | ||||
1612 | ++UI; | ||||
1613 | |||||
1614 | // Don't bother for PHI nodes. | ||||
1615 | if (isa<PHINode>(User)) | ||||
1616 | continue; | ||||
1617 | |||||
1618 | // Figure out which BB this cmp is used in. | ||||
1619 | BasicBlock *UserBB = User->getParent(); | ||||
1620 | BasicBlock *DefBB = Cmp->getParent(); | ||||
1621 | |||||
1622 | // If this user is in the same block as the cmp, don't change the cmp. | ||||
1623 | if (UserBB == DefBB) | ||||
1624 | continue; | ||||
1625 | |||||
1626 | // If we have already inserted a cmp into this block, use it. | ||||
1627 | CmpInst *&InsertedCmp = InsertedCmps[UserBB]; | ||||
1628 | |||||
1629 | if (!InsertedCmp) { | ||||
1630 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | ||||
1631 | assert(InsertPt != UserBB->end()); | ||||
1632 | InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), | ||||
1633 | Cmp->getOperand(0), Cmp->getOperand(1), "", | ||||
1634 | &*InsertPt); | ||||
1635 | // Propagate the debug info. | ||||
1636 | InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); | ||||
1637 | } | ||||
1638 | |||||
1639 | // Replace a use of the cmp with a use of the new cmp. | ||||
1640 | TheUse = InsertedCmp; | ||||
1641 | MadeChange = true; | ||||
1642 | ++NumCmpUses; | ||||
1643 | } | ||||
1644 | |||||
1645 | // If we removed all uses, nuke the cmp. | ||||
1646 | if (Cmp->use_empty()) { | ||||
1647 | Cmp->eraseFromParent(); | ||||
1648 | MadeChange = true; | ||||
1649 | } | ||||
1650 | |||||
1651 | return MadeChange; | ||||
1652 | } | ||||
1653 | |||||
1654 | /// For pattern like: | ||||
1655 | /// | ||||
1656 | /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB) | ||||
1657 | /// ... | ||||
1658 | /// DomBB: | ||||
1659 | /// ... | ||||
1660 | /// br DomCond, TrueBB, CmpBB | ||||
1661 | /// CmpBB: (with DomBB being the single predecessor) | ||||
1662 | /// ... | ||||
1663 | /// Cmp = icmp eq CmpOp0, CmpOp1 | ||||
1664 | /// ... | ||||
1665 | /// | ||||
1666 | /// This would use two comparisons on targets where the lowering of icmp | ||||
1667 | /// sgt/slt differs from the lowering of icmp eq (e.g. PowerPC). This function | ||||
1668 | /// tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' into | ||||
1669 | /// 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. After that, DomCond and Cmp can share | ||||
1670 | /// the same comparison, saving one of them. | ||||
1671 | /// | ||||
1672 | /// Return true if any changes are made. | ||||
1673 | static bool foldICmpWithDominatingICmp(CmpInst *Cmp, | ||||
1674 | const TargetLowering &TLI) { | ||||
1675 | if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp()) | ||||
1676 | return false; | ||||
1677 | |||||
1678 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | ||||
1679 | if (Pred != ICmpInst::ICMP_EQ) | ||||
1680 | return false; | ||||
1681 | |||||
1682 | // If icmp eq has users other than BranchInst and SelectInst, converting it to | ||||
1683 | // icmp slt/sgt would introduce more redundant LLVM IR. | ||||
1684 | for (User *U : Cmp->users()) { | ||||
1685 | if (isa<BranchInst>(U)) | ||||
1686 | continue; | ||||
1687 | if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp) | ||||
1688 | continue; | ||||
1689 | return false; | ||||
1690 | } | ||||
1691 | |||||
1692 | // This is a cheap/incomplete check for dominance - just match a single | ||||
1693 | // predecessor with a conditional branch. | ||||
1694 | BasicBlock *CmpBB = Cmp->getParent(); | ||||
1695 | BasicBlock *DomBB = CmpBB->getSinglePredecessor(); | ||||
1696 | if (!DomBB) | ||||
1697 | return false; | ||||
1698 | |||||
1699 | // We want to ensure that the only way control gets to the comparison of | ||||
1700 | // interest is that a less/greater than comparison on the same operands is | ||||
1701 | // false. | ||||
1702 | Value *DomCond; | ||||
1703 | BasicBlock *TrueBB, *FalseBB; | ||||
1704 | if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) | ||||
1705 | return false; | ||||
1706 | if (CmpBB != FalseBB) | ||||
1707 | return false; | ||||
1708 | |||||
1709 | Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1); | ||||
1710 | ICmpInst::Predicate DomPred; | ||||
1711 | if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1)))) | ||||
1712 | return false; | ||||
1713 | if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT) | ||||
1714 | return false; | ||||
1715 | |||||
1716 | // Convert the equality comparison to the opposite of the dominating | ||||
1717 | // comparison and swap the direction for all branch/select users. | ||||
1718 | // We have conceptually converted: | ||||
1719 | // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>; | ||||
1720 | // to | ||||
1721 | // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>; | ||||
1722 | // And similarly for branches. | ||||
1723 | for (User *U : Cmp->users()) { | ||||
1724 | if (auto *BI = dyn_cast<BranchInst>(U)) { | ||||
1725 | assert(BI->isConditional() && "Must be conditional"); | ||||
1726 | BI->swapSuccessors(); | ||||
1727 | continue; | ||||
1728 | } | ||||
1729 | if (auto *SI = dyn_cast<SelectInst>(U)) { | ||||
1730 | // Swap operands | ||||
1731 | SI->swapValues(); | ||||
1732 | SI->swapProfMetadata(); | ||||
1733 | continue; | ||||
1734 | } | ||||
1735 | llvm_unreachable("Must be a branch or a select"); | ||||
1736 | } | ||||
1737 | Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred)); | ||||
1738 | return true; | ||||
1739 | } | ||||
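| |||||
| // Concrete sketch (invented blocks): given | ||||
| //   dom:   %dc = icmp sgt i32 %a, %b | ||||
| //          br i1 %dc, label %gt, label %cmpbb | ||||
| //   cmpbb: %c = icmp eq i32 %a, %b | ||||
| //          br i1 %c, label %eq, label %ne | ||||
| // the transform rewrites %c to 'icmp slt i32 %a, %b' and swaps the branch | ||||
| // successors, so both blocks can reuse one signed comparison. | ||||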
1740 | |||||
1741 | bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) { | ||||
1742 | if (sinkCmpExpression(Cmp, *TLI)) | ||||
1743 | return true; | ||||
1744 | |||||
1745 | if (combineToUAddWithOverflow(Cmp, ModifiedDT)) | ||||
1746 | return true; | ||||
1747 | |||||
1748 | if (combineToUSubWithOverflow(Cmp, ModifiedDT)) | ||||
1749 | return true; | ||||
1750 | |||||
1751 | if (foldICmpWithDominatingICmp(Cmp, *TLI)) | ||||
1752 | return true; | ||||
1753 | |||||
1754 | return false; | ||||
1755 | } | ||||
1756 | |||||
1757 | /// Duplicate and sink the given 'and' instruction into user blocks where it is | ||||
1758 | /// used in a compare to allow isel to generate better code for targets where | ||||
1759 | /// this operation can be combined. | ||||
1760 | /// | ||||
1761 | /// Return true if any changes are made. | ||||
1762 | static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI, | ||||
1763 | SetOfInstrs &InsertedInsts) { | ||||
1764 | // Double-check that we're not trying to optimize an instruction that was | ||||
1765 | // already optimized by some other part of this pass. | ||||
1766 | assert(!InsertedInsts.count(AndI) && | ||||
1767 | "Attempting to optimize already optimized and instruction"); | ||||
1768 | (void)InsertedInsts; | ||||
1769 | |||||
1770 | // Nothing to do for single use in same basic block. | ||||
1771 | if (AndI->hasOneUse() && | ||||
1772 | AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) | ||||
1773 | return false; | ||||
1774 | |||||
1775 | // Try to avoid cases where sinking/duplicating is likely to increase register | ||||
1776 | // pressure. | ||||
1777 | if (!isa<ConstantInt>(AndI->getOperand(0)) && | ||||
1778 | !isa<ConstantInt>(AndI->getOperand(1)) && | ||||
1779 | AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) | ||||
1780 | return false; | ||||
1781 | |||||
1782 | for (auto *U : AndI->users()) { | ||||
1783 | Instruction *User = cast<Instruction>(U); | ||||
1784 | |||||
1785 | // Only sink 'and' feeding icmp with 0. | ||||
1786 | if (!isa<ICmpInst>(User)) | ||||
1787 | return false; | ||||
1788 | |||||
1789 | auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); | ||||
1790 | if (!CmpC || !CmpC->isZero()) | ||||
1791 | return false; | ||||
1792 | } | ||||
1793 | |||||
1794 | if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) | ||||
1795 | return false; | ||||
1796 | |||||
1797 | LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); | ||||
1798 | LLVM_DEBUG(AndI->getParent()->dump()); | ||||
1799 | |||||
1800 | // Push the 'and' into the same block as the icmp 0. There should only be | ||||
1801 | // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any | ||||
1802 | // others, so we don't need to keep track of which BBs we insert into. | ||||
1803 | for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); | ||||
1804 | UI != E;) { | ||||
1805 | Use &TheUse = UI.getUse(); | ||||
1806 | Instruction *User = cast<Instruction>(*UI); | ||||
1807 | |||||
1808 | // Preincrement use iterator so we don't invalidate it. | ||||
1809 | ++UI; | ||||
1810 | |||||
1811 | LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); | ||||
1812 | |||||
1813 | // Keep the 'and' in the same place if the use is already in the same block. | ||||
1814 | Instruction *InsertPt = | ||||
1815 | User->getParent() == AndI->getParent() ? AndI : User; | ||||
1816 | Instruction *InsertedAnd = | ||||
1817 | BinaryOperator::Create(Instruction::And, AndI->getOperand(0), | ||||
1818 | AndI->getOperand(1), "", InsertPt); | ||||
1819 | // Propagate the debug info. | ||||
1820 | InsertedAnd->setDebugLoc(AndI->getDebugLoc()); | ||||
1821 | |||||
1822 | // Replace a use of the 'and' with a use of the new 'and'. | ||||
1823 | TheUse = InsertedAnd; | ||||
1824 | ++NumAndUses; | ||||
1825 | LLVM_DEBUG(User->getParent()->dump()); | ||||
1826 | } | ||||
1827 | |||||
1828 | // We removed all uses, nuke the and. | ||||
1829 | AndI->eraseFromParent(); | ||||
1830 | return true; | ||||
1831 | } | ||||
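| |||||
| // Minimal sketch (invented names): an 'and' feeding only zero-compares, e.g. | ||||
| //   bb1:  %m = and i64 %x, 255 | ||||
| //   bb2:  %z = icmp eq i64 %m, 0 | ||||
| // is duplicated into bb2 so that isel can fold the mask and the test into a | ||||
| // single instruction on targets where that folding is beneficial. | ||||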
1832 | |||||
1833 | /// Check if the candidate use could be combined with a shift instruction. | ||||
1834 | /// The candidates are: | ||||
1835 | /// 1. A truncate instruction. | ||||
1836 | /// 2. An 'and' instruction whose immediate is a mask of the low bits: | ||||
1837 | ///    imm & (imm+1) == 0 | ||||
1838 | static bool isExtractBitsCandidateUse(Instruction *User) { | ||||
1839 | if (!isa<TruncInst>(User)) { | ||||
1840 | if (User->getOpcode() != Instruction::And || | ||||
1841 | !isa<ConstantInt>(User->getOperand(1))) | ||||
1842 | return false; | ||||
1843 | |||||
1844 | const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); | ||||
1845 | |||||
1846 | if ((Cimm & (Cimm + 1)).getBoolValue()) | ||||
1847 | return false; | ||||
1848 | } | ||||
1849 | return true; | ||||
1850 | } | ||||
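| |||||
| // For example (illustrative constants): 0xffff is accepted, since | ||||
| // 0xffff & 0x10000 == 0 (a mask of the low bits), while 0xff00ff is | ||||
| // rejected because 0xff00ff & 0xff0100 == 0xff0000 != 0. | ||||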
1851 | |||||
1852 | /// Sink both shift and truncate instruction to the use of truncate's BB. | ||||
1853 | static bool | ||||
1854 | SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, | ||||
1855 | DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, | ||||
1856 | const TargetLowering &TLI, const DataLayout &DL) { | ||||
1857 | BasicBlock *UserBB = User->getParent(); | ||||
1858 | DenseMap<BasicBlock *, CastInst *> InsertedTruncs; | ||||
1859 | auto *TruncI = cast<TruncInst>(User); | ||||
1860 | bool MadeChange = false; | ||||
1861 | |||||
1862 | for (Value::user_iterator TruncUI = TruncI->user_begin(), | ||||
1863 | TruncE = TruncI->user_end(); | ||||
1864 | TruncUI != TruncE;) { | ||||
1865 | |||||
1866 | Use &TruncTheUse = TruncUI.getUse(); | ||||
1867 | Instruction *TruncUser = cast<Instruction>(*TruncUI); | ||||
1868 | // Preincrement use iterator so we don't invalidate it. | ||||
1869 | |||||
1870 | ++TruncUI; | ||||
1871 | |||||
1872 | int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); | ||||
1873 | if (!ISDOpcode) | ||||
1874 | continue; | ||||
1875 | |||||
1876 | // If the use is actually a legal node, there will not be an | ||||
1877 | // implicit truncate. | ||||
1878 | // FIXME: always querying the result type is just an | ||||
1879 | // approximation; some nodes' legality is determined by the | ||||
1880 | // operand or other means. There's no good way to find out though. | ||||
1881 | if (TLI.isOperationLegalOrCustom( | ||||
1882 | ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) | ||||
1883 | continue; | ||||
1884 | |||||
1885 | // Don't bother for PHI nodes. | ||||
1886 | if (isa<PHINode>(TruncUser)) | ||||
1887 | continue; | ||||
1888 | |||||
1889 | BasicBlock *TruncUserBB = TruncUser->getParent(); | ||||
1890 | |||||
1891 | if (UserBB == TruncUserBB) | ||||
1892 | continue; | ||||
1893 | |||||
1894 | BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; | ||||
1895 | CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; | ||||
1896 | |||||
1897 | if (!InsertedShift && !InsertedTrunc) { | ||||
1898 | BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); | ||||
1899 | assert(InsertPt != TruncUserBB->end()); | ||||
1900 | // Sink the shift | ||||
1901 | if (ShiftI->getOpcode() == Instruction::AShr) | ||||
1902 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | ||||
1903 | "", &*InsertPt); | ||||
1904 | else | ||||
1905 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | ||||
1906 | "", &*InsertPt); | ||||
1907 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | ||||
1908 | |||||
1909 | // Sink the trunc | ||||
1910 | BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); | ||||
1911 | TruncInsertPt++; | ||||
1912 | assert(TruncInsertPt != TruncUserBB->end()); | ||||
1913 | |||||
1914 | InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, | ||||
1915 | TruncI->getType(), "", &*TruncInsertPt); | ||||
1916 | InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); | ||||
1917 | |||||
1918 | MadeChange = true; | ||||
1919 | |||||
1920 | TruncTheUse = InsertedTrunc; | ||||
1921 | } | ||||
1922 | } | ||||
1923 | return MadeChange; | ||||
1924 | } | ||||
1925 | |||||
1926 | /// Sink the shift *right* instruction into user blocks if its uses could | ||||
1927 | /// potentially be combined with the shift to form a BitExtract instruction. | ||||
1928 | /// This is only applied if the architecture supports BitExtract. Here is an | ||||
1929 | /// example: | ||||
1930 | /// BB1: | ||||
1931 | /// %x.extract.shift = lshr i64 %arg1, 32 | ||||
1932 | /// BB2: | ||||
1933 | /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 | ||||
1934 | /// ==> | ||||
1935 | /// | ||||
1936 | /// BB2: | ||||
1937 | /// %x.extract.shift.1 = lshr i64 %arg1, 32 | ||||
1938 | /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 | ||||
1939 | /// | ||||
1940 | /// CodeGen will recognize the pattern in BB2 and generate BitExtract | ||||
1941 | /// instruction. | ||||
1942 | /// Return true if any changes are made. | ||||
1943 | static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, | ||||
1944 | const TargetLowering &TLI, | ||||
1945 | const DataLayout &DL) { | ||||
1946 | BasicBlock *DefBB = ShiftI->getParent(); | ||||
1947 | |||||
1948 | /// Only insert instructions in each block once. | ||||
1949 | DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; | ||||
1950 | |||||
1951 | bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); | ||||
1952 | |||||
1953 | bool MadeChange = false; | ||||
1954 | for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); | ||||
1955 | UI != E;) { | ||||
1956 | Use &TheUse = UI.getUse(); | ||||
1957 | Instruction *User = cast<Instruction>(*UI); | ||||
1958 | // Preincrement use iterator so we don't invalidate it. | ||||
1959 | ++UI; | ||||
1960 | |||||
1961 | // Don't bother for PHI nodes. | ||||
1962 | if (isa<PHINode>(User)) | ||||
1963 | continue; | ||||
1964 | |||||
1965 | if (!isExtractBitsCandidateUse(User)) | ||||
1966 | continue; | ||||
1967 | |||||
1968 | BasicBlock *UserBB = User->getParent(); | ||||
1969 | |||||
1970 | if (UserBB == DefBB) { | ||||
1971 | // The shift and truncate instructions are in the same BB. A use of the | ||||
1972 | // truncate (TruncUse) may still introduce another truncate if its type is | ||||
1973 | // not legal. In this case, we would like to sink both the shift and the | ||||
1974 | // truncate into the BB of TruncUse. | ||||
1975 | // for example: | ||||
1976 | // BB1: | ||||
1977 | // i64 shift.result = lshr i64 opnd, imm | ||||
1978 | // trunc.result = trunc shift.result to i16 | ||||
1979 | // | ||||
1980 | // BB2: | ||||
1981 | // ----> We will have an implicit truncate here if the architecture does | ||||
1982 | // not have i16 compare. | ||||
1983 | // cmp i16 trunc.result, opnd2 | ||||
1984 | // | ||||
1985 | if (isa<TruncInst>(User) && | ||||
1986 | shiftIsLegal | ||||
1987 | // If the type of the truncate is legal, no truncate will be | ||||
1988 | // introduced in other basic blocks. | ||||
1989 | && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) | ||||
1990 | MadeChange |= | ||||
1991 | SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); | ||||
1992 | |||||
1993 | continue; | ||||
1994 | } | ||||
1995 | // If we have already inserted a shift into this block, use it. | ||||
1996 | BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; | ||||
1997 | |||||
1998 | if (!InsertedShift) { | ||||
1999 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | ||||
2000 | assert(InsertPt != UserBB->end()); | ||||
2001 | |||||
2002 | if (ShiftI->getOpcode() == Instruction::AShr) | ||||
2003 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | ||||
2004 | "", &*InsertPt); | ||||
2005 | else | ||||
2006 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | ||||
2007 | "", &*InsertPt); | ||||
2008 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | ||||
2009 | |||||
2010 | MadeChange = true; | ||||
2011 | } | ||||
2012 | |||||
2013 | // Replace a use of the shift with a use of the new shift. | ||||
2014 | TheUse = InsertedShift; | ||||
2015 | } | ||||
2016 | |||||
2017 | // If we removed all uses, or there are none, nuke the shift. | ||||
2018 | if (ShiftI->use_empty()) { | ||||
2019 | salvageDebugInfo(*ShiftI); | ||||
2020 | ShiftI->eraseFromParent(); | ||||
2021 | MadeChange = true; | ||||
2022 | } | ||||
2023 | |||||
2024 | return MadeChange; | ||||
2025 | } | ||||
2026 | |||||
2027 | /// If counting leading or trailing zeros is an expensive operation and a zero | ||||
2028 | /// input is defined, add a check for zero to avoid calling the intrinsic. | ||||
2029 | /// | ||||
2030 | /// We want to transform: | ||||
2031 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) | ||||
2032 | /// | ||||
2033 | /// into: | ||||
2034 | /// entry: | ||||
2035 | /// %cmpz = icmp eq i64 %A, 0 | ||||
2036 | /// br i1 %cmpz, label %cond.end, label %cond.false | ||||
2037 | /// cond.false: | ||||
2038 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) | ||||
2039 | /// br label %cond.end | ||||
2040 | /// cond.end: | ||||
2041 | /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] | ||||
2042 | /// | ||||
2043 | /// If the transform is performed, return true and set ModifiedDT to true. | ||||
2044 | static bool despeculateCountZeros(IntrinsicInst *CountZeros, | ||||
2045 | const TargetLowering *TLI, | ||||
2046 | const DataLayout *DL, bool &ModifiedDT) { | ||||
2047 | // If a zero input is undefined, it doesn't make sense to despeculate that. | ||||
2048 | if (match(CountZeros->getOperand(1), m_One())) | ||||
2049 | return false; | ||||
2050 | |||||
2051 | // If it's cheap to speculate, there's nothing to do. | ||||
2052 | Type *Ty = CountZeros->getType(); | ||||
2053 | auto IntrinsicID = CountZeros->getIntrinsicID(); | ||||
2054 | if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) || | ||||
2055 | (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty))) | ||||
2056 | return false; | ||||
2057 | |||||
2058 | // Only handle legal scalar cases. Anything else requires too much work. | ||||
2059 | unsigned SizeInBits = Ty->getScalarSizeInBits(); | ||||
2060 | if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) | ||||
2061 | return false; | ||||
2062 | |||||
2063 | // Bail if the value is never zero. | ||||
2064 | Use &Op = CountZeros->getOperandUse(0); | ||||
2065 | if (isKnownNonZero(Op, *DL)) | ||||
2066 | return false; | ||||
2067 | |||||
2068 | // The intrinsic will be sunk behind a compare against zero and branch. | ||||
2069 | BasicBlock *StartBlock = CountZeros->getParent(); | ||||
2070 | BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); | ||||
2071 | |||||
2072 | // Create another block after the count zero intrinsic. A PHI will be added | ||||
2073 | // in this block to select the result of the intrinsic or the bit-width | ||||
2074 | // constant if the input to the intrinsic is zero. | ||||
2075 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); | ||||
2076 | BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); | ||||
2077 | |||||
2078 | // Set up a builder to create a compare, conditional branch, and PHI. | ||||
2079 | IRBuilder<> Builder(CountZeros->getContext()); | ||||
2080 | Builder.SetInsertPoint(StartBlock->getTerminator()); | ||||
2081 | Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); | ||||
2082 | |||||
2083 | // Replace the unconditional branch that was created by the first split with | ||||
2084 | // a compare against zero and a conditional branch. | ||||
2085 | Value *Zero = Constant::getNullValue(Ty); | ||||
2086 | // Avoid introducing branch on poison. This also replaces the ctz operand. | ||||
2087 | if (!isGuaranteedNotToBeUndefOrPoison(Op)) | ||||
2088 | Op = Builder.CreateFreeze(Op, Op->getName() + ".fr"); | ||||
2089 | Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz"); | ||||
2090 | Builder.CreateCondBr(Cmp, EndBlock, CallBlock); | ||||
2091 | StartBlock->getTerminator()->eraseFromParent(); | ||||
2092 | |||||
2093 | // Create a PHI in the end block to select either the output of the intrinsic | ||||
2094 | // or the bit width of the operand. | ||||
2095 | Builder.SetInsertPoint(&EndBlock->front()); | ||||
2096 | PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); | ||||
2097 | CountZeros->replaceAllUsesWith(PN); | ||||
2098 | Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); | ||||
2099 | PN->addIncoming(BitWidth, StartBlock); | ||||
2100 | PN->addIncoming(CountZeros, CallBlock); | ||||
2101 | |||||
2102 | // We are explicitly handling the zero case, so we can set the intrinsic's | ||||
2103 | // undefined zero argument to 'true'. This will also prevent reprocessing the | ||||
2104 | // intrinsic; we only despeculate when a zero input is defined. | ||||
2105 | CountZeros->setArgOperand(1, Builder.getTrue()); | ||||
2106 | ModifiedDT = true; | ||||
2107 | return true; | ||||
2108 | } | ||||
2109 | |||||
2110 | bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { | ||||
2111 | BasicBlock *BB = CI->getParent(); | ||||
2112 | |||||
2113 | // Lower inline assembly if we can. | ||||
2114 | // If we found an inline asm expression, and if the target knows how to | ||||
2115 | // lower it to normal LLVM code, do so now. | ||||
2116 | if (CI->isInlineAsm()) { | ||||
2117 | if (TLI->ExpandInlineAsm(CI)) { | ||||
2118 | // Avoid invalidating the iterator. | ||||
2119 | CurInstIterator = BB->begin(); | ||||
2120 | // Avoid processing instructions out of order, which could cause | ||||
2121 | // reuse before a value is defined. | ||||
2122 | SunkAddrs.clear(); | ||||
2123 | return true; | ||||
2124 | } | ||||
2125 | // Sink address computing for memory operands into the block. | ||||
2126 | if (optimizeInlineAsmInst(CI)) | ||||
2127 | return true; | ||||
2128 | } | ||||
2129 | |||||
2130 | // Align the pointer arguments to this call if the target thinks it's a good | ||||
2131 | // idea | ||||
2132 | unsigned MinSize; | ||||
2133 | Align PrefAlign; | ||||
2134 | if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { | ||||
2135 | for (auto &Arg : CI->args()) { | ||||
2136 | // We want to align both objects whose address is used directly and | ||||
2137 | // objects whose address is used in casts and GEPs, though it only makes | ||||
2138 | // sense for GEPs if the offset is a multiple of the desired alignment and | ||||
2139 | // if size - offset meets the size threshold. | ||||
2140 | if (!Arg->getType()->isPointerTy()) | ||||
2141 | continue; | ||||
2142 | APInt Offset(DL->getIndexSizeInBits( | ||||
2143 | cast<PointerType>(Arg->getType())->getAddressSpace()), | ||||
2144 | 0); | ||||
2145 | Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); | ||||
2146 | uint64_t Offset2 = Offset.getLimitedValue(); | ||||
2147 | if (!isAligned(PrefAlign, Offset2)) | ||||
2148 | continue; | ||||
2149 | AllocaInst *AI; | ||||
2150 | if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign && | ||||
2151 | DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) | ||||
2152 | AI->setAlignment(PrefAlign); | ||||
2153 | // Global variables can only be aligned if they are defined in this | ||||
2154 | // object (i.e. they are uniquely initialized in this object), and | ||||
2155 | // over-aligning global variables that have an explicit section is | ||||
2156 | // forbidden. | ||||
2157 | GlobalVariable *GV; | ||||
2158 | if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && | ||||
2159 | GV->getPointerAlignment(*DL) < PrefAlign && | ||||
2160 | DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2) | ||||
2161 | GV->setAlignment(PrefAlign); | ||||
2162 | } | ||||
2163 | // If this is a memcpy (or similar) then we may be able to improve the | ||||
2164 | // alignment | ||||
2165 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { | ||||
2166 | Align DestAlign = getKnownAlignment(MI->getDest(), *DL); | ||||
2167 | MaybeAlign MIDestAlign = MI->getDestAlign(); | ||||
2168 | if (!MIDestAlign || DestAlign > *MIDestAlign) | ||||
2169 | MI->setDestAlignment(DestAlign); | ||||
2170 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { | ||||
2171 | MaybeAlign MTISrcAlign = MTI->getSourceAlign(); | ||||
2172 | Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL); | ||||
2173 | if (!MTISrcAlign || SrcAlign > *MTISrcAlign) | ||||
2174 | MTI->setSourceAlignment(SrcAlign); | ||||
2175 | } | ||||
2176 | } | ||||
2177 | } | ||||
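// A minimal sketch of the alignment handling above, assuming a target whose
// shouldAlignPointerArgs requests 8-byte alignment (PrefAlign = 8) for the
// operands of a large-enough memcpy:
//
// @code
//   %buf = alloca [32 x i8], align 1
//   call void @llvm.memcpy.p0.p0.i64(ptr %buf, ptr %src, i64 32, i1 false)
// @endcode
//
// The alloca's alignment is raised to 8, and the memcpy's dest/source
// alignment attributes are then raised to whatever getKnownAlignment can
// prove from the updated object alignments.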
2178 | |||||
2179 | // If we have a cold call site, try to sink addressing computation into the | ||||
2180 | // cold block. This interacts with our handling for loads and stores to | ||||
2181 | // ensure that we can fold all uses of a potential addressing computation | ||||
2182 | // into their uses. TODO: generalize this to work over profiling data | ||||
2183 | if (CI->hasFnAttr(Attribute::Cold) && !OptSize && | ||||
2184 | !llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) | ||||
2185 | for (auto &Arg : CI->args()) { | ||||
2186 | if (!Arg->getType()->isPointerTy()) | ||||
2187 | continue; | ||||
2188 | unsigned AS = Arg->getType()->getPointerAddressSpace(); | ||||
2189 | return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); | ||||
2190 | } | ||||
2191 | |||||
2192 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | ||||
2193 | if (II) { | ||||
2194 | switch (II->getIntrinsicID()) { | ||||
2195 | default: | ||||
2196 | break; | ||||
2197 | case Intrinsic::assume: | ||||
2198 | llvm_unreachable("llvm.assume should have been removed already");
2199 | case Intrinsic::experimental_widenable_condition: { | ||||
2200 | // Give up on future widening opportunities so that we can fold away dead
2201 | // paths and merge blocks before going into block-local instruction | ||||
2202 | // selection. | ||||
2203 | if (II->use_empty()) { | ||||
2204 | II->eraseFromParent(); | ||||
2205 | return true; | ||||
2206 | } | ||||
2207 | Constant *RetVal = ConstantInt::getTrue(II->getContext()); | ||||
2208 | resetIteratorIfInvalidatedWhileCalling(BB, [&]() { | ||||
2209 | replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); | ||||
2210 | }); | ||||
2211 | return true; | ||||
2212 | } | ||||
2213 | case Intrinsic::objectsize: | ||||
2214 | llvm_unreachable("llvm.objectsize.* should have been lowered already");
2215 | case Intrinsic::is_constant: | ||||
2216 | llvm_unreachable("llvm.is.constant.* should have been lowered already");
2217 | case Intrinsic::aarch64_stlxr: | ||||
2218 | case Intrinsic::aarch64_stxr: { | ||||
2219 | ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); | ||||
2220 | if (!ExtVal || !ExtVal->hasOneUse() || | ||||
2221 | ExtVal->getParent() == CI->getParent()) | ||||
2222 | return false; | ||||
2223 | // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. | ||||
2224 | ExtVal->moveBefore(CI); | ||||
2225 | // Mark this instruction as "inserted by CGP", so that other | ||||
2226 | // optimizations don't touch it. | ||||
2227 | InsertedInsts.insert(ExtVal); | ||||
2228 | return true; | ||||
2229 | } | ||||
2230 | |||||
2231 | case Intrinsic::launder_invariant_group: | ||||
2232 | case Intrinsic::strip_invariant_group: { | ||||
2233 | Value *ArgVal = II->getArgOperand(0); | ||||
2234 | auto it = LargeOffsetGEPMap.find(II); | ||||
2235 | if (it != LargeOffsetGEPMap.end()) { | ||||
2236 | // Merge entries in LargeOffsetGEPMap to reflect the RAUW. | ||||
2237 | // Make sure not to have to deal with iterator invalidation | ||||
2238 | // after possibly adding ArgVal to LargeOffsetGEPMap. | ||||
2239 | auto GEPs = std::move(it->second); | ||||
2240 | LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); | ||||
2241 | LargeOffsetGEPMap.erase(II); | ||||
2242 | } | ||||
2243 | |||||
2244 | II->replaceAllUsesWith(ArgVal); | ||||
2245 | II->eraseFromParent(); | ||||
2246 | return true; | ||||
2247 | } | ||||
2248 | case Intrinsic::cttz: | ||||
2249 | case Intrinsic::ctlz: | ||||
2250 | // If counting zeros is expensive, try to avoid it. | ||||
2251 | return despeculateCountZeros(II, TLI, DL, ModifiedDT); | ||||
2252 | case Intrinsic::fshl: | ||||
2253 | case Intrinsic::fshr: | ||||
2254 | return optimizeFunnelShift(II); | ||||
2255 | case Intrinsic::dbg_value: | ||||
2256 | return fixupDbgValue(II); | ||||
2257 | case Intrinsic::vscale: { | ||||
2258 | // If datalayout has no special restrictions on vector data layout, | ||||
2259 | // replace `llvm.vscale` by an equivalent constant expression | ||||
2260 | // to benefit from cheap constant propagation (a sketch follows this switch).
2261 | Type *ScalableVectorTy = | ||||
2262 | VectorType::get(Type::getInt8Ty(II->getContext()), 1, true); | ||||
2263 | if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) { | ||||
2264 | auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo()); | ||||
2265 | auto *One = ConstantInt::getSigned(II->getType(), 1); | ||||
2266 | auto *CGep = | ||||
2267 | ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One); | ||||
2268 | II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType())); | ||||
2269 | II->eraseFromParent(); | ||||
2270 | return true; | ||||
2271 | } | ||||
2272 | break; | ||||
2273 | } | ||||
2274 | case Intrinsic::masked_gather: | ||||
2275 | return optimizeGatherScatterInst(II, II->getArgOperand(0)); | ||||
2276 | case Intrinsic::masked_scatter: | ||||
2277 | return optimizeGatherScatterInst(II, II->getArgOperand(1)); | ||||
2278 | } | ||||
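// A minimal sketch of the llvm.vscale lowering in the switch above: when the
// datalayout guard is satisfied, a call such as
//
// @code
//   %vs = call i64 @llvm.vscale.i64()
// @endcode
//
// is replaced by the constant expression
//
// @code
//   ptrtoint (ptr getelementptr (<vscale x 1 x i8>, ptr null, i64 1) to i64)
// @endcode
//
// i.e. the allocation size of one <vscale x 1 x i8>, so that later passes can
// propagate the value as an ordinary constant.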
2279 | |||||
2280 | SmallVector<Value *, 2> PtrOps; | ||||
2281 | Type *AccessTy; | ||||
2282 | if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) | ||||
2283 | while (!PtrOps.empty()) { | ||||
2284 | Value *PtrVal = PtrOps.pop_back_val(); | ||||
2285 | unsigned AS = PtrVal->getType()->getPointerAddressSpace(); | ||||
2286 | if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) | ||||
2287 | return true; | ||||
2288 | } | ||||
2289 | } | ||||
2290 | |||||
2291 | // From here on out we're working with named functions. | ||||
2292 | if (!CI->getCalledFunction()) | ||||
2293 | return false; | ||||
2294 | |||||
2295 | // Lower all default uses of _chk calls. This is very similar | ||||
2296 | // to what InstCombineCalls does, but here we are only lowering calls | ||||
2297 | // to fortified library functions (e.g. __memcpy_chk) that have the default | ||||
2298 | // "don't know" as the objectsize. Anything else should be left alone. | ||||
2299 | FortifiedLibCallSimplifier Simplifier(TLInfo, true); | ||||
2300 | IRBuilder<> Builder(CI); | ||||
2301 | if (Value *V = Simplifier.optimizeCall(CI, Builder)) { | ||||
2302 | CI->replaceAllUsesWith(V); | ||||
2303 | CI->eraseFromParent(); | ||||
2304 | return true; | ||||
2305 | } | ||||
2306 | |||||
2307 | return false; | ||||
2308 | } | ||||
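// A minimal sketch of the fortified-call lowering at the end of this
// function: a _chk call carrying the default "don't know" object size of -1,
// e.g.
//
// @code
//   %r = call ptr @__memcpy_chk(ptr %dst, ptr %src, i64 %n, i64 -1)
// @endcode
//
// performs no useful checking, so FortifiedLibCallSimplifier rewrites it to a
// plain memcpy; the _chk call is then RAUW'd and erased above.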
2309 | |||||
2310 | /// Look for opportunities to duplicate return instructions to the predecessor | ||||
2311 | /// to enable tail call optimizations. The case it is currently looking for is: | ||||
2312 | /// @code | ||||
2313 | /// bb0: | ||||
2314 | /// %tmp0 = tail call i32 @f0() | ||||
2315 | /// br label %return | ||||
2316 | /// bb1: | ||||
2317 | /// %tmp1 = tail call i32 @f1() | ||||
2318 | /// br label %return | ||||
2319 | /// bb2: | ||||
2320 | /// %tmp2 = tail call i32 @f2() | ||||
2321 | /// br label %return | ||||
2322 | /// return: | ||||
2323 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] | ||||
2324 | /// ret i32 %retval | ||||
2325 | /// @endcode | ||||
2326 | /// | ||||
2327 | /// => | ||||
2328 | /// | ||||
2329 | /// @code | ||||
2330 | /// bb0: | ||||
2331 | /// %tmp0 = tail call i32 @f0() | ||||
2332 | /// ret i32 %tmp0 | ||||
2333 | /// bb1: | ||||
2334 | /// %tmp1 = tail call i32 @f1() | ||||
2335 | /// ret i32 %tmp1 | ||||
2336 | /// bb2: | ||||
2337 | /// %tmp2 = tail call i32 @f2() | ||||
2338 | /// ret i32 %tmp2 | ||||
2339 | /// @endcode | ||||
2340 | bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, | ||||
2341 | bool &ModifiedDT) { | ||||
2342 | ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2343 | if (!RetI)
2344 | return false;
2345 | |||||
2346 | PHINode *PN = nullptr; | ||||
2347 | ExtractValueInst *EVI = nullptr; | ||||
2348 | BitCastInst *BCI = nullptr; | ||||
2349 | Value *V = RetI->getReturnValue(); | ||||
2350 | if (V) {
2351 | BCI = dyn_cast<BitCastInst>(V); | ||||
2352 | if (BCI) | ||||
2353 | V = BCI->getOperand(0); | ||||
2354 | |||||
2355 | EVI = dyn_cast<ExtractValueInst>(V); | ||||
2356 | if (EVI) { | ||||
2357 | V = EVI->getOperand(0); | ||||
2358 | if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; })) | ||||
2359 | return false; | ||||
2360 | } | ||||
2361 | |||||
2362 | PN = dyn_cast<PHINode>(V); | ||||
2363 | if (!PN) | ||||
2364 | return false; | ||||
2365 | } | ||||
2366 | |||||
2367 | if (PN && PN->getParent() != BB)
2368 | return false;
2369 | |||||
2370 | auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) { | ||||
2371 | const BitCastInst *BC = dyn_cast<BitCastInst>(Inst); | ||||
2372 | if (BC && BC->hasOneUse()) | ||||
2373 | Inst = BC->user_back(); | ||||
2374 | |||||
2375 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) | ||||
2376 | return II->getIntrinsicID() == Intrinsic::lifetime_end; | ||||
2377 | return false; | ||||
2378 | }; | ||||
2379 | |||||
2380 | // Make sure there are no instructions between the first instruction | ||||
2381 | // and return. | ||||
2382 | const Instruction *BI = BB->getFirstNonPHI(); | ||||
2383 | // Skip over debug and the bitcast. | ||||
2384 | while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI || | ||||
2385 | isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI)) | ||||
2386 | BI = BI->getNextNode(); | ||||
| |||||
2387 | if (BI != RetI) | ||||
2388 | return false; | ||||
2389 | |||||
2390 | /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail | ||||
2391 | /// call. | ||||
2392 | const Function *F = BB->getParent(); | ||||
2393 | SmallVector<BasicBlock *, 4> TailCallBBs; | ||||
2394 | if (PN) { | ||||
2395 | for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { | ||||
2396 | // Look through bitcasts. | ||||
2397 | Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts(); | ||||
2398 | CallInst *CI = dyn_cast<CallInst>(IncomingVal); | ||||
2399 | BasicBlock *PredBB = PN->getIncomingBlock(I); | ||||
2400 | // Make sure the phi value is indeed produced by the tail call. | ||||
2401 | if (CI && CI->hasOneUse() && CI->getParent() == PredBB && | ||||
2402 | TLI->mayBeEmittedAsTailCall(CI) && | ||||
2403 | attributesPermitTailCall(F, CI, RetI, *TLI)) | ||||
2404 | TailCallBBs.push_back(PredBB); | ||||
2405 | } | ||||
2406 | } else { | ||||
2407 | SmallPtrSet<BasicBlock *, 4> VisitedBBs; | ||||
2408 | for (BasicBlock *Pred : predecessors(BB)) { | ||||
2409 | if (!VisitedBBs.insert(Pred).second) | ||||
2410 | continue; | ||||
2411 | if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) { | ||||
2412 | CallInst *CI = dyn_cast<CallInst>(I); | ||||
2413 | if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && | ||||
2414 | attributesPermitTailCall(F, CI, RetI, *TLI)) | ||||
2415 | TailCallBBs.push_back(Pred); | ||||
2416 | } | ||||
2417 | } | ||||
2418 | } | ||||
2419 | |||||
2420 | bool Changed = false; | ||||
2421 | for (auto const &TailCallBB : TailCallBBs) { | ||||
2422 | // Make sure the call instruction is followed by an unconditional branch to | ||||
2423 | // the return block. | ||||
2424 | BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator()); | ||||
2425 | if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) | ||||
2426 | continue; | ||||
2427 | |||||
2428 | // Duplicate the return into TailCallBB. | ||||
2429 | (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB); | ||||
2430 | assert(!VerifyBFIUpdates ||
2431 | BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2432 | BFI->setBlockFreq( | ||||
2433 | BB, | ||||
2434 | (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency()); | ||||
2435 | ModifiedDT = Changed = true; | ||||
2436 | ++NumRetsDup; | ||||
2437 | } | ||||
2438 | |||||
2439 | // If we eliminated all predecessors of the block, delete the block now. | ||||
2440 | if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) | ||||
2441 | BB->eraseFromParent(); | ||||
2442 | |||||
2443 | return Changed; | ||||
2444 | } | ||||
2445 | |||||
2446 | //===----------------------------------------------------------------------===// | ||||
2447 | // Memory Optimization | ||||
2448 | //===----------------------------------------------------------------------===// | ||||
2449 | |||||
2450 | namespace { | ||||
2451 | |||||
2452 | /// This is an extended version of TargetLowering::AddrMode | ||||
2453 | /// which holds actual Value*'s for register values. | ||||
2454 | struct ExtAddrMode : public TargetLowering::AddrMode { | ||||
2455 | Value *BaseReg = nullptr; | ||||
2456 | Value *ScaledReg = nullptr; | ||||
2457 | Value *OriginalValue = nullptr; | ||||
2458 | bool InBounds = true; | ||||
2459 | |||||
2460 | enum FieldName { | ||||
2461 | NoField = 0x00, | ||||
2462 | BaseRegField = 0x01, | ||||
2463 | BaseGVField = 0x02, | ||||
2464 | BaseOffsField = 0x04, | ||||
2465 | ScaledRegField = 0x08, | ||||
2466 | ScaleField = 0x10, | ||||
2467 | MultipleFields = 0xff | ||||
2468 | }; | ||||
2469 | |||||
2470 | ExtAddrMode() = default; | ||||
2471 | |||||
2472 | void print(raw_ostream &OS) const; | ||||
2473 | void dump() const; | ||||
2474 | |||||
2475 | FieldName compare(const ExtAddrMode &other) { | ||||
2476 | // First check that the types are the same on each field, as differing types | ||||
2477 | // are something we can't cope with later on.
2478 | if (BaseReg && other.BaseReg && | ||||
2479 | BaseReg->getType() != other.BaseReg->getType()) | ||||
2480 | return MultipleFields; | ||||
2481 | if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType()) | ||||
2482 | return MultipleFields; | ||||
2483 | if (ScaledReg && other.ScaledReg && | ||||
2484 | ScaledReg->getType() != other.ScaledReg->getType()) | ||||
2485 | return MultipleFields; | ||||
2486 | |||||
2487 | // Conservatively reject 'inbounds' mismatches. | ||||
2488 | if (InBounds != other.InBounds) | ||||
2489 | return MultipleFields; | ||||
2490 | |||||
2491 | // Check each field to see if it differs. | ||||
2492 | unsigned Result = NoField; | ||||
2493 | if (BaseReg != other.BaseReg) | ||||
2494 | Result |= BaseRegField; | ||||
2495 | if (BaseGV != other.BaseGV) | ||||
2496 | Result |= BaseGVField; | ||||
2497 | if (BaseOffs != other.BaseOffs) | ||||
2498 | Result |= BaseOffsField; | ||||
2499 | if (ScaledReg != other.ScaledReg) | ||||
2500 | Result |= ScaledRegField; | ||||
2501 | // Don't count 0 as being a different scale, because that actually means | ||||
2502 | // unscaled (which will already be counted by having no ScaledReg). | ||||
2503 | if (Scale && other.Scale && Scale != other.Scale) | ||||
2504 | Result |= ScaleField; | ||||
2505 | |||||
2506 | if (countPopulation(Result) > 1) | ||||
2507 | return MultipleFields; | ||||
2508 | else | ||||
2509 | return static_cast<FieldName>(Result); | ||||
2510 | } | ||||
2511 | |||||
2512 | // An AddrMode is trivial if it involves no calculation i.e. it is just a base | ||||
2513 | // with no offset. | ||||
2514 | bool isTrivial() { | ||||
2515 | // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is | ||||
2516 | // trivial if at most one of these terms is nonzero, except that BaseGV and | ||||
2517 | // BaseReg both being zero actually means a null pointer value, which we | ||||
2518 | // consider to be 'non-zero' here. | ||||
2519 | return !BaseOffs && !Scale && !(BaseGV && BaseReg); | ||||
2520 | } | ||||
2521 | |||||
2522 | Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { | ||||
2523 | switch (Field) { | ||||
2524 | default: | ||||
2525 | return nullptr; | ||||
2526 | case BaseRegField: | ||||
2527 | return BaseReg; | ||||
2528 | case BaseGVField: | ||||
2529 | return BaseGV; | ||||
2530 | case ScaledRegField: | ||||
2531 | return ScaledReg; | ||||
2532 | case BaseOffsField: | ||||
2533 | return ConstantInt::get(IntPtrTy, BaseOffs); | ||||
2534 | } | ||||
2535 | } | ||||
2536 | |||||
2537 | void SetCombinedField(FieldName Field, Value *V, | ||||
2538 | const SmallVectorImpl<ExtAddrMode> &AddrModes) { | ||||
2539 | switch (Field) { | ||||
2540 | default: | ||||
2541 | llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2542 | break; | ||||
2543 | case ExtAddrMode::BaseRegField: | ||||
2544 | BaseReg = V; | ||||
2545 | break; | ||||
2546 | case ExtAddrMode::BaseGVField: | ||||
2547 | // A combined BaseGV is an Instruction, not a GlobalValue, so it goes | ||||
2548 | // in the BaseReg field. | ||||
2549 | assert(BaseReg == nullptr);
2550 | BaseReg = V; | ||||
2551 | BaseGV = nullptr; | ||||
2552 | break; | ||||
2553 | case ExtAddrMode::ScaledRegField: | ||||
2554 | ScaledReg = V; | ||||
2555 | // If we have a mix of scaled and unscaled addrmodes then we want scale | ||||
2556 | // to be the scale and not zero. | ||||
2557 | if (!Scale) | ||||
2558 | for (const ExtAddrMode &AM : AddrModes) | ||||
2559 | if (AM.Scale) { | ||||
2560 | Scale = AM.Scale; | ||||
2561 | break; | ||||
2562 | } | ||||
2563 | break; | ||||
2564 | case ExtAddrMode::BaseOffsField: | ||||
2565 | // The offset is no longer a constant, so it goes in ScaledReg with a | ||||
2566 | // scale of 1. | ||||
2567 | assert(ScaledReg == nullptr);
2568 | ScaledReg = V; | ||||
2569 | Scale = 1; | ||||
2570 | BaseOffs = 0; | ||||
2571 | break; | ||||
2572 | } | ||||
2573 | } | ||||
2574 | }; | ||||
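// A small worked example of compare() above, with hypothetical operands: the
// modes [Base:%p + 8] and [Base:%q + 8] differ only in BaseReg, so compare()
// returns BaseRegField, and the two modes can later be merged by combining %p
// and %q (e.g. through a select or phi) and installing the combined value via
// SetCombinedField. Had they also differed in BaseOffs, countPopulation of
// the result would exceed 1 and compare() would return MultipleFields.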
2575 | |||||
2576 | #ifndef NDEBUG | ||||
2577 | static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { | ||||
2578 | AM.print(OS); | ||||
2579 | return OS; | ||||
2580 | } | ||||
2581 | #endif | ||||
2582 | |||||
2583 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | ||||
2584 | void ExtAddrMode::print(raw_ostream &OS) const { | ||||
2585 | bool NeedPlus = false; | ||||
2586 | OS << "["; | ||||
2587 | if (InBounds) | ||||
2588 | OS << "inbounds "; | ||||
2589 | if (BaseGV) { | ||||
2590 | OS << (NeedPlus ? " + " : "") << "GV:"; | ||||
2591 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | ||||
2592 | NeedPlus = true; | ||||
2593 | } | ||||
2594 | |||||
2595 | if (BaseOffs) { | ||||
2596 | OS << (NeedPlus ? " + " : "") << BaseOffs; | ||||
2597 | NeedPlus = true; | ||||
2598 | } | ||||
2599 | |||||
2600 | if (BaseReg) { | ||||
2601 | OS << (NeedPlus ? " + " : "") << "Base:"; | ||||
2602 | BaseReg->printAsOperand(OS, /*PrintType=*/false); | ||||
2603 | NeedPlus = true; | ||||
2604 | } | ||||
2605 | if (Scale) { | ||||
2606 | OS << (NeedPlus ? " + " : "") << Scale << "*"; | ||||
2607 | ScaledReg->printAsOperand(OS, /*PrintType=*/false); | ||||
2608 | } | ||||
2609 | |||||
2610 | OS << ']'; | ||||
2611 | } | ||||
2612 | |||||
2613 | LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2614 | print(dbgs()); | ||||
2615 | dbgs() << '\n'; | ||||
2616 | } | ||||
2617 | #endif | ||||
2618 | |||||
2619 | } // end anonymous namespace | ||||
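// For illustration (hypothetical operands): an inbounds ExtAddrMode with
// BaseGV @g, BaseOffs 16, BaseReg %p, and ScaledReg %i at Scale 4 is printed
// by the routine above as:
//
//   [inbounds GV:@g + 16 + Base:%p + 4*%i]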
2620 | |||||
2621 | namespace { | ||||
2622 | |||||
2623 | /// This class provides transaction based operation on the IR. | ||||
2624 | /// Every change made through this class is recorded in the internal state and | ||||
2625 | /// can be undone (rollback) until commit is called. | ||||
2626 | /// CGP does not check if instructions could be speculatively executed when | ||||
2627 | /// moved. Preserving the original location would pessimize the debugging | ||||
2628 | /// experience, as well as negatively impact the quality of sample PGO. | ||||
2629 | class TypePromotionTransaction { | ||||
2630 | /// This represents the common interface of the individual transaction. | ||||
2631 | /// Each class implements the logic for doing one specific modification on | ||||
2632 | /// the IR via the TypePromotionTransaction. | ||||
2633 | class TypePromotionAction { | ||||
2634 | protected: | ||||
2635 | /// The Instruction modified. | ||||
2636 | Instruction *Inst; | ||||
2637 | |||||
2638 | public: | ||||
2639 | /// Constructor of the action. | ||||
2640 | /// The constructor performs the related action on the IR. | ||||
2641 | TypePromotionAction(Instruction *Inst) : Inst(Inst) {} | ||||
2642 | |||||
2643 | virtual ~TypePromotionAction() = default; | ||||
2644 | |||||
2645 | /// Undo the modification done by this action. | ||||
2646 | /// When this method is called, the IR must be in the same state as it was | ||||
2647 | /// before this action was applied. | ||||
2648 | /// \pre Undoing the action works if and only if the IR is in the exact same | ||||
2649 | /// state as it was directly after this action was applied. | ||||
2650 | virtual void undo() = 0; | ||||
2651 | |||||
2652 | /// Advocate every change made by this action.
2653 | /// When the action's results on the IR are to be kept, it is important
2654 | /// to call this function; otherwise hidden information may be kept forever.
2655 | virtual void commit() { | ||||
2656 | // Nothing to be done, this action is not doing anything. | ||||
2657 | } | ||||
2658 | }; | ||||
2659 | |||||
2660 | /// Utility to remember the position of an instruction. | ||||
2661 | class InsertionHandler { | ||||
2662 | /// Position of an instruction. | ||||
2663 | /// Either an instruction: | ||||
2664 | /// - Is the first in a basic block: BB is used. | ||||
2665 | /// - Has a previous instruction: PrevInst is used. | ||||
2666 | union { | ||||
2667 | Instruction *PrevInst; | ||||
2668 | BasicBlock *BB; | ||||
2669 | } Point; | ||||
2670 | |||||
2671 | /// Remember whether or not the instruction had a previous instruction. | ||||
2672 | bool HasPrevInstruction; | ||||
2673 | |||||
2674 | public: | ||||
2675 | /// Record the position of \p Inst. | ||||
2676 | InsertionHandler(Instruction *Inst) { | ||||
2677 | BasicBlock::iterator It = Inst->getIterator(); | ||||
2678 | HasPrevInstruction = (It != (Inst->getParent()->begin())); | ||||
2679 | if (HasPrevInstruction) | ||||
2680 | Point.PrevInst = &*--It; | ||||
2681 | else | ||||
2682 | Point.BB = Inst->getParent(); | ||||
2683 | } | ||||
2684 | |||||
2685 | /// Insert \p Inst at the recorded position. | ||||
2686 | void insert(Instruction *Inst) { | ||||
2687 | if (HasPrevInstruction) { | ||||
2688 | if (Inst->getParent()) | ||||
2689 | Inst->removeFromParent(); | ||||
2690 | Inst->insertAfter(Point.PrevInst); | ||||
2691 | } else { | ||||
2692 | Instruction *Position = &*Point.BB->getFirstInsertionPt(); | ||||
2693 | if (Inst->getParent()) | ||||
2694 | Inst->moveBefore(Position); | ||||
2695 | else | ||||
2696 | Inst->insertBefore(Position); | ||||
2697 | } | ||||
2698 | } | ||||
2699 | }; | ||||
2700 | |||||
2701 | /// Move an instruction before another. | ||||
2702 | class InstructionMoveBefore : public TypePromotionAction { | ||||
2703 | /// Original position of the instruction. | ||||
2704 | InsertionHandler Position; | ||||
2705 | |||||
2706 | public: | ||||
2707 | /// Move \p Inst before \p Before. | ||||
2708 | InstructionMoveBefore(Instruction *Inst, Instruction *Before) | ||||
2709 | : TypePromotionAction(Inst), Position(Inst) { | ||||
2710 | LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2711 | << "\n");
2712 | Inst->moveBefore(Before); | ||||
2713 | } | ||||
2714 | |||||
2715 | /// Move the instruction back to its original position. | ||||
2716 | void undo() override { | ||||
2717 | LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2718 | Position.insert(Inst); | ||||
2719 | } | ||||
2720 | }; | ||||
2721 | |||||
2722 | /// Set the operand of an instruction with a new value. | ||||
2723 | class OperandSetter : public TypePromotionAction { | ||||
2724 | /// Original operand of the instruction. | ||||
2725 | Value *Origin; | ||||
2726 | |||||
2727 | /// Index of the modified instruction. | ||||
2728 | unsigned Idx; | ||||
2729 | |||||
2730 | public: | ||||
2731 | /// Set \p Idx operand of \p Inst with \p NewVal. | ||||
2732 | OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) | ||||
2733 | : TypePromotionAction(Inst), Idx(Idx) { | ||||
2734 | LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2735 | << "for:" << *Inst << "\n"
2736 | << "with:" << *NewVal << "\n");
2737 | Origin = Inst->getOperand(Idx); | ||||
2738 | Inst->setOperand(Idx, NewVal); | ||||
2739 | } | ||||
2740 | |||||
2741 | /// Restore the original value of the instruction. | ||||
2742 | void undo() override { | ||||
2743 | LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2744 | << "for: " << *Inst << "\n"
2745 | << "with: " << *Origin << "\n");
2746 | Inst->setOperand(Idx, Origin); | ||||
2747 | } | ||||
2748 | }; | ||||
2749 | |||||
2750 | /// Hide the operands of an instruction. | ||||
2751 | /// Behave as if this instruction were not using any of its operands.
2752 | class OperandsHider : public TypePromotionAction { | ||||
2753 | /// The list of original operands. | ||||
2754 | SmallVector<Value *, 4> OriginalValues; | ||||
2755 | |||||
2756 | public: | ||||
2757 | /// Remove \p Inst from the uses of the operands of \p Inst. | ||||
2758 | OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { | ||||
2759 | LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2760 | unsigned NumOpnds = Inst->getNumOperands(); | ||||
2761 | OriginalValues.reserve(NumOpnds); | ||||
2762 | for (unsigned It = 0; It < NumOpnds; ++It) { | ||||
2763 | // Save the current operand. | ||||
2764 | Value *Val = Inst->getOperand(It); | ||||
2765 | OriginalValues.push_back(Val); | ||||
2766 | // Set a dummy one. | ||||
2767 | // We could use OperandSetter here, but that would imply an overhead | ||||
2768 | // that we are not willing to pay. | ||||
2769 | Inst->setOperand(It, UndefValue::get(Val->getType())); | ||||
2770 | } | ||||
2771 | } | ||||
2772 | |||||
2773 | /// Restore the original list of uses. | ||||
2774 | void undo() override { | ||||
2775 | LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2776 | for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) | ||||
2777 | Inst->setOperand(It, OriginalValues[It]); | ||||
2778 | } | ||||
2779 | }; | ||||
2780 | |||||
2781 | /// Build a truncate instruction. | ||||
2782 | class TruncBuilder : public TypePromotionAction { | ||||
2783 | Value *Val; | ||||
2784 | |||||
2785 | public: | ||||
2786 | /// Build a truncate instruction of \p Opnd producing a \p Ty | ||||
2787 | /// result. | ||||
2788 | /// trunc Opnd to Ty. | ||||
2789 | TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { | ||||
2790 | IRBuilder<> Builder(Opnd); | ||||
2791 | Builder.SetCurrentDebugLocation(DebugLoc()); | ||||
2792 | Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); | ||||
2793 | LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2794 | } | ||||
2795 | |||||
2796 | /// Get the built value. | ||||
2797 | Value *getBuiltValue() { return Val; } | ||||
2798 | |||||
2799 | /// Remove the built instruction. | ||||
2800 | void undo() override { | ||||
2801 | LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2802 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | ||||
2803 | IVal->eraseFromParent(); | ||||
2804 | } | ||||
2805 | }; | ||||
2806 | |||||
2807 | /// Build a sign extension instruction. | ||||
2808 | class SExtBuilder : public TypePromotionAction { | ||||
2809 | Value *Val; | ||||
2810 | |||||
2811 | public: | ||||
2812 | /// Build a sign extension instruction of \p Opnd producing a \p Ty | ||||
2813 | /// result. | ||||
2814 | /// sext Opnd to Ty. | ||||
2815 | SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | ||||
2816 | : TypePromotionAction(InsertPt) { | ||||
2817 | IRBuilder<> Builder(InsertPt); | ||||
2818 | Val = Builder.CreateSExt(Opnd, Ty, "promoted"); | ||||
2819 | LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2820 | } | ||||
2821 | |||||
2822 | /// Get the built value. | ||||
2823 | Value *getBuiltValue() { return Val; } | ||||
2824 | |||||
2825 | /// Remove the built instruction. | ||||
2826 | void undo() override { | ||||
2827 | LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2828 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | ||||
2829 | IVal->eraseFromParent(); | ||||
2830 | } | ||||
2831 | }; | ||||
2832 | |||||
2833 | /// Build a zero extension instruction. | ||||
2834 | class ZExtBuilder : public TypePromotionAction { | ||||
2835 | Value *Val; | ||||
2836 | |||||
2837 | public: | ||||
2838 | /// Build a zero extension instruction of \p Opnd producing a \p Ty | ||||
2839 | /// result. | ||||
2840 | /// zext Opnd to Ty. | ||||
2841 | ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | ||||
2842 | : TypePromotionAction(InsertPt) { | ||||
2843 | IRBuilder<> Builder(InsertPt); | ||||
2844 | Builder.SetCurrentDebugLocation(DebugLoc()); | ||||
2845 | Val = Builder.CreateZExt(Opnd, Ty, "promoted"); | ||||
2846 | LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2847 | } | ||||
2848 | |||||
2849 | /// Get the built value. | ||||
2850 | Value *getBuiltValue() { return Val; } | ||||
2851 | |||||
2852 | /// Remove the built instruction. | ||||
2853 | void undo() override { | ||||
2854 | LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2855 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | ||||
2856 | IVal->eraseFromParent(); | ||||
2857 | } | ||||
2858 | }; | ||||
2859 | |||||
2860 | /// Mutate an instruction to another type. | ||||
2861 | class TypeMutator : public TypePromotionAction { | ||||
2862 | /// Record the original type. | ||||
2863 | Type *OrigTy; | ||||
2864 | |||||
2865 | public: | ||||
2866 | /// Mutate the type of \p Inst into \p NewTy. | ||||
2867 | TypeMutator(Instruction *Inst, Type *NewTy) | ||||
2868 | : TypePromotionAction(Inst), OrigTy(Inst->getType()) { | ||||
2869 | LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2870 | << "\n");
2871 | Inst->mutateType(NewTy); | ||||
2872 | } | ||||
2873 | |||||
2874 | /// Mutate the instruction back to its original type. | ||||
2875 | void undo() override { | ||||
2876 | LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2877 | << "\n");
2878 | Inst->mutateType(OrigTy); | ||||
2879 | } | ||||
2880 | }; | ||||
2881 | |||||
2882 | /// Replace the uses of an instruction by another instruction. | ||||
2883 | class UsesReplacer : public TypePromotionAction { | ||||
2884 | /// Helper structure to keep track of the replaced uses. | ||||
2885 | struct InstructionAndIdx { | ||||
2886 | /// The instruction using the instruction. | ||||
2887 | Instruction *Inst; | ||||
2888 | |||||
2889 | /// The index where this instruction is used for Inst. | ||||
2890 | unsigned Idx; | ||||
2891 | |||||
2892 | InstructionAndIdx(Instruction *Inst, unsigned Idx) | ||||
2893 | : Inst(Inst), Idx(Idx) {} | ||||
2894 | }; | ||||
2895 | |||||
2896 | /// Keep track of the original uses (pair Instruction, Index). | ||||
2897 | SmallVector<InstructionAndIdx, 4> OriginalUses; | ||||
2898 | /// Keep track of the debug users. | ||||
2899 | SmallVector<DbgValueInst *, 1> DbgValues; | ||||
2900 | |||||
2901 | /// Keep track of the new value so that we can undo it by replacing | ||||
2902 | /// instances of the new value with the original value. | ||||
2903 | Value *New; | ||||
2904 | |||||
2905 | using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; | ||||
2906 | |||||
2907 | public: | ||||
2908 | /// Replace all the use of \p Inst by \p New. | ||||
2909 | UsesReplacer(Instruction *Inst, Value *New) | ||||
2910 | : TypePromotionAction(Inst), New(New) { | ||||
2911 | LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2912 | << "\n");
2913 | // Record the original uses. | ||||
2914 | for (Use &U : Inst->uses()) { | ||||
2915 | Instruction *UserI = cast<Instruction>(U.getUser()); | ||||
2916 | OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); | ||||
2917 | } | ||||
2918 | // Record the debug uses separately. They are not in the instruction's | ||||
2919 | // use list, but they are replaced by RAUW. | ||||
2920 | findDbgValues(DbgValues, Inst); | ||||
2921 | |||||
2922 | // Now, we can replace the uses. | ||||
2923 | Inst->replaceAllUsesWith(New); | ||||
2924 | } | ||||
2925 | |||||
2926 | /// Reassign the original uses of Inst back to Inst.
2927 | void undo() override { | ||||
2928 | LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2929 | for (InstructionAndIdx &Use : OriginalUses) | ||||
2930 | Use.Inst->setOperand(Use.Idx, Inst); | ||||
2931 | // RAUW has replaced all original uses with references to the new value, | ||||
2932 | // including the debug uses. Since we are undoing the replacements, | ||||
2933 | // the original debug uses must also be reinstated to maintain the | ||||
2934 | // correctness and utility of debug value instructions. | ||||
2935 | for (auto *DVI : DbgValues) | ||||
2936 | DVI->replaceVariableLocationOp(New, Inst); | ||||
2937 | } | ||||
2938 | }; | ||||
2939 | |||||
2940 | /// Remove an instruction from the IR. | ||||
2941 | class InstructionRemover : public TypePromotionAction { | ||||
2942 | /// Original position of the instruction. | ||||
2943 | InsertionHandler Inserter; | ||||
2944 | |||||
2945 | /// Helper structure to hide all the links to the instruction. In other
2946 | /// words, this helps to act as if the instruction had been removed.
2947 | OperandsHider Hider; | ||||
2948 | |||||
2949 | /// Keep track of the uses replaced, if any. | ||||
2950 | UsesReplacer *Replacer = nullptr; | ||||
2951 | |||||
2952 | /// Keep track of instructions removed. | ||||
2953 | SetOfInstrs &RemovedInsts; | ||||
2954 | |||||
2955 | public: | ||||
2956 | /// Remove all reference of \p Inst and optionally replace all its | ||||
2957 | /// uses with New. | ||||
2958 | /// \p RemovedInsts Keep track of the instructions removed by this Action. | ||||
2959 | /// \pre If !Inst->use_empty(), then New != nullptr | ||||
2960 | InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, | ||||
2961 | Value *New = nullptr) | ||||
2962 | : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), | ||||
2963 | RemovedInsts(RemovedInsts) { | ||||
2964 | if (New) | ||||
2965 | Replacer = new UsesReplacer(Inst, New); | ||||
2966 | LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2967 | RemovedInsts.insert(Inst); | ||||
2968 | /// The instructions removed here will be freed after completing | ||||
2969 | /// optimizeBlock() for all blocks as we need to keep track of the | ||||
2970 | /// removed instructions during promotion. | ||||
2971 | Inst->removeFromParent(); | ||||
2972 | } | ||||
2973 | |||||
2974 | ~InstructionRemover() override { delete Replacer; } | ||||
2975 | |||||
2976 | /// Resurrect the instruction and reassign it to the proper uses if | ||||
2977 | /// a new value was provided when building this action.
2978 | void undo() override { | ||||
2979 | LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2980 | Inserter.insert(Inst); | ||||
2981 | if (Replacer) | ||||
2982 | Replacer->undo(); | ||||
2983 | Hider.undo(); | ||||
2984 | RemovedInsts.erase(Inst); | ||||
2985 | } | ||||
2986 | }; | ||||
2987 | |||||
2988 | public: | ||||
2989 | /// Restoration point. | ||||
2990 | /// The restoration point is a pointer to an action instead of an iterator | ||||
2991 | /// because the iterator may be invalidated but not the pointer. | ||||
2992 | using ConstRestorationPt = const TypePromotionAction *; | ||||
2993 | |||||
2994 | TypePromotionTransaction(SetOfInstrs &RemovedInsts) | ||||
2995 | : RemovedInsts(RemovedInsts) {} | ||||
2996 | |||||
2997 | /// Advocate every change made in this transaction. Return true if any change
2998 | /// happened.
2999 | bool commit(); | ||||
3000 | |||||
3001 | /// Undo all the changes made after the given point. | ||||
3002 | void rollback(ConstRestorationPt Point); | ||||
3003 | |||||
3004 | /// Get the current restoration point. | ||||
3005 | ConstRestorationPt getRestorationPoint() const; | ||||
3006 | |||||
3007 | /// \name API for IR modification with state keeping to support rollback. | ||||
3008 | /// @{ | ||||
3009 | /// Same as Instruction::setOperand. | ||||
3010 | void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); | ||||
3011 | |||||
3012 | /// Same as Instruction::eraseFromParent. | ||||
3013 | void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); | ||||
3014 | |||||
3015 | /// Same as Value::replaceAllUsesWith. | ||||
3016 | void replaceAllUsesWith(Instruction *Inst, Value *New); | ||||
3017 | |||||
3018 | /// Same as Value::mutateType. | ||||
3019 | void mutateType(Instruction *Inst, Type *NewTy); | ||||
3020 | |||||
3021 | /// Same as IRBuilder::createTrunc. | ||||
3022 | Value *createTrunc(Instruction *Opnd, Type *Ty); | ||||
3023 | |||||
3024 | /// Same as IRBuilder::createSExt. | ||||
3025 | Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); | ||||
3026 | |||||
3027 | /// Same as IRBuilder::createZExt. | ||||
3028 | Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); | ||||
3029 | |||||
3030 | /// Same as Instruction::moveBefore. | ||||
3031 | void moveBefore(Instruction *Inst, Instruction *Before); | ||||
3032 | /// @} | ||||
3033 | |||||
3034 | private: | ||||
3035 | /// The ordered list of actions made so far. | ||||
3036 | SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; | ||||
3037 | |||||
3038 | using CommitPt = | ||||
3039 | SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; | ||||
3040 | |||||
3041 | SetOfInstrs &RemovedInsts; | ||||
3042 | }; | ||||
3043 | |||||
3044 | } // end anonymous namespace | ||||
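// A minimal usage sketch of the transaction API defined above (the
// profitability flag is hypothetical; the calls are the ones declared in
// TypePromotionTransaction):
//
// @code
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   TPT.mutateType(Ext, PromotedTy);             // recorded as a TypeMutator
//   Value *Trunc = TPT.createTrunc(Ext, OrigTy); // recorded as a TruncBuilder
//   if (!Profitable)
//     TPT.rollback(LastKnownGood); // undo() runs newest-to-oldest
//   else
//     TPT.commit();                // keep the changes and clear the log
// @endcode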
3045 | |||||
3046 | void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, | ||||
3047 | Value *NewVal) { | ||||
3048 | Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>( | ||||
3049 | Inst, Idx, NewVal)); | ||||
3050 | } | ||||
3051 | |||||
3052 | void TypePromotionTransaction::eraseInstruction(Instruction *Inst, | ||||
3053 | Value *NewVal) { | ||||
3054 | Actions.push_back( | ||||
3055 | std::make_unique<TypePromotionTransaction::InstructionRemover>( | ||||
3056 | Inst, RemovedInsts, NewVal)); | ||||
3057 | } | ||||
3058 | |||||
3059 | void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, | ||||
3060 | Value *New) { | ||||
3061 | Actions.push_back( | ||||
3062 | std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); | ||||
3063 | } | ||||
3064 | |||||
3065 | void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { | ||||
3066 | Actions.push_back( | ||||
3067 | std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); | ||||
3068 | } | ||||
3069 | |||||
3070 | Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) { | ||||
3071 | std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); | ||||
3072 | Value *Val = Ptr->getBuiltValue(); | ||||
3073 | Actions.push_back(std::move(Ptr)); | ||||
3074 | return Val; | ||||
3075 | } | ||||
3076 | |||||
3077 | Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd, | ||||
3078 | Type *Ty) { | ||||
3079 | std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); | ||||
3080 | Value *Val = Ptr->getBuiltValue(); | ||||
3081 | Actions.push_back(std::move(Ptr)); | ||||
3082 | return Val; | ||||
3083 | } | ||||
3084 | |||||
3085 | Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd, | ||||
3086 | Type *Ty) { | ||||
3087 | std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); | ||||
3088 | Value *Val = Ptr->getBuiltValue(); | ||||
3089 | Actions.push_back(std::move(Ptr)); | ||||
3090 | return Val; | ||||
3091 | } | ||||
3092 | |||||
3093 | void TypePromotionTransaction::moveBefore(Instruction *Inst, | ||||
3094 | Instruction *Before) { | ||||
3095 | Actions.push_back( | ||||
3096 | std::make_unique<TypePromotionTransaction::InstructionMoveBefore>( | ||||
3097 | Inst, Before)); | ||||
3098 | } | ||||
3099 | |||||
3100 | TypePromotionTransaction::ConstRestorationPt | ||||
3101 | TypePromotionTransaction::getRestorationPoint() const { | ||||
3102 | return !Actions.empty() ? Actions.back().get() : nullptr; | ||||
3103 | } | ||||
3104 | |||||
3105 | bool TypePromotionTransaction::commit() { | ||||
3106 | for (std::unique_ptr<TypePromotionAction> &Action : Actions) | ||||
3107 | Action->commit(); | ||||
3108 | bool Modified = !Actions.empty(); | ||||
3109 | Actions.clear(); | ||||
3110 | return Modified; | ||||
3111 | } | ||||
3112 | |||||
3113 | void TypePromotionTransaction::rollback( | ||||
3114 | TypePromotionTransaction::ConstRestorationPt Point) { | ||||
3115 | while (!Actions.empty() && Point != Actions.back().get()) { | ||||
3116 | std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); | ||||
3117 | Curr->undo(); | ||||
3118 | } | ||||
3119 | } | ||||
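// A small worked example of rollback() above: if Actions holds {A, B, C} in
// the order they were applied and Point was taken just after A, the loop pops
// and undoes C, then B, and stops with A at the back, restoring the IR to its
// exact state at the restoration point, as TypePromotionAction::undo requires.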
3120 | |||||
3121 | namespace { | ||||
3122 | |||||
3123 | /// A helper class for matching addressing modes. | ||||
3124 | /// | ||||
3125 | /// This encapsulates the logic for matching the target-legal addressing modes. | ||||
3126 | class AddressingModeMatcher { | ||||
3127 | SmallVectorImpl<Instruction *> &AddrModeInsts; | ||||
3128 | const TargetLowering &TLI; | ||||
3129 | const TargetRegisterInfo &TRI; | ||||
3130 | const DataLayout &DL; | ||||
3131 | const LoopInfo &LI; | ||||
3132 | const std::function<const DominatorTree &()> getDTFn; | ||||
3133 | |||||
3134 | /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and | ||||
3135 | /// the memory instruction that we're computing this address for. | ||||
3136 | Type *AccessTy; | ||||
3137 | unsigned AddrSpace; | ||||
3138 | Instruction *MemoryInst; | ||||
3139 | |||||
3140 | /// This is the addressing mode that we're building up. This is | ||||
3141 | /// part of the return value of this addressing mode matching stuff. | ||||
3142 | ExtAddrMode &AddrMode; | ||||
3143 | |||||
3144 | /// The instructions inserted by other CodeGenPrepare optimizations. | ||||
3145 | const SetOfInstrs &InsertedInsts; | ||||
3146 | |||||
3147 | /// A map from the instructions to their type before promotion. | ||||
3148 | InstrToOrigTy &PromotedInsts; | ||||
3149 | |||||
3150 | /// The ongoing transaction where every action should be registered. | ||||
3151 | TypePromotionTransaction &TPT; | ||||
3152 | |||||
3153 | // A GEP whose offset is too large to be folded into the addressing mode.
3154 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; | ||||
3155 | |||||
3156 | /// This is set to true when we should not do profitability checks. | ||||
3157 | /// When true, IsProfitableToFoldIntoAddressingMode always returns true. | ||||
3158 | bool IgnoreProfitability; | ||||
3159 | |||||
3160 | /// True if we are optimizing for size. | ||||
3161 | bool OptSize; | ||||
3162 | |||||
3163 | ProfileSummaryInfo *PSI; | ||||
3164 | BlockFrequencyInfo *BFI; | ||||
3165 | |||||
3166 | AddressingModeMatcher( | ||||
3167 | SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, | ||||
3168 | const TargetRegisterInfo &TRI, const LoopInfo &LI, | ||||
3169 | const std::function<const DominatorTree &()> getDTFn, Type *AT, | ||||
3170 | unsigned AS, Instruction *MI, ExtAddrMode &AM, | ||||
3171 | const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, | ||||
3172 | TypePromotionTransaction &TPT, | ||||
3173 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, | ||||
3174 | bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) | ||||
3175 | : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), | ||||
3176 | DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn), | ||||
3177 | AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM), | ||||
3178 | InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT), | ||||
3179 | LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) { | ||||
3180 | IgnoreProfitability = false; | ||||
3181 | } | ||||
3182 | |||||
3183 | public: | ||||
3184 | /// Find the maximal addressing mode that a load/store of V can fold, | ||||
3185 | /// given an access type of AccessTy. This returns a list of involved
3186 | /// instructions in AddrModeInsts. | ||||
3187 | /// \p InsertedInsts The instructions inserted by other CodeGenPrepare | ||||
3188 | /// optimizations. | ||||
3189 | /// \p PromotedInsts maps the instructions to their type before promotion. | ||||
3190 | /// \p TPT The ongoing transaction where every action should be registered.
3191 | static ExtAddrMode | ||||
3192 | Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, | ||||
3193 | SmallVectorImpl<Instruction *> &AddrModeInsts, | ||||
3194 | const TargetLowering &TLI, const LoopInfo &LI, | ||||
3195 | const std::function<const DominatorTree &()> getDTFn, | ||||
3196 | const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts, | ||||
3197 | InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, | ||||
3198 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, | ||||
3199 | bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { | ||||
3200 | ExtAddrMode Result; | ||||
3201 | |||||
3202 | bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn, | ||||
3203 | AccessTy, AS, MemoryInst, Result, | ||||
3204 | InsertedInsts, PromotedInsts, TPT, | ||||
3205 | LargeOffsetGEP, OptSize, PSI, BFI) | ||||
3206 | .matchAddr(V, 0); | ||||
3207 | (void)Success; | ||||
3208 | assert(Success && "Couldn't select *anything*?");
3209 | return Result; | ||||
3210 | } | ||||
3211 | |||||
3212 | private: | ||||
3213 | bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); | ||||
3214 | bool matchAddr(Value *Addr, unsigned Depth); | ||||
3215 | bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, | ||||
3216 | bool *MovedAway = nullptr); | ||||
3217 | bool isProfitableToFoldIntoAddressingMode(Instruction *I, | ||||
3218 | ExtAddrMode &AMBefore, | ||||
3219 | ExtAddrMode &AMAfter); | ||||
3220 | bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); | ||||
3221 | bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, | ||||
3222 | Value *PromotedOperand) const; | ||||
3223 | }; | ||||
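// A minimal sketch of how Match() above is typically invoked (the surrounding
// variable names are hypothetical; the parameter order is the one declared
// above):
//
// @code
//   ExtAddrMode AM = AddressingModeMatcher::Match(
//       AddrVal, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI,
//       GetDT, *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP,
//       OptSize, PSI, BFI.get());
// @endcode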
3224 | |||||
3225 | class PhiNodeSet; | ||||
3226 | |||||
3227 | /// An iterator for PhiNodeSet. | ||||
3228 | class PhiNodeSetIterator { | ||||
3229 | PhiNodeSet *const Set; | ||||
3230 | size_t CurrentIndex = 0; | ||||
3231 | |||||
3232 | public: | ||||
3233 | /// The constructor. Start should point to either a valid element, or be equal | ||||
3234 | /// to the size of the underlying SmallVector of the PhiNodeSet. | ||||
3235 | PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start); | ||||
3236 | PHINode *operator*() const; | ||||
3237 | PhiNodeSetIterator &operator++(); | ||||
3238 | bool operator==(const PhiNodeSetIterator &RHS) const; | ||||
3239 | bool operator!=(const PhiNodeSetIterator &RHS) const; | ||||
3240 | }; | ||||
3241 | |||||
3242 | /// Keeps a set of PHINodes. | ||||
3243 | /// | ||||
3244 | /// This is a minimal set implementation for a specific use case: | ||||
3245 | /// It is very fast when there are very few elements, but also provides good | ||||
3246 | /// performance when there are many. It is similar to SmallPtrSet, but also | ||||
3247 | /// provides iteration by insertion order, which is deterministic and stable | ||||
3248 | /// across runs. It is also similar to SmallSetVector, but allows removing
3249 | /// elements in O(1) time. This is achieved by not actually removing the element
3250 | /// from the underlying vector, which costs some extra memory; that is fine,
3251 | /// since PhiNodeSets are used as short-lived objects.
3252 | class PhiNodeSet { | ||||
3253 | friend class PhiNodeSetIterator; | ||||
3254 | |||||
3255 | using MapType = SmallDenseMap<PHINode *, size_t, 32>; | ||||
3256 | using iterator = PhiNodeSetIterator; | ||||
3257 | |||||
3258 | /// Keeps the elements in the order of their insertion in the underlying | ||||
3259 | /// vector. To achieve constant time removal, it never deletes any element. | ||||
3260 | SmallVector<PHINode *, 32> NodeList; | ||||
3261 | |||||
3262 | /// Keeps the elements in the underlying set implementation. This (and not the | ||||
3263 | /// NodeList defined above) is the source of truth on whether an element | ||||
3264 | /// is actually in the collection. | ||||
3265 | MapType NodeMap; | ||||
3266 | |||||
3267 | /// Points to the first valid (not deleted) element when the set is not empty | ||||
3268 | /// and the value is not zero. Equals the size of the underlying vector | ||||
3269 | /// when the set is empty. When the value is 0, as in the beginning, the | ||||
3270 | /// first element may or may not be valid. | ||||
3271 | size_t FirstValidElement = 0; | ||||
3272 | |||||
3273 | public: | ||||
3274 | /// Inserts a new element to the collection. | ||||
3275 | /// \returns true if the element is actually added, i.e. was not in the | ||||
3276 | /// collection before the operation. | ||||
3277 | bool insert(PHINode *Ptr) { | ||||
3278 | if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { | ||||
3279 | NodeList.push_back(Ptr); | ||||
3280 | return true; | ||||
3281 | } | ||||
3282 | return false; | ||||
3283 | } | ||||
3284 | |||||
3285 | /// Removes the element from the collection. | ||||
3286 | /// \returns whether the element is actually removed, i.e. was in the | ||||
3287 | /// collection before the operation. | ||||
3288 | bool erase(PHINode *Ptr) { | ||||
3289 | if (NodeMap.erase(Ptr)) { | ||||
3290 | SkipRemovedElements(FirstValidElement); | ||||
3291 | return true; | ||||
3292 | } | ||||
3293 | return false; | ||||
3294 | } | ||||
3295 | |||||
3296 | /// Removes all elements and clears the collection. | ||||
3297 | void clear() { | ||||
3298 | NodeMap.clear(); | ||||
3299 | NodeList.clear(); | ||||
3300 | FirstValidElement = 0; | ||||
3301 | } | ||||
3302 | |||||
3303 | /// \returns an iterator that will iterate the elements in the order of | ||||
3304 | /// insertion. | ||||
3305 | iterator begin() { | ||||
3306 | if (FirstValidElement == 0) | ||||
3307 | SkipRemovedElements(FirstValidElement); | ||||
3308 | return PhiNodeSetIterator(this, FirstValidElement); | ||||
3309 | } | ||||
3310 | |||||
3311 | /// \returns an iterator that points to the end of the collection. | ||||
3312 | iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } | ||||
3313 | |||||
3314 | /// Returns the number of elements in the collection. | ||||
3315 | size_t size() const { return NodeMap.size(); } | ||||
3316 | |||||
3318 | /// \returns 1 if the given element is in the collection, and 0 otherwise. | ||||
3318 | size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); } | ||||
3319 | |||||
3320 | private: | ||||
3321 | /// Updates the CurrentIndex so that it will point to a valid element. | ||||
3322 | /// | ||||
3323 | /// If the element of NodeList at CurrentIndex is valid, it does not | ||||
3324 | /// change it. If there are no more valid elements, it updates CurrentIndex | ||||
3325 | /// to point to the end of the NodeList. | ||||
3326 | void SkipRemovedElements(size_t &CurrentIndex) { | ||||
3327 | while (CurrentIndex < NodeList.size()) { | ||||
3328 | auto it = NodeMap.find(NodeList[CurrentIndex]); | ||||
3329 | // If the element has been deleted and added again later, NodeMap will | ||||
3330 | // point to a different index, so CurrentIndex will still be invalid. | ||||
3331 | if (it != NodeMap.end() && it->second == CurrentIndex) | ||||
3332 | break; | ||||
3333 | ++CurrentIndex; | ||||
3334 | } | ||||
3335 | } | ||||
3336 | }; | ||||
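// Editorial sketch (not part of CodeGenPrepare.cpp): a minimal standalone
// model of the same tombstone scheme PhiNodeSet uses, with int keys instead
// of PHINode*. The map is the source of truth, the vector only records
// insertion order and never shrinks, and iteration skips stale slots, just
// as SkipRemovedElements does above. All names here are illustrative.
#include <cstdio>
#include <unordered_map>
#include <vector>

struct TombstoneSet {
  std::vector<int> List;               // insertion order; never shrinks
  std::unordered_map<int, size_t> Map; // source of truth: key -> index

  bool insert(int V) {
    if (!Map.emplace(V, List.size()).second)
      return false; // already present
    List.push_back(V);
    return true;
  }
  bool erase(int V) { return Map.erase(V) != 0; } // O(1): leaves a tombstone
  template <typename Fn> void forEach(Fn F) {
    for (size_t I = 0; I < List.size(); ++I) {
      auto It = Map.find(List[I]);
      // Skip erased entries and entries re-inserted at a different index.
      if (It != Map.end() && It->second == I)
        F(List[I]);
    }
  }
};

int main() {
  TombstoneSet S;
  S.insert(1); S.insert(2); S.insert(3);
  S.erase(2);
  S.insert(2); // re-inserted, so it now iterates last, at its new index
  S.forEach([](int V) { std::printf("%d ", V); }); // prints: 1 3 2
}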
3337 | |||||
3338 | PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) | ||||
3339 | : Set(Set), CurrentIndex(Start) {} | ||||
3340 | |||||
3341 | PHINode *PhiNodeSetIterator::operator*() const { | ||||
3342 | assert(CurrentIndex < Set->NodeList.size() && | ||||
3343 | "PhiNodeSet access out of range"); | ||||
3344 | return Set->NodeList[CurrentIndex]; | ||||
3345 | } | ||||
3346 | |||||
3347 | PhiNodeSetIterator &PhiNodeSetIterator::operator++() { | ||||
3348 | assert(CurrentIndex < Set->NodeList.size() && | ||||
3349 | "PhiNodeSet access out of range"); | ||||
3350 | ++CurrentIndex; | ||||
3351 | Set->SkipRemovedElements(CurrentIndex); | ||||
3352 | return *this; | ||||
3353 | } | ||||
3354 | |||||
3355 | bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { | ||||
3356 | return CurrentIndex == RHS.CurrentIndex; | ||||
3357 | } | ||||
3358 | |||||
3359 | bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { | ||||
3360 | return !((*this) == RHS); | ||||
3361 | } | ||||
3362 | |||||
3363 | /// Keeps track of the simplification of Phi nodes. | ||||
3364 | /// Accepts the set of all phi nodes and erases a phi node from this set | ||||
3365 | /// if it is simplified. | ||||
3366 | class SimplificationTracker { | ||||
3367 | DenseMap<Value *, Value *> Storage; | ||||
3368 | const SimplifyQuery &SQ; | ||||
3369 | // Tracks newly created Phi nodes. The elements are iterated by insertion | ||||
3370 | // order. | ||||
3371 | PhiNodeSet AllPhiNodes; | ||||
3372 | // Tracks newly created Select nodes. | ||||
3373 | SmallPtrSet<SelectInst *, 32> AllSelectNodes; | ||||
3374 | |||||
3375 | public: | ||||
3376 | SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {} | ||||
3377 | |||||
3378 | Value *Get(Value *V) { | ||||
3379 | do { | ||||
3380 | auto SV = Storage.find(V); | ||||
3381 | if (SV == Storage.end()) | ||||
3382 | return V; | ||||
3383 | V = SV->second; | ||||
3384 | } while (true); | ||||
3385 | } | ||||
3386 | |||||
3387 | Value *Simplify(Value *Val) { | ||||
3388 | SmallVector<Value *, 32> WorkList; | ||||
3389 | SmallPtrSet<Value *, 32> Visited; | ||||
3390 | WorkList.push_back(Val); | ||||
3391 | while (!WorkList.empty()) { | ||||
3392 | auto *P = WorkList.pop_back_val(); | ||||
3393 | if (!Visited.insert(P).second) | ||||
3394 | continue; | ||||
3395 | if (auto *PI = dyn_cast<Instruction>(P)) | ||||
3396 | if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) { | ||||
3397 | for (auto *U : PI->users()) | ||||
3398 | WorkList.push_back(cast<Value>(U)); | ||||
3399 | Put(PI, V); | ||||
3400 | PI->replaceAllUsesWith(V); | ||||
3401 | if (auto *PHI = dyn_cast<PHINode>(PI)) | ||||
3402 | AllPhiNodes.erase(PHI); | ||||
3403 | if (auto *Select = dyn_cast<SelectInst>(PI)) | ||||
3404 | AllSelectNodes.erase(Select); | ||||
3405 | PI->eraseFromParent(); | ||||
3406 | } | ||||
3407 | } | ||||
3408 | return Get(Val); | ||||
3409 | } | ||||
3410 | |||||
3411 | void Put(Value *From, Value *To) { Storage.insert({From, To}); } | ||||
3412 | |||||
3413 | void ReplacePhi(PHINode *From, PHINode *To) { | ||||
3414 | Value *OldReplacement = Get(From); | ||||
3415 | while (OldReplacement != From) { | ||||
3416 | From = To; | ||||
3417 | To = dyn_cast<PHINode>(OldReplacement); | ||||
3418 | OldReplacement = Get(From); | ||||
3419 | } | ||||
3420 | assert(To && Get(To) == To && "Replacement PHI node is already replaced."); | ||||
3421 | Put(From, To); | ||||
3422 | From->replaceAllUsesWith(To); | ||||
3423 | AllPhiNodes.erase(From); | ||||
3424 | From->eraseFromParent(); | ||||
3425 | } | ||||
3426 | |||||
3427 | PhiNodeSet &newPhiNodes() { return AllPhiNodes; } | ||||
3428 | |||||
3429 | void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } | ||||
3430 | |||||
3431 | void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } | ||||
3432 | |||||
3433 | unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } | ||||
3434 | |||||
3435 | unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } | ||||
3436 | |||||
3437 | void destroyNewNodes(Type *CommonType) { | ||||
3438 | // For safe erasing, replace the uses with a dummy value first. | ||||
3439 | auto *Dummy = PoisonValue::get(CommonType); | ||||
3440 | for (auto *I : AllPhiNodes) { | ||||
3441 | I->replaceAllUsesWith(Dummy); | ||||
3442 | I->eraseFromParent(); | ||||
3443 | } | ||||
3444 | AllPhiNodes.clear(); | ||||
3445 | for (auto *I : AllSelectNodes) { | ||||
3446 | I->replaceAllUsesWith(Dummy); | ||||
3447 | I->eraseFromParent(); | ||||
3448 | } | ||||
3449 | AllSelectNodes.clear(); | ||||
3450 | } | ||||
3451 | }; | ||||
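// Editorial sketch (illustrative names, not LLVM API): the replacement-chain
// lookup that Get() performs above, restated on plain strings. Follow the
// Storage map until a value maps to nothing, i.e. it was never replaced;
// chains arise when a replacement is itself simplified later.
#include <cassert>
#include <map>
#include <string>

static std::string resolve(const std::map<std::string, std::string> &Storage,
                           std::string V) {
  for (auto It = Storage.find(V); It != Storage.end(); It = Storage.find(V))
    V = It->second; // V was replaced; chase the replacement
  return V;
}

int main() {
  // p1 was simplified to p2, and p2 later to b: Get(p1) must yield b.
  std::map<std::string, std::string> Storage{{"p1", "p2"}, {"p2", "b"}};
  assert(resolve(Storage, "p1") == "b");
  assert(resolve(Storage, "x") == "x"); // never replaced
}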
3452 | |||||
3453 | /// A helper class for combining addressing modes. | ||||
3454 | class AddressingModeCombiner { | ||||
3455 | typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; | ||||
3456 | typedef std::pair<PHINode *, PHINode *> PHIPair; | ||||
3457 | |||||
3458 | private: | ||||
3459 | /// The addressing modes we've collected. | ||||
3460 | SmallVector<ExtAddrMode, 16> AddrModes; | ||||
3461 | |||||
3462 | /// The field in which the AddrModes differ, when we have more than one. | ||||
3463 | ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; | ||||
3464 | |||||
3465 | /// Are the AddrModes that we have all just equal to their original values? | ||||
3466 | bool AllAddrModesTrivial = true; | ||||
3467 | |||||
3468 | /// Common Type for all different fields in addressing modes. | ||||
3469 | Type *CommonType = nullptr; | ||||
3470 | |||||
3471 | /// SimplifyQuery for simplifyInstruction utility. | ||||
3472 | const SimplifyQuery &SQ; | ||||
3473 | |||||
3474 | /// Original Address. | ||||
3475 | Value *Original; | ||||
3476 | |||||
3477 | public: | ||||
3478 | AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) | ||||
3479 | : SQ(_SQ), Original(OriginalValue) {} | ||||
3480 | |||||
3481 | /// Get the combined AddrMode | ||||
3482 | const ExtAddrMode &getAddrMode() const { return AddrModes[0]; } | ||||
3483 | |||||
3484 | /// Add a new AddrMode if it's compatible with the AddrModes we already | ||||
3485 | /// have. | ||||
3486 | /// \return True iff we succeeded in doing so. | ||||
3487 | bool addNewAddrMode(ExtAddrMode &NewAddrMode) { | ||||
3488 | // Take note of whether we have any non-trivial AddrModes: we need to detect | ||||
3489 | // when all AddrModes are trivial, since then any phi or select we introduced | ||||
3490 | // would just duplicate what's already there. | ||||
3491 | AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); | ||||
3492 | |||||
3493 | // If this is the first addrmode then everything is fine. | ||||
3494 | if (AddrModes.empty()) { | ||||
3495 | AddrModes.emplace_back(NewAddrMode); | ||||
3496 | return true; | ||||
3497 | } | ||||
3498 | |||||
3499 | // Figure out how different this is from the other address modes, which we | ||||
3500 | // can do just by comparing against the first one given that we only care | ||||
3501 | // about the cumulative difference. | ||||
3502 | ExtAddrMode::FieldName ThisDifferentField = | ||||
3503 | AddrModes[0].compare(NewAddrMode); | ||||
3504 | if (DifferentField == ExtAddrMode::NoField) | ||||
3505 | DifferentField = ThisDifferentField; | ||||
3506 | else if (DifferentField != ThisDifferentField) | ||||
3507 | DifferentField = ExtAddrMode::MultipleFields; | ||||
3508 | |||||
3509 | // If NewAddrMode differs in more than one dimension we cannot handle it. | ||||
3510 | bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; | ||||
3511 | |||||
3512 | // If Scale Field is different then we reject. | ||||
3513 | CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; | ||||
3514 | |||||
3515 | // We must also reject the case where the base offset differs and the | ||||
3516 | // scaled register is not null: we cannot handle it, because the merge of | ||||
3517 | // the different offsets would have to be used as the ScaleReg. | ||||
3518 | CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || | ||||
3519 | !NewAddrMode.ScaledReg); | ||||
3520 | |||||
3521 | // We must also reject the case where the GV differs and a BaseReg is | ||||
3522 | // installed, because we want to use the base register as the merge of the GV values. | ||||
3523 | CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || | ||||
3524 | !NewAddrMode.HasBaseReg); | ||||
3525 | |||||
3526 | // Even if NewAddrMode is the same, we still need to collect it, because | ||||
3527 | // the original value is different; later we will need all the original | ||||
3528 | // values as anchors when finding the common Phi node. | ||||
3529 | if (CanHandle) | ||||
3530 | AddrModes.emplace_back(NewAddrMode); | ||||
3531 | else | ||||
3532 | AddrModes.clear(); | ||||
3533 | |||||
3534 | return CanHandle; | ||||
3535 | } | ||||
3536 | |||||
3537 | /// Combine the addressing modes we've collected into a single | ||||
3538 | /// addressing mode. | ||||
3539 | /// \return True iff we successfully combined them or we only had one so | ||||
3540 | /// didn't need to combine them anyway. | ||||
3541 | bool combineAddrModes() { | ||||
3542 | // If we have no AddrModes then they can't be combined. | ||||
3543 | if (AddrModes.size() == 0) | ||||
3544 | return false; | ||||
3545 | |||||
3546 | // A single AddrMode can trivially be combined. | ||||
3547 | if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) | ||||
3548 | return true; | ||||
3549 | |||||
3550 | // If the AddrModes we collected are all just equal to the value they are | ||||
3551 | // derived from then combining them wouldn't do anything useful. | ||||
3552 | if (AllAddrModesTrivial) | ||||
3553 | return false; | ||||
3554 | |||||
3555 | if (!addrModeCombiningAllowed()) | ||||
3556 | return false; | ||||
3557 | |||||
3558 | // Build a map from each original value to the value of the differing | ||||
3559 | // field seen in its addressing mode. | ||||
3560 | // Bail out if there is no common type. | ||||
3561 | FoldAddrToValueMapping Map; | ||||
3562 | if (!initializeMap(Map)) | ||||
3563 | return false; | ||||
3564 | |||||
3565 | Value *CommonValue = findCommon(Map); | ||||
3566 | if (CommonValue) | ||||
3567 | AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); | ||||
3568 | return CommonValue != nullptr; | ||||
3569 | } | ||||
3570 | |||||
3571 | private: | ||||
3572 | /// Initialize Map with anchor values. For each address seen, | ||||
3573 | /// we record the value of the differing field in that address. | ||||
3574 | /// At the same time we find a common type for the differing field, which we | ||||
3575 | /// will use to create new Phi/Select nodes; it is kept in the CommonType field. | ||||
3576 | /// Return false if no common type is found. | ||||
3577 | bool initializeMap(FoldAddrToValueMapping &Map) { | ||||
3578 | // Keep track of keys where the value is null. We will need to replace it | ||||
3579 | // with constant null when we know the common type. | ||||
3580 | SmallVector<Value *, 2> NullValue; | ||||
3581 | Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); | ||||
3582 | for (auto &AM : AddrModes) { | ||||
3583 | Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); | ||||
3584 | if (DV) { | ||||
3585 | auto *Type = DV->getType(); | ||||
3586 | if (CommonType && CommonType != Type) | ||||
3587 | return false; | ||||
3588 | CommonType = Type; | ||||
3589 | Map[AM.OriginalValue] = DV; | ||||
3590 | } else { | ||||
3591 | NullValue.push_back(AM.OriginalValue); | ||||
3592 | } | ||||
3593 | } | ||||
3594 | assert(CommonType && "At least one non-null value must be!"); | ||||
3595 | for (auto *V : NullValue) | ||||
3596 | Map[V] = Constant::getNullValue(CommonType); | ||||
3597 | return true; | ||||
3598 | } | ||||
3599 | |||||
3600 | /// We have a mapping from each value A to another value B, where B was a | ||||
3601 | /// field in the addressing mode represented by A. We also have an original | ||||
3602 | /// value C representing the address we start with. Traversing from C through | ||||
3603 | /// phis and selects, we end up at the A's in the map. This utility function | ||||
3604 | /// tries to find a value V which is a field in addressing mode C such that, | ||||
3605 | /// traversing through phi nodes and selects, we end up at the corresponding | ||||
3606 | /// values B in the map. The utility creates new Phis/Selects if needed. | ||||
3607 | // A simple example looks as follows: | ||||
3608 | // BB1: | ||||
3609 | // p1 = b1 + 40 | ||||
3610 | // br cond BB2, BB3 | ||||
3611 | // BB2: | ||||
3612 | // p2 = b2 + 40 | ||||
3613 | // br BB3 | ||||
3614 | // BB3: | ||||
3615 | // p = phi [p1, BB1], [p2, BB2] | ||||
3616 | // v = load p | ||||
3617 | // Map is | ||||
3618 | // p1 -> b1 | ||||
3619 | // p2 -> b2 | ||||
3620 | // Request is | ||||
3621 | // p -> ? | ||||
3622 | // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. | ||||
3623 | Value *findCommon(FoldAddrToValueMapping &Map) { | ||||
3624 | // Tracks the simplification of newly created phi nodes. We use this | ||||
3625 | // mapping because we will add newly created Phi nodes to AddrToBase. | ||||
3626 | // Simplification of Phi nodes is recursive, so some Phi node may | ||||
3627 | // be simplified after we have added it to AddrToBase. In practice this | ||||
3628 | // simplification is possible only if the original phis/selects have not | ||||
3629 | // been simplified yet. | ||||
3630 | // Using this mapping we can find the current value in AddrToBase. | ||||
3631 | SimplificationTracker ST(SQ); | ||||
3632 | |||||
3633 | // First step, DFS to create PHI nodes for all intermediate blocks. | ||||
3634 | // Also fill traverse order for the second step. | ||||
3635 | SmallVector<Value *, 32> TraverseOrder; | ||||
3636 | InsertPlaceholders(Map, TraverseOrder, ST); | ||||
3637 | |||||
3638 | // Second Step, fill new nodes by merged values and simplify if possible. | ||||
3639 | FillPlaceholders(Map, TraverseOrder, ST); | ||||
3640 | |||||
3641 | if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { | ||||
3642 | ST.destroyNewNodes(CommonType); | ||||
3643 | return nullptr; | ||||
3644 | } | ||||
3645 | |||||
3646 | // Now we'd like to match the new Phi nodes to existing ones. | ||||
3647 | unsigned PhiNotMatchedCount = 0; | ||||
3648 | if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { | ||||
3649 | ST.destroyNewNodes(CommonType); | ||||
3650 | return nullptr; | ||||
3651 | } | ||||
3652 | |||||
3653 | auto *Result = ST.Get(Map.find(Original)->second); | ||||
3654 | if (Result) { | ||||
3655 | NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; | ||||
3656 | NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); | ||||
3657 | } | ||||
3658 | return Result; | ||||
3659 | } | ||||
3660 | |||||
3661 | /// Try to match PHI node to Candidate. | ||||
3662 | /// Matcher tracks the matched Phi nodes. | ||||
3663 | bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, | ||||
3664 | SmallSetVector<PHIPair, 8> &Matcher, | ||||
3665 | PhiNodeSet &PhiNodesToMatch) { | ||||
3666 | SmallVector<PHIPair, 8> WorkList; | ||||
3667 | Matcher.insert({PHI, Candidate}); | ||||
3668 | SmallSet<PHINode *, 8> MatchedPHIs; | ||||
3669 | MatchedPHIs.insert(PHI); | ||||
3670 | WorkList.push_back({PHI, Candidate}); | ||||
3671 | SmallSet<PHIPair, 8> Visited; | ||||
3672 | while (!WorkList.empty()) { | ||||
3673 | auto Item = WorkList.pop_back_val(); | ||||
3674 | if (!Visited.insert(Item).second) | ||||
3675 | continue; | ||||
3676 | // We iterate over all incoming values of the Phi to compare them. | ||||
3677 | // If the values differ, both of them are Phis, the first one is a Phi we | ||||
3678 | // added (subject to match), and both are in the same basic block, then we | ||||
3679 | // can match our pair if the values themselves match. So we state that | ||||
3680 | // these values match and add them to the work list to verify that. | ||||
3681 | for (auto *B : Item.first->blocks()) { | ||||
3682 | Value *FirstValue = Item.first->getIncomingValueForBlock(B); | ||||
3683 | Value *SecondValue = Item.second->getIncomingValueForBlock(B); | ||||
3684 | if (FirstValue == SecondValue) | ||||
3685 | continue; | ||||
3686 | |||||
3687 | PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); | ||||
3688 | PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); | ||||
3689 | |||||
3690 | // If one of them is not a Phi, or | ||||
3691 | // the first one is not a Phi node from the set we'd like to match, or | ||||
3692 | // the Phi nodes are from different basic blocks, then | ||||
3693 | // we will not be able to match. | ||||
3694 | if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || | ||||
3695 | FirstPhi->getParent() != SecondPhi->getParent()) | ||||
3696 | return false; | ||||
3697 | |||||
3698 | // If we already matched them then continue. | ||||
3699 | if (Matcher.count({FirstPhi, SecondPhi})) | ||||
3700 | continue; | ||||
3701 | // The values are different and do not match yet, so we need them to | ||||
3702 | // match. (But we register no more than one match per PHI node, so that | ||||
3703 | // we won't later try to replace them twice.) | ||||
3704 | if (MatchedPHIs.insert(FirstPhi).second) | ||||
3705 | Matcher.insert({FirstPhi, SecondPhi}); | ||||
3706 | // But we must check it. | ||||
3707 | WorkList.push_back({FirstPhi, SecondPhi}); | ||||
3708 | } | ||||
3709 | } | ||||
3710 | return true; | ||||
3711 | } | ||||
3712 | |||||
3713 | /// For the given set of PHI nodes (in the SimplificationTracker) try | ||||
3714 | /// to find their equivalents. | ||||
3715 | /// Returns false if this matching fails and creation of new Phi is disabled. | ||||
3716 | bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, | ||||
3717 | unsigned &PhiNotMatchedCount) { | ||||
3718 | // Matched and PhiNodesToMatch iterate their elements in a deterministic | ||||
3719 | // order, so the replacements (ReplacePhi) are also done in a deterministic | ||||
3720 | // order. | ||||
3721 | SmallSetVector<PHIPair, 8> Matched; | ||||
3722 | SmallPtrSet<PHINode *, 8> WillNotMatch; | ||||
3723 | PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); | ||||
3724 | while (PhiNodesToMatch.size()) { | ||||
3725 | PHINode *PHI = *PhiNodesToMatch.begin(); | ||||
3726 | |||||
3727 | // Add ourselves: if no Phi node in the basic block matches, we will not match either. | ||||
3728 | WillNotMatch.clear(); | ||||
3729 | WillNotMatch.insert(PHI); | ||||
3730 | |||||
3731 | // Traverse all Phis until we find an equivalent one or fail to do so. | ||||
3732 | bool IsMatched = false; | ||||
3733 | for (auto &P : PHI->getParent()->phis()) { | ||||
3734 | // Skip new Phi nodes. | ||||
3735 | if (PhiNodesToMatch.count(&P)) | ||||
3736 | continue; | ||||
3737 | if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) | ||||
3738 | break; | ||||
3739 | // If it does not match, collect all Phi nodes from the matcher; | ||||
3740 | // if we end up with no match, then all these Phi nodes will not match | ||||
3741 | // later. | ||||
3742 | for (auto M : Matched) | ||||
3743 | WillNotMatch.insert(M.first); | ||||
3744 | Matched.clear(); | ||||
3745 | } | ||||
3746 | if (IsMatched) { | ||||
3747 | // Replace all matched values and erase them. | ||||
3748 | for (auto MV : Matched) | ||||
3749 | ST.ReplacePhi(MV.first, MV.second); | ||||
3750 | Matched.clear(); | ||||
3751 | continue; | ||||
3752 | } | ||||
3753 | // If we are not allowed to create new nodes then bail out. | ||||
3754 | if (!AllowNewPhiNodes) | ||||
3755 | return false; | ||||
3756 | // Just remove all seen values in matcher. They will not match anything. | ||||
3757 | PhiNotMatchedCount += WillNotMatch.size(); | ||||
3758 | for (auto *P : WillNotMatch) | ||||
3759 | PhiNodesToMatch.erase(P); | ||||
3760 | } | ||||
3761 | return true; | ||||
3762 | } | ||||
3763 | /// Fill the placeholders with values from predecessors and simplify them. | ||||
3764 | void FillPlaceholders(FoldAddrToValueMapping &Map, | ||||
3765 | SmallVectorImpl<Value *> &TraverseOrder, | ||||
3766 | SimplificationTracker &ST) { | ||||
3767 | while (!TraverseOrder.empty()) { | ||||
3768 | Value *Current = TraverseOrder.pop_back_val(); | ||||
3769 | assert(Map.find(Current) != Map.end() && "No node to fill!!!"); | ||||
3770 | Value *V = Map[Current]; | ||||
3771 | |||||
3772 | if (SelectInst *Select = dyn_cast<SelectInst>(V)) { | ||||
3773 | // CurrentValue also must be Select. | ||||
3774 | auto *CurrentSelect = cast<SelectInst>(Current); | ||||
3775 | auto *TrueValue = CurrentSelect->getTrueValue(); | ||||
3776 | assert(Map.find(TrueValue) != Map.end() && "No True Value!"); | ||||
3777 | Select->setTrueValue(ST.Get(Map[TrueValue])); | ||||
3778 | auto *FalseValue = CurrentSelect->getFalseValue(); | ||||
3779 | assert(Map.find(FalseValue) != Map.end() && "No False Value!"); | ||||
3780 | Select->setFalseValue(ST.Get(Map[FalseValue])); | ||||
3781 | } else { | ||||
3782 | // Must be a Phi node then. | ||||
3783 | auto *PHI = cast<PHINode>(V); | ||||
3784 | // Fill the Phi node with values from predecessors. | ||||
3785 | for (auto *B : predecessors(PHI->getParent())) { | ||||
3786 | Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B); | ||||
3787 | assert(Map.find(PV) != Map.end() && "No predecessor Value!"); | ||||
3788 | PHI->addIncoming(ST.Get(Map[PV]), B); | ||||
3789 | } | ||||
3790 | } | ||||
3791 | Map[Current] = ST.Simplify(V); | ||||
3792 | } | ||||
3793 | } | ||||
3794 | |||||
3795 | /// Starting from the original value, recursively iterates over the def-use | ||||
3796 | /// chain up to the known ending values represented in a map. For each | ||||
3797 | /// traversed phi/select, inserts a placeholder Phi or Select. | ||||
3798 | /// Reports all newly created Phi/Select nodes by adding them to the set. | ||||
3799 | /// Also reports the order in which the values have been traversed. | ||||
3800 | void InsertPlaceholders(FoldAddrToValueMapping &Map, | ||||
3801 | SmallVectorImpl<Value *> &TraverseOrder, | ||||
3802 | SimplificationTracker &ST) { | ||||
3803 | SmallVector<Value *, 32> Worklist; | ||||
3804 | assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) && | ||||
3805 | "Address must be a Phi or Select node"); | ||||
3806 | auto *Dummy = PoisonValue::get(CommonType); | ||||
3807 | Worklist.push_back(Original); | ||||
3808 | while (!Worklist.empty()) { | ||||
3809 | Value *Current = Worklist.pop_back_val(); | ||||
3810 | // If it is already visited or it is an ending value, then skip it. | ||||
3811 | if (Map.find(Current) != Map.end()) | ||||
3812 | continue; | ||||
3813 | TraverseOrder.push_back(Current); | ||||
3814 | |||||
3815 | // CurrentValue must be a Phi node or select. All others must be covered | ||||
3816 | // by anchors. | ||||
3817 | if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { | ||||
3818 | // Is it OK to get metadata from OrigSelect?! | ||||
3819 | // Create a Select placeholder with dummy value. | ||||
3820 | SelectInst *Select = SelectInst::Create( | ||||
3821 | CurrentSelect->getCondition(), Dummy, Dummy, | ||||
3822 | CurrentSelect->getName(), CurrentSelect, CurrentSelect); | ||||
3823 | Map[Current] = Select; | ||||
3824 | ST.insertNewSelect(Select); | ||||
3825 | // We are interested in True and False values. | ||||
3826 | Worklist.push_back(CurrentSelect->getTrueValue()); | ||||
3827 | Worklist.push_back(CurrentSelect->getFalseValue()); | ||||
3828 | } else { | ||||
3829 | // It must be a Phi node then. | ||||
3830 | PHINode *CurrentPhi = cast<PHINode>(Current); | ||||
3831 | unsigned PredCount = CurrentPhi->getNumIncomingValues(); | ||||
3832 | PHINode *PHI = | ||||
3833 | PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); | ||||
3834 | Map[Current] = PHI; | ||||
3835 | ST.insertNewPhi(PHI); | ||||
3836 | append_range(Worklist, CurrentPhi->incoming_values()); | ||||
3837 | } | ||||
3838 | } | ||||
3839 | } | ||||
3840 | |||||
3841 | bool addrModeCombiningAllowed() { | ||||
3842 | if (DisableComplexAddrModes) | ||||
3843 | return false; | ||||
3844 | switch (DifferentField) { | ||||
3845 | default: | ||||
3846 | return false; | ||||
3847 | case ExtAddrMode::BaseRegField: | ||||
3848 | return AddrSinkCombineBaseReg; | ||||
3849 | case ExtAddrMode::BaseGVField: | ||||
3850 | return AddrSinkCombineBaseGV; | ||||
3851 | case ExtAddrMode::BaseOffsField: | ||||
3852 | return AddrSinkCombineBaseOffs; | ||||
3853 | case ExtAddrMode::ScaledRegField: | ||||
3854 | return AddrSinkCombineScaledReg; | ||||
3855 | } | ||||
3856 | } | ||||
3857 | }; | ||||
3858 | } // end anonymous namespace | ||||
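// Editorial sketch (a simplified model, not LLVM's real ExtAddrMode) of the
// "at most one differing field" rule that addNewAddrMode() enforces before
// the modes can be merged with a phi/select over the differing field. The
// struct, enum, and function names below are illustrative only.
#include <cassert>

namespace {
struct SimpleAM {
  long BaseOffs = 0;
  int Scale = 0;
  const void *BaseReg = nullptr;
};

enum Field { NoField, OffsField, ScaleField, RegField, MultipleFields };

Field compareAM(const SimpleAM &A, const SimpleAM &B) {
  int Diffs = (A.BaseOffs != B.BaseOffs) + (A.Scale != B.Scale) +
              (A.BaseReg != B.BaseReg);
  if (Diffs > 1)
    return MultipleFields;
  if (A.BaseOffs != B.BaseOffs)
    return OffsField;
  if (A.Scale != B.Scale)
    return ScaleField;
  return A.BaseReg != B.BaseReg ? RegField : NoField;
}
} // namespace

int main() {
  int R1 = 0, R2 = 0;
  SimpleAM A{40, 0, &R1}, B{40, 0, &R2}, C{8, 1, &R2};
  assert(compareAM(A, B) == RegField);       // combinable: merge regs via phi
  assert(compareAM(A, C) == MultipleFields); // rejected, as in addNewAddrMode
}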
3859 | |||||
3860 | /// Try adding ScaleReg*Scale to the current addressing mode. | ||||
3861 | /// Return true and update AddrMode if this addr mode is legal for the target, | ||||
3862 | /// false if not. | ||||
3863 | bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, | ||||
3864 | unsigned Depth) { | ||||
3865 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing | ||||
3866 | // mode. Just process that directly. | ||||
3867 | if (Scale == 1) | ||||
3868 | return matchAddr(ScaleReg, Depth); | ||||
3869 | |||||
3870 | // If the scale is 0, it takes nothing to add this. | ||||
3871 | if (Scale == 0) | ||||
3872 | return true; | ||||
3873 | |||||
3874 | // If we already have a scale of this value, we can add to it, otherwise, we | ||||
3875 | // need an available scale field. | ||||
3876 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) | ||||
3877 | return false; | ||||
3878 | |||||
3879 | ExtAddrMode TestAddrMode = AddrMode; | ||||
3880 | |||||
3881 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like | ||||
3882 | // [A+B + A*7] -> [B+A*8]. | ||||
3883 | TestAddrMode.Scale += Scale; | ||||
3884 | TestAddrMode.ScaledReg = ScaleReg; | ||||
3885 | |||||
3886 | // If the new address isn't legal, bail out. | ||||
3887 | if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) | ||||
3888 | return false; | ||||
3889 | |||||
3890 | // It was legal, so commit it. | ||||
3891 | AddrMode = TestAddrMode; | ||||
3892 | |||||
3893 | // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now | ||||
3894 | // to see if ScaleReg is actually X+C. If so, we can turn this into adding | ||||
3895 | // X*Scale + C*Scale to the addr mode. If we found an available IV increment, do not | ||||
3896 | // go any further: we can reuse it and cannot eliminate it. | ||||
3897 | ConstantInt *CI = nullptr; | ||||
3898 | Value *AddLHS = nullptr; | ||||
3899 | if (isa<Instruction>(ScaleReg) && // not a constant expr. | ||||
3900 | match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) && | ||||
3901 | !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) { | ||||
3902 | TestAddrMode.InBounds = false; | ||||
3903 | TestAddrMode.ScaledReg = AddLHS; | ||||
3904 | TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; | ||||
3905 | |||||
3906 | // If this addressing mode is legal, commit it and remember that we folded | ||||
3907 | // this instruction. | ||||
3908 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { | ||||
3909 | AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); | ||||
3910 | AddrMode = TestAddrMode; | ||||
3911 | return true; | ||||
3912 | } | ||||
3913 | // Restore status quo. | ||||
3914 | TestAddrMode = AddrMode; | ||||
3915 | } | ||||
3916 | |||||
3917 | // If this is an add recurrence with a constant step, return the increment | ||||
3918 | // instruction and the canonicalized step. | ||||
3919 | auto GetConstantStep = | ||||
3920 | [this](const Value *V) -> Optional<std::pair<Instruction *, APInt>> { | ||||
3921 | auto *PN = dyn_cast<PHINode>(V); | ||||
3922 | if (!PN) | ||||
3923 | return None; | ||||
3924 | auto IVInc = getIVIncrement(PN, &LI); | ||||
3925 | if (!IVInc) | ||||
3926 | return None; | ||||
3927 | // TODO: The result of the intrinsics above is two's complement. However, when | ||||
3928 | // the IV increment is expressed as add or sub, iv.next is potentially a poison | ||||
3929 | // value. If it has nuw or nsw flags, we need to make sure that these flags are | ||||
3930 | // inferrable at the point of the memory instruction. Otherwise we would be | ||||
3931 | // replacing a well-defined two's complement computation with poison. Currently, | ||||
3932 | // to avoid the potentially complex analysis needed to prove this, we reject such cases. | ||||
3933 | if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first)) | ||||
3934 | if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap()) | ||||
3935 | return None; | ||||
3936 | if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second)) | ||||
3937 | return std::make_pair(IVInc->first, ConstantStep->getValue()); | ||||
3938 | return None; | ||||
3939 | }; | ||||
3940 | |||||
3941 | // Try to account for the following special case: | ||||
3942 | // 1. ScaleReg is an induction variable; | ||||
3943 | // 2. We use it with non-zero offset; | ||||
3944 | // 3. IV's increment is available at the point of memory instruction. | ||||
3945 | // | ||||
3946 | // In this case, we may reuse the IV increment instead of the IV Phi to | ||||
3947 | // achieve the following advantages: | ||||
3948 | // 1. If IV step matches the offset, we will have no need in the offset; | ||||
3949 | // 2. Even if they don't match, we will reduce the overlap of the live IV | ||||
3950 | // and IV increment, which will potentially lead to better register | ||||
3951 | // assignment. | ||||
3952 | if (AddrMode.BaseOffs) { | ||||
3953 | if (auto IVStep = GetConstantStep(ScaleReg)) { | ||||
3954 | Instruction *IVInc = IVStep->first; | ||||
3955 | // The following assert is important to ensure a lack of infinite loops. | ||||
3956 | // This transform is (intentionally) the inverse of the one just above. | ||||
3957 | // If they don't agree on the definition of an increment, we'd alternate | ||||
3958 | // back and forth indefinitely. | ||||
3959 | assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep"); | ||||
3960 | APInt Step = IVStep->second; | ||||
3961 | APInt Offset = Step * AddrMode.Scale; | ||||
3962 | if (Offset.isSignedIntN(64)) { | ||||
3963 | TestAddrMode.InBounds = false; | ||||
3964 | TestAddrMode.ScaledReg = IVInc; | ||||
3965 | TestAddrMode.BaseOffs -= Offset.getLimitedValue(); | ||||
3966 | // If this addressing mode is legal, commit it. | ||||
3967 | // (Note that we defer the (expensive) domtree base legality check | ||||
3968 | // to the very last possible point.) | ||||
3969 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) && | ||||
3970 | getDTFn().dominates(IVInc, MemoryInst)) { | ||||
3971 | AddrModeInsts.push_back(cast<Instruction>(IVInc)); | ||||
3972 | AddrMode = TestAddrMode; | ||||
3973 | return true; | ||||
3974 | } | ||||
3975 | // Restore status quo. | ||||
3976 | TestAddrMode = AddrMode; | ||||
3977 | } | ||||
3978 | } | ||||
3979 | } | ||||
3980 | |||||
3981 | // Otherwise, just return what we have. | ||||
3982 | return true; | ||||
3983 | } | ||||
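// Editorial sketch: the two offset rewrites that matchScaledValue performs,
// checked with plain integers. First, folding ScaleReg = X + C into the mode:
//   Base + (X + C)*Scale + Offs == Base + X*Scale + (Offs + C*Scale),
// which is exactly the "BaseOffs += CI->getSExtValue() * Scale" step above.
// Second, replacing the IV phi by its increment iv.next = iv + Step:
//   Base + iv*Scale + Offs == Base + iv.next*Scale + (Offs - Step*Scale),
// matching the "BaseOffs -= Offset" step, where Offset = Step * Scale.
#include <cassert>

int main() {
  long Base = 1000, Offs = 16, Scale = 4, X = 7, C = 3;
  assert(Base + (X + C) * Scale + Offs ==
         Base + X * Scale + (Offs + C * Scale));

  long IV = 5, Step = 2, IVNext = IV + Step;
  assert(Base + IV * Scale + Offs ==
         Base + IVNext * Scale + (Offs - Step * Scale));
}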
3984 | |||||
3985 | /// This is a little filter, which returns true if an addressing computation | ||||
3986 | /// involving I might be folded into a load/store accessing it. | ||||
3987 | /// This doesn't need to be perfect, but needs to accept at least | ||||
3988 | /// the set of instructions that MatchOperationAddr can. | ||||
3989 | static bool MightBeFoldableInst(Instruction *I) { | ||||
3990 | switch (I->getOpcode()) { | ||||
3991 | case Instruction::BitCast: | ||||
3992 | case Instruction::AddrSpaceCast: | ||||
3993 | // Don't touch identity bitcasts. | ||||
3994 | if (I->getType() == I->getOperand(0)->getType()) | ||||
3995 | return false; | ||||
3996 | return I->getType()->isIntOrPtrTy(); | ||||
3997 | case Instruction::PtrToInt: | ||||
3998 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | ||||
3999 | return true; | ||||
4000 | case Instruction::IntToPtr: | ||||
4001 | // We know the input is intptr_t, so this is foldable. | ||||
4002 | return true; | ||||
4003 | case Instruction::Add: | ||||
4004 | return true; | ||||
4005 | case Instruction::Mul: | ||||
4006 | case Instruction::Shl: | ||||
4007 | // Can only handle X*C and X << C. | ||||
4008 | return isa<ConstantInt>(I->getOperand(1)); | ||||
4009 | case Instruction::GetElementPtr: | ||||
4010 | return true; | ||||
4011 | default: | ||||
4012 | return false; | ||||
4013 | } | ||||
4014 | } | ||||
4015 | |||||
4016 | /// Check whether or not \p Val is a legal instruction for \p TLI. | ||||
4017 | /// \note \p Val is assumed to be the product of some type promotion. | ||||
4018 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed | ||||
4019 | /// to be legal, as the non-promoted value would have had the same state. | ||||
4020 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, | ||||
4021 | const DataLayout &DL, Value *Val) { | ||||
4022 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); | ||||
4023 | if (!PromotedInst) | ||||
4024 | return false; | ||||
4025 | int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); | ||||
4026 | // If the ISDOpcode is undefined, it was undefined before the promotion. | ||||
4027 | if (!ISDOpcode) | ||||
4028 | return true; | ||||
4029 | // Otherwise, check if the promoted instruction is legal or not. | ||||
4030 | return TLI.isOperationLegalOrCustom( | ||||
4031 | ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); | ||||
4032 | } | ||||
4033 | |||||
4034 | namespace { | ||||
4035 | |||||
4036 | /// Helper class to perform type promotion. | ||||
4037 | class TypePromotionHelper { | ||||
4038 | /// Utility function to add a promoted instruction \p ExtOpnd to | ||||
4039 | /// \p PromotedInsts and record the type of extension we have seen. | ||||
4040 | static void addPromotedInst(InstrToOrigTy &PromotedInsts, | ||||
4041 | Instruction *ExtOpnd, bool IsSExt) { | ||||
4042 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | ||||
4043 | InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); | ||||
4044 | if (It != PromotedInsts.end()) { | ||||
4045 | // If the new extension is same as original, the information in | ||||
4046 | // PromotedInsts[ExtOpnd] is still correct. | ||||
4047 | if (It->second.getInt() == ExtTy) | ||||
4048 | return; | ||||
4049 | |||||
4050 | // Now the new extension is different from old extension, we make | ||||
4051 | // the type information invalid by setting extension type to | ||||
4052 | // BothExtension. | ||||
4053 | ExtTy = BothExtension; | ||||
4054 | } | ||||
4055 | PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); | ||||
4056 | } | ||||
4057 | |||||
4058 | /// Utility function to query the original type of instruction \p Opnd | ||||
4059 | /// with a matched extension type. If the extension doesn't match, we | ||||
4060 | /// cannot use the information we had on the original type. | ||||
4061 | /// BothExtension doesn't match any extension type. | ||||
4062 | static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, | ||||
4063 | Instruction *Opnd, bool IsSExt) { | ||||
4064 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | ||||
4065 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); | ||||
4066 | if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) | ||||
4067 | return It->second.getPointer(); | ||||
4068 | return nullptr; | ||||
4069 | } | ||||
4070 | |||||
4071 | /// Utility function to check whether or not a sign or zero extension | ||||
4072 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by | ||||
4073 | /// either using the operands of \p Inst or promoting \p Inst. | ||||
4074 | /// The type of the extension is defined by \p IsSExt. | ||||
4075 | /// In other words, check if: | ||||
4076 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. | ||||
4077 | /// #1 Promotion applies: | ||||
4078 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). | ||||
4079 | /// #2 Operand reuses: | ||||
4080 | /// ext opnd1 to ConsideredExtType. | ||||
4081 | /// \p PromotedInsts maps the instructions to their type before promotion. | ||||
4082 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, | ||||
4083 | const InstrToOrigTy &PromotedInsts, bool IsSExt); | ||||
4084 | |||||
4085 | /// Utility function to determine if \p OpIdx should be promoted when | ||||
4086 | /// promoting \p Inst. | ||||
4087 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { | ||||
4088 | return !(isa<SelectInst>(Inst) && OpIdx == 0); | ||||
4089 | } | ||||
4090 | |||||
4091 | /// Utility function to promote the operand of \p Ext when this | ||||
4092 | /// operand is a promotable trunc or sext or zext. | ||||
4093 | /// \p PromotedInsts maps the instructions to their type before promotion. | ||||
4094 | /// \p CreatedInstsCost[out] contains the cost of all instructions | ||||
4095 | /// created to promote the operand of Ext. | ||||
4096 | /// Newly added extensions are inserted in \p Exts. | ||||
4097 | /// Newly added truncates are inserted in \p Truncs. | ||||
4098 | /// Should never be called directly. | ||||
4099 | /// \return The promoted value which is used instead of Ext. | ||||
4100 | static Value *promoteOperandForTruncAndAnyExt( | ||||
4101 | Instruction *Ext, TypePromotionTransaction &TPT, | ||||
4102 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | ||||
4103 | SmallVectorImpl<Instruction *> *Exts, | ||||
4104 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); | ||||
4105 | |||||
4106 | /// Utility function to promote the operand of \p Ext when this | ||||
4107 | /// operand is promotable and is not a supported trunc or sext. | ||||
4108 | /// \p PromotedInsts maps the instructions to their type before promotion. | ||||
4109 | /// \p CreatedInstsCost[out] contains the cost of all the instructions | ||||
4110 | /// created to promote the operand of Ext. | ||||
4111 | /// Newly added extensions are inserted in \p Exts. | ||||
4112 | /// Newly added truncates are inserted in \p Truncs. | ||||
4113 | /// Should never be called directly. | ||||
4114 | /// \return The promoted value which is used instead of Ext. | ||||
4115 | static Value *promoteOperandForOther(Instruction *Ext, | ||||
4116 | TypePromotionTransaction &TPT, | ||||
4117 | InstrToOrigTy &PromotedInsts, | ||||
4118 | unsigned &CreatedInstsCost, | ||||
4119 | SmallVectorImpl<Instruction *> *Exts, | ||||
4120 | SmallVectorImpl<Instruction *> *Truncs, | ||||
4121 | const TargetLowering &TLI, bool IsSExt); | ||||
4122 | |||||
4123 | /// \see promoteOperandForOther. | ||||
4124 | static Value *signExtendOperandForOther( | ||||
4125 | Instruction *Ext, TypePromotionTransaction &TPT, | ||||
4126 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | ||||
4127 | SmallVectorImpl<Instruction *> *Exts, | ||||
4128 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | ||||
4129 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | ||||
4130 | Exts, Truncs, TLI, true); | ||||
4131 | } | ||||
4132 | |||||
4133 | /// \see promoteOperandForOther. | ||||
4134 | static Value *zeroExtendOperandForOther( | ||||
4135 | Instruction *Ext, TypePromotionTransaction &TPT, | ||||
4136 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | ||||
4137 | SmallVectorImpl<Instruction *> *Exts, | ||||
4138 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | ||||
4139 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | ||||
4140 | Exts, Truncs, TLI, false); | ||||
4141 | } | ||||
4142 | |||||
4143 | public: | ||||
4144 | /// Type for the utility function that promotes the operand of Ext. | ||||
4145 | using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, | ||||
4146 | InstrToOrigTy &PromotedInsts, | ||||
4147 | unsigned &CreatedInstsCost, | ||||
4148 | SmallVectorImpl<Instruction *> *Exts, | ||||
4149 | SmallVectorImpl<Instruction *> *Truncs, | ||||
4150 | const TargetLowering &TLI); | ||||
4151 | |||||
4152 | /// Given a sign/zero extend instruction \p Ext, return the appropriate | ||||
4153 | /// action to promote the operand of \p Ext instead of using Ext. | ||||
4154 | /// \return NULL if no promotable action is possible with the current | ||||
4155 | /// sign extension. | ||||
4156 | /// \p InsertedInsts keeps track of all the instructions inserted by the | ||||
4157 | /// other CodeGenPrepare optimizations. This information is important | ||||
4158 | /// because we do not want to promote these instructions as CodeGenPrepare | ||||
4159 | /// will reinsert them later. Thus creating an infinite loop: create/remove. | ||||
4160 | /// \p PromotedInsts maps the instructions to their type before promotion. | ||||
4161 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, | ||||
4162 | const TargetLowering &TLI, | ||||
4163 | const InstrToOrigTy &PromotedInsts); | ||||
4164 | }; | ||||
4165 | |||||
4166 | } // end anonymous namespace | ||||
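// Editorial sketch (illustrative names, not LLVM's signatures): the
// getAction() pattern in miniature. A selector returns a plain function
// pointer choosing the promotion strategy, or nullptr when no promotable
// action applies, so the caller can dispatch without knowing the strategy.
#include <cassert>

using Action = int (*)(int);

static int viaTruncOrExt(int V) { return V + 1; }
static int viaOther(int V) { return V + 2; }

static Action getActionSketch(bool IsExtLike, bool Promotable) {
  if (!Promotable)
    return nullptr; // caller must handle "no promotable action"
  return IsExtLike ? viaTruncOrExt : viaOther;
}

int main() {
  if (Action A = getActionSketch(true, true))
    assert(A(1) == 2);
  assert(getActionSketch(true, false) == nullptr);
}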
4167 | |||||
4168 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, | ||||
4169 | Type *ConsideredExtType, | ||||
4170 | const InstrToOrigTy &PromotedInsts, | ||||
4171 | bool IsSExt) { | ||||
4172 | // The promotion helper does not know how to deal with vector types yet. | ||||
4173 | // To be able to fix that, we would need to fix the places where we | ||||
4174 | // statically extend, e.g., constants and such. | ||||
4175 | if (Inst->getType()->isVectorTy()) | ||||
4176 | return false; | ||||
4177 | |||||
4178 | // We can always get through zext. | ||||
4179 | if (isa<ZExtInst>(Inst)) | ||||
4180 | return true; | ||||
4181 | |||||
4182 | // sext(sext) is ok too. | ||||
4183 | if (IsSExt && isa<SExtInst>(Inst)) | ||||
4184 | return true; | ||||
4185 | |||||
4186 | // We can get through binary operator, if it is legal. In other words, the | ||||
4187 | // binary operator must have a nuw or nsw flag. | ||||
4188 | if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst)) | ||||
4189 | if (isa<OverflowingBinaryOperator>(BinOp) && | ||||
4190 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || | ||||
4191 | (IsSExt && BinOp->hasNoSignedWrap()))) | ||||
4192 | return true; | ||||
4193 | |||||
4194 | // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) | ||||
4195 | if ((Inst->getOpcode() == Instruction::And || | ||||
4196 | Inst->getOpcode() == Instruction::Or)) | ||||
4197 | return true; | ||||
4198 | |||||
4199 | // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) | ||||
4200 | if (Inst->getOpcode() == Instruction::Xor) { | ||||
4201 | // Make sure it is not a NOT. | ||||
4202 | if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1))) | ||||
4203 | if (!Cst->getValue().isAllOnes()) | ||||
4204 | return true; | ||||
4205 | } | ||||
4206 | |||||
4207 | // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst)) | ||||
4208 | // It may change a poisoned value into a regular value, like | ||||
4209 | // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12 | ||||
4210 | // poisoned value regular value | ||||
4211 | // It should be OK since undef covers valid value. | ||||
4212 | if (Inst->getOpcode() == Instruction::LShr && !IsSExt) | ||||
4213 | return true; | ||||
4214 | |||||
4215 | // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) | ||||
4216 | // It may change a poisoned value into a regular value, like | ||||
4217 | // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 | ||||
4218 | // poisoned value regular value | ||||
4219 | // It should be OK since undef covers valid value. | ||||
4220 | if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { | ||||
4221 | const auto *ExtInst = cast<const Instruction>(*Inst->user_begin()); | ||||
4222 | if (ExtInst->hasOneUse()) { | ||||
4223 | const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin()); | ||||
4224 | if (AndInst && AndInst->getOpcode() == Instruction::And) { | ||||
4225 | const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); | ||||
4226 | if (Cst && | ||||
4227 | Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) | ||||
4228 | return true; | ||||
4229 | } | ||||
4230 | } | ||||
4231 | } | ||||
4232 | |||||
4233 | // Check if we can do the following simplification. | ||||
4234 | // ext(trunc(opnd)) --> ext(opnd) | ||||
4235 | if (!isa<TruncInst>(Inst)) | ||||
4236 | return false; | ||||
4237 | |||||
4238 | Value *OpndVal = Inst->getOperand(0); | ||||
4239 | // Check if we can use this operand in the extension. | ||||
4240 | // If the type is larger than the result type of the extension, we cannot. | ||||
4241 | if (!OpndVal->getType()->isIntegerTy() || | ||||
4242 | OpndVal->getType()->getIntegerBitWidth() > | ||||
4243 | ConsideredExtType->getIntegerBitWidth()) | ||||
4244 | return false; | ||||
4245 | |||||
4246 | // If the operand of the truncate is not an instruction, we will not have | ||||
4247 | // any information on the dropped bits. | ||||
4248 | // (Actually we could for constant but it is not worth the extra logic). | ||||
4249 | Instruction *Opnd = dyn_cast<Instruction>(OpndVal); | ||||
4250 | if (!Opnd) | ||||
4251 | return false; | ||||
4252 | |||||
4253 | // Check if the source of the type is narrow enough. | ||||
4254 | // I.e., check that trunc just drops extended bits of the same kind of | ||||
4255 | // the extension. | ||||
4256 | // #1 get the type of the operand and check the kind of the extended bits. | ||||
4257 | const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); | ||||
4258 | if (!OpndType) { | ||||
4259 | if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) | ||||
4260 | OpndType = Opnd->getOperand(0)->getType(); | ||||
4261 | else | ||||
4262 | return false; | ||||
4263 | } | ||||
4264 | |||||
4265 | // #2 check that the truncate just drops extended bits. | ||||
4266 | return Inst->getType()->getIntegerBitWidth() >= | ||||
4267 | OpndType->getIntegerBitWidth(); | ||||
4268 | } | ||||
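// Editorial sketch: canGetThrough's trunc rule, restated on plain integers.
// sext(trunc(sext x)) can use x directly when the trunc only drops bits the
// inner sext added, i.e. the trunc's result type is at least as wide as the
// inner extension's source type (the "#2" width check above).
#include <cassert>
#include <cstdint>

int main() {
  int8_t X = -5;
  int32_t Ext = X;                        // sext i8 -> i32: upper bits copy the sign bit
  int16_t Tr = static_cast<int16_t>(Ext); // trunc i32 -> i16: drops only those copies
  int64_t Out = Tr;                       // sext i16 -> i64
  assert(Out == static_cast<int64_t>(X)); // same as promoting: sext i8 -> i64 directly
}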
4269 | |||||
4270 | TypePromotionHelper::Action TypePromotionHelper::getAction( | ||||
4271 | Instruction *Ext, const SetOfInstrs &InsertedInsts, | ||||
4272 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { | ||||
4273 | assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | ||||
4274 | "Unexpected instruction type"); | ||||
4275 | Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); | ||||
4276 | Type *ExtTy = Ext->getType(); | ||||
4277 | bool IsSExt = isa<SExtInst>(Ext); | ||||
4278 | // If the operand of the extension is not an instruction, we cannot | ||||
4279 | // get through. | ||||
4280 | // If it is, check whether we can get through. | ||||
4281 | if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) | ||||
4282 | return nullptr; | ||||
4283 | |||||
4284 | // Do not promote if the operand has been added by codegenprepare. | ||||
4285 | // Otherwise, it means we are undoing an optimization that is likely to be | ||||
4286 | // redone, thus causing potential infinite loop. | ||||
4287 | if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) | ||||
4288 | return nullptr; | ||||
4289 | |||||
4290 | // SExt or Trunc instructions. | ||||
4291 | // Return the related handler. | ||||
4292 | if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || | ||||
4293 | isa<ZExtInst>(ExtOpnd)) | ||||
4294 | return promoteOperandForTruncAndAnyExt; | ||||
4295 | |||||
4296 | // Regular instruction. | ||||
4297 | // Abort early if we will have to insert non-free instructions. | ||||
4298 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) | ||||
4299 | return nullptr; | ||||
4300 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; | ||||
4301 | } | ||||
4302 | |||||
4303 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( | ||||
4304 | Instruction *SExt, TypePromotionTransaction &TPT, | ||||
4305 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | ||||
4306 | SmallVectorImpl<Instruction *> *Exts, | ||||
4307 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | ||||
4308 | // By construction, the operand of SExt is an instruction. Otherwise we cannot | ||||
4309 | // get through it and this method should not be called. | ||||
4310 | Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); | ||||
4311 | Value *ExtVal = SExt; | ||||
4312 | bool HasMergedNonFreeExt = false; | ||||
4313 | if (isa<ZExtInst>(SExtOpnd)) { | ||||
4314 | // Replace s|zext(zext(opnd)) | ||||
4315 | // => zext(opnd). | ||||
4316 | HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); | ||||
4317 | Value *ZExt = | ||||
4318 | TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); | ||||
4319 | TPT.replaceAllUsesWith(SExt, ZExt); | ||||
4320 | TPT.eraseInstruction(SExt); | ||||
4321 | ExtVal = ZExt; | ||||
4322 | } else { | ||||
4323 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) | ||||
4324 | // => z|sext(opnd). | ||||
4325 | TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); | ||||
4326 | } | ||||
4327 | CreatedInstsCost = 0; | ||||
4328 | |||||
4329 | // Remove dead code. | ||||
4330 | if (SExtOpnd->use_empty()) | ||||
4331 | TPT.eraseInstruction(SExtOpnd); | ||||
4332 | |||||
4333 | // Check if the extension is still needed. | ||||
4334 | Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); | ||||
4335 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { | ||||
4336 | if (ExtInst) { | ||||
4337 | if (Exts) | ||||
4338 | Exts->push_back(ExtInst); | ||||
4339 | CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; | ||||
4340 | } | ||||
4341 | return ExtVal; | ||||
4342 | } | ||||
4343 | |||||
4344 | // At this point we have: ext ty opnd to ty. | ||||
4345 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. | ||||
4346 | Value *NextVal = ExtInst->getOperand(0); | ||||
4347 | TPT.eraseInstruction(ExtInst, NextVal); | ||||
4348 | return NextVal; | ||||
4349 | } | ||||
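// Illustrative sketch (added annotation; hypothetical IR): given
//   %z = zext i8 %v to i16
//   %s = sext i16 %z to i32
// the code above merges the pair into a single "%e = zext i8 %v to i32";
// if the resulting ext ends up extending to its own operand's type
// (ext ty opnd to ty), it is erased and the operand is used directly.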
4350 | |||||
4351 | Value *TypePromotionHelper::promoteOperandForOther( | ||||
4352 | Instruction *Ext, TypePromotionTransaction &TPT, | ||||
4353 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | ||||
4354 | SmallVectorImpl<Instruction *> *Exts, | ||||
4355 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, | ||||
4356 | bool IsSExt) { | ||||
4357 | // By construction, the operand of Ext is an instruction. Otherwise we cannot | ||||
4358 | // get through it and this method should not be called. | ||||
4359 | Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); | ||||
4360 | CreatedInstsCost = 0; | ||||
4361 | if (!ExtOpnd->hasOneUse()) { | ||||
4362 | // ExtOpnd will be promoted. | ||||
4363 |     // All its uses except Ext will need to use a truncated value of the | ||||
4364 | // promoted version. | ||||
4365 | // Create the truncate now. | ||||
4366 | Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); | ||||
4367 | if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { | ||||
4368 | // Insert it just after the definition. | ||||
4369 | ITrunc->moveAfter(ExtOpnd); | ||||
4370 | if (Truncs) | ||||
4371 | Truncs->push_back(ITrunc); | ||||
4372 | } | ||||
4373 | |||||
4374 | TPT.replaceAllUsesWith(ExtOpnd, Trunc); | ||||
4375 | // Restore the operand of Ext (which has been replaced by the previous call | ||||
4376 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. | ||||
4377 | TPT.setOperand(Ext, 0, ExtOpnd); | ||||
4378 | } | ||||
4379 | |||||
4380 | // Get through the Instruction: | ||||
4381 | // 1. Update its type. | ||||
4382 | // 2. Replace the uses of Ext by Inst. | ||||
4383 | // 3. Extend each operand that needs to be extended. | ||||
4384 | |||||
4385 | // Remember the original type of the instruction before promotion. | ||||
4386 | // This is useful to know that the high bits are sign extended bits. | ||||
4387 | addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); | ||||
4388 | // Step #1. | ||||
4389 | TPT.mutateType(ExtOpnd, Ext->getType()); | ||||
4390 | // Step #2. | ||||
4391 | TPT.replaceAllUsesWith(Ext, ExtOpnd); | ||||
4392 | // Step #3. | ||||
4393 | Instruction *ExtForOpnd = Ext; | ||||
4394 | |||||
4395 |   LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); | ||||
4396 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; | ||||
4397 | ++OpIdx) { | ||||
4398 |     LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); | ||||
4399 | if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || | ||||
4400 | !shouldExtOperand(ExtOpnd, OpIdx)) { | ||||
4401 |       LLVM_DEBUG(dbgs() << "No need to propagate\n"); | ||||
4402 | continue; | ||||
4403 | } | ||||
4404 | // Check if we can statically extend the operand. | ||||
4405 | Value *Opnd = ExtOpnd->getOperand(OpIdx); | ||||
4406 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { | ||||
4407 |       LLVM_DEBUG(dbgs() << "Statically extend\n"); | ||||
4408 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); | ||||
4409 | APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) | ||||
4410 | : Cst->getValue().zext(BitWidth); | ||||
4411 | TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); | ||||
4412 | continue; | ||||
4413 | } | ||||
4414 |     // UndefValues are typed, so we have to statically sign extend them. | ||||
4415 | if (isa<UndefValue>(Opnd)) { | ||||
4416 |       LLVM_DEBUG(dbgs() << "Statically extend\n"); | ||||
4417 | TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); | ||||
4418 | continue; | ||||
4419 | } | ||||
4420 | |||||
4421 | // Otherwise we have to explicitly sign extend the operand. | ||||
4422 | // Check if Ext was reused to extend an operand. | ||||
4423 | if (!ExtForOpnd) { | ||||
4424 |       // If it was, create a new one. | ||||
4425 |       LLVM_DEBUG(dbgs() << "More operands to ext\n"); | ||||
4426 | Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) | ||||
4427 | : TPT.createZExt(Ext, Opnd, Ext->getType()); | ||||
4428 | if (!isa<Instruction>(ValForExtOpnd)) { | ||||
4429 | TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); | ||||
4430 | continue; | ||||
4431 | } | ||||
4432 | ExtForOpnd = cast<Instruction>(ValForExtOpnd); | ||||
4433 | } | ||||
4434 | if (Exts) | ||||
4435 | Exts->push_back(ExtForOpnd); | ||||
4436 | TPT.setOperand(ExtForOpnd, 0, Opnd); | ||||
4437 | |||||
4438 | // Move the sign extension before the insertion point. | ||||
4439 | TPT.moveBefore(ExtForOpnd, ExtOpnd); | ||||
4440 | TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); | ||||
4441 | CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); | ||||
4442 |     // If more exts are required, new instructions will have to be created. | ||||
4443 | ExtForOpnd = nullptr; | ||||
4444 | } | ||||
4445 | if (ExtForOpnd == Ext) { | ||||
4446 |     LLVM_DEBUG(dbgs() << "Extension is useless now\n"); | ||||
4447 | TPT.eraseInstruction(Ext); | ||||
4448 | } | ||||
4449 | return ExtOpnd; | ||||
4450 | } | ||||
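// Illustrative sketch (added annotation; hypothetical IR): promoting an ext
// through an add rewrites
//   %op  = add i16 %a, 1
//   %idx = sext i16 %op to i64
// into
//   %pa  = sext i16 %a to i64
//   %op  = add i64 %pa, 1
// with a trunc of %op inserted right after its definition for any other
// narrow users, as done at the top of this function.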
4451 | |||||
4452 | /// Check whether or not promoting an instruction to a wider type is profitable. | ||||
4453 | /// \p NewCost gives the cost of extension instructions created by the | ||||
4454 | /// promotion. | ||||
4455 | /// \p OldCost gives the cost of extension instructions before the promotion | ||||
4456 | /// plus the number of instructions that have been | ||||
4457 | /// matched in the addressing mode thanks to the promotion. | ||||
4458 | /// \p PromotedOperand is the value that has been promoted. | ||||
4459 | /// \return True if the promotion is profitable, false otherwise. | ||||
4460 | bool AddressingModeMatcher::isPromotionProfitable( | ||||
4461 | unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { | ||||
4462 |   LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost | ||||
4463 |                     << '\n'); | ||||
4464 | // The cost of the new extensions is greater than the cost of the | ||||
4465 | // old extension plus what we folded. | ||||
4466 | // This is not profitable. | ||||
4467 | if (NewCost > OldCost) | ||||
4468 | return false; | ||||
4469 | if (NewCost < OldCost) | ||||
4470 | return true; | ||||
4471 | // The promotion is neutral but it may help folding the sign extension in | ||||
4472 | // loads for instance. | ||||
4473 | // Check that we did not create an illegal instruction. | ||||
4474 | return isPromotedInstructionLegal(TLI, DL, PromotedOperand); | ||||
4475 | } | ||||
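// Worked example (added annotation; illustrative numbers): if promotion
// creates one non-free ext (NewCost = 1) while the original ext cost 1 and
// one extra instruction was folded into the addressing mode
// (OldCost = 1 + 1 = 2), then NewCost < OldCost and the promotion is deemed
// profitable; at NewCost == OldCost the decision falls through to the
// legality check on the promoted instruction.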
4476 | |||||
4477 | /// Given an instruction or constant expr, see if we can fold the operation | ||||
4478 | /// into the addressing mode. If so, update the addressing mode and return | ||||
4479 | /// true, otherwise return false without modifying AddrMode. | ||||
4480 | /// If \p MovedAway is not NULL, it indicates whether or not AddrInst has to | ||||
4481 | /// be folded into the addressing mode on success. | ||||
4482 | /// If \p MovedAway == true, \p AddrInst will not be part of the addressing | ||||
4483 | /// mode because it has been moved away. | ||||
4484 | /// Thus AddrInst must not be added to the matched instructions. | ||||
4485 | /// This state can happen when AddrInst is a sext, since it may be moved away. | ||||
4486 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must | ||||
4487 | /// not be referenced anymore. | ||||
4488 | bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, | ||||
4489 | unsigned Depth, | ||||
4490 | bool *MovedAway) { | ||||
4491 | // Avoid exponential behavior on extremely deep expression trees. | ||||
4492 | if (Depth >= 5) | ||||
4493 | return false; | ||||
4494 | |||||
4495 | // By default, all matched instructions stay in place. | ||||
4496 | if (MovedAway) | ||||
4497 | *MovedAway = false; | ||||
4498 | |||||
4499 | switch (Opcode) { | ||||
4500 | case Instruction::PtrToInt: | ||||
4501 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | ||||
4502 | return matchAddr(AddrInst->getOperand(0), Depth); | ||||
4503 | case Instruction::IntToPtr: { | ||||
4504 | auto AS = AddrInst->getType()->getPointerAddressSpace(); | ||||
4505 | auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); | ||||
4506 | // This inttoptr is a no-op if the integer type is pointer sized. | ||||
4507 | if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) | ||||
4508 | return matchAddr(AddrInst->getOperand(0), Depth); | ||||
4509 | return false; | ||||
4510 | } | ||||
4511 | case Instruction::BitCast: | ||||
4512 | // BitCast is always a noop, and we can handle it as long as it is | ||||
4513 | // int->int or pointer->pointer (we don't want int<->fp or something). | ||||
4514 | if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && | ||||
4515 | // Don't touch identity bitcasts. These were probably put here by LSR, | ||||
4516 | // and we don't want to mess around with them. Assume it knows what it | ||||
4517 | // is doing. | ||||
4518 | AddrInst->getOperand(0)->getType() != AddrInst->getType()) | ||||
4519 | return matchAddr(AddrInst->getOperand(0), Depth); | ||||
4520 | return false; | ||||
4521 | case Instruction::AddrSpaceCast: { | ||||
4522 | unsigned SrcAS = | ||||
4523 | AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); | ||||
4524 | unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); | ||||
4525 | if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS)) | ||||
4526 | return matchAddr(AddrInst->getOperand(0), Depth); | ||||
4527 | return false; | ||||
4528 | } | ||||
4529 | case Instruction::Add: { | ||||
4530 | // Check to see if we can merge in the RHS then the LHS. If so, we win. | ||||
4531 | ExtAddrMode BackupAddrMode = AddrMode; | ||||
4532 | unsigned OldSize = AddrModeInsts.size(); | ||||
4533 | // Start a transaction at this point. | ||||
4534 | // The LHS may match but not the RHS. | ||||
4535 |     // Therefore, we need a higher level restoration point to undo a partially | ||||
4536 |     // matched operation. | ||||
4537 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
4538 | TPT.getRestorationPoint(); | ||||
4539 | |||||
4540 | AddrMode.InBounds = false; | ||||
4541 | if (matchAddr(AddrInst->getOperand(1), Depth + 1) && | ||||
4542 | matchAddr(AddrInst->getOperand(0), Depth + 1)) | ||||
4543 | return true; | ||||
4544 | |||||
4545 | // Restore the old addr mode info. | ||||
4546 | AddrMode = BackupAddrMode; | ||||
4547 | AddrModeInsts.resize(OldSize); | ||||
4548 | TPT.rollback(LastKnownGood); | ||||
4549 | |||||
4550 | // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. | ||||
4551 | if (matchAddr(AddrInst->getOperand(0), Depth + 1) && | ||||
4552 | matchAddr(AddrInst->getOperand(1), Depth + 1)) | ||||
4553 | return true; | ||||
4554 | |||||
4555 | // Otherwise we definitely can't merge the ADD in. | ||||
4556 | AddrMode = BackupAddrMode; | ||||
4557 | AddrModeInsts.resize(OldSize); | ||||
4558 | TPT.rollback(LastKnownGood); | ||||
4559 | break; | ||||
4560 | } | ||||
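// Illustrative note (added annotation): for "add %base, %idx" the order above
// matters; e.g. with %idx = "shl %i, 3", matching the RHS first lets %idx
// become the scaled register ([%base + 8*%i]) before %base claims the base
// register, and the transaction rollback undoes any half-completed match
// before the operands are retried in the opposite order.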
4561 | // case Instruction::Or: | ||||
4562 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. | ||||
4563 | // break; | ||||
4564 | case Instruction::Mul: | ||||
4565 | case Instruction::Shl: { | ||||
4566 | // Can only handle X*C and X << C. | ||||
4567 | AddrMode.InBounds = false; | ||||
4568 | ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); | ||||
4569 | if (!RHS || RHS->getBitWidth() > 64) | ||||
4570 | return false; | ||||
4571 | int64_t Scale = Opcode == Instruction::Shl | ||||
4572 | ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1) | ||||
4573 | : RHS->getSExtValue(); | ||||
4574 | |||||
4575 | return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); | ||||
4576 | } | ||||
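// Illustrative sketch (added annotation; hypothetical IR): "shl i64 %x, 3" is
// treated as %x scaled by 1 << 3 = 8, i.e. the candidate form [base + 8*%x],
// while "mul i64 %x, 12" yields Scale = 12; matchScaledValue then asks the
// target whether that scale is legal for the access type.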
4577 | case Instruction::GetElementPtr: { | ||||
4578 |     // Scan the GEP. We check whether it contains constant offsets and at most | ||||
4579 |     // one variable offset. | ||||
4580 | int VariableOperand = -1; | ||||
4581 | unsigned VariableScale = 0; | ||||
4582 | |||||
4583 | int64_t ConstantOffset = 0; | ||||
4584 | gep_type_iterator GTI = gep_type_begin(AddrInst); | ||||
4585 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { | ||||
4586 | if (StructType *STy = GTI.getStructTypeOrNull()) { | ||||
4587 | const StructLayout *SL = DL.getStructLayout(STy); | ||||
4588 | unsigned Idx = | ||||
4589 | cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); | ||||
4590 | ConstantOffset += SL->getElementOffset(Idx); | ||||
4591 | } else { | ||||
4592 | TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType()); | ||||
4593 | if (TS.isNonZero()) { | ||||
4594 | // The optimisations below currently only work for fixed offsets. | ||||
4595 | if (TS.isScalable()) | ||||
4596 | return false; | ||||
4597 | int64_t TypeSize = TS.getFixedSize(); | ||||
4598 | if (ConstantInt *CI = | ||||
4599 | dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { | ||||
4600 | const APInt &CVal = CI->getValue(); | ||||
4601 | if (CVal.getMinSignedBits() <= 64) { | ||||
4602 | ConstantOffset += CVal.getSExtValue() * TypeSize; | ||||
4603 | continue; | ||||
4604 | } | ||||
4605 | } | ||||
4606 | // We only allow one variable index at the moment. | ||||
4607 | if (VariableOperand != -1) | ||||
4608 | return false; | ||||
4609 | |||||
4610 | // Remember the variable index. | ||||
4611 | VariableOperand = i; | ||||
4612 | VariableScale = TypeSize; | ||||
4613 | } | ||||
4614 | } | ||||
4615 | } | ||||
4616 | |||||
4617 | // A common case is for the GEP to only do a constant offset. In this case, | ||||
4618 | // just add it to the disp field and check validity. | ||||
4619 | if (VariableOperand == -1) { | ||||
4620 | AddrMode.BaseOffs += ConstantOffset; | ||||
4621 | if (ConstantOffset == 0 || | ||||
4622 | TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { | ||||
4623 | // Check to see if we can fold the base pointer in too. | ||||
4624 | if (matchAddr(AddrInst->getOperand(0), Depth + 1)) { | ||||
4625 | if (!cast<GEPOperator>(AddrInst)->isInBounds()) | ||||
4626 | AddrMode.InBounds = false; | ||||
4627 | return true; | ||||
4628 | } | ||||
4629 | } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && | ||||
4630 | TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && | ||||
4631 | ConstantOffset > 0) { | ||||
4632 | // Record GEPs with non-zero offsets as candidates for splitting in the | ||||
4633 | // event that the offset cannot fit into the r+i addressing mode. | ||||
4634 | // Simple and common case that only one GEP is used in calculating the | ||||
4635 | // address for the memory access. | ||||
4636 | Value *Base = AddrInst->getOperand(0); | ||||
4637 | auto *BaseI = dyn_cast<Instruction>(Base); | ||||
4638 | auto *GEP = cast<GetElementPtrInst>(AddrInst); | ||||
4639 | if (isa<Argument>(Base) || isa<GlobalValue>(Base) || | ||||
4640 | (BaseI && !isa<CastInst>(BaseI) && | ||||
4641 | !isa<GetElementPtrInst>(BaseI))) { | ||||
4642 | // Make sure the parent block allows inserting non-PHI instructions | ||||
4643 | // before the terminator. | ||||
4644 | BasicBlock *Parent = | ||||
4645 | BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); | ||||
4646 | if (!Parent->getTerminator()->isEHPad()) | ||||
4647 | LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); | ||||
4648 | } | ||||
4649 | } | ||||
4650 | AddrMode.BaseOffs -= ConstantOffset; | ||||
4651 | return false; | ||||
4652 | } | ||||
4653 | |||||
4654 | // Save the valid addressing mode in case we can't match. | ||||
4655 | ExtAddrMode BackupAddrMode = AddrMode; | ||||
4656 | unsigned OldSize = AddrModeInsts.size(); | ||||
4657 | |||||
4658 | // See if the scale and offset amount is valid for this target. | ||||
4659 | AddrMode.BaseOffs += ConstantOffset; | ||||
4660 | if (!cast<GEPOperator>(AddrInst)->isInBounds()) | ||||
4661 | AddrMode.InBounds = false; | ||||
4662 | |||||
4663 | // Match the base operand of the GEP. | ||||
4664 | if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) { | ||||
4665 | // If it couldn't be matched, just stuff the value in a register. | ||||
4666 | if (AddrMode.HasBaseReg) { | ||||
4667 | AddrMode = BackupAddrMode; | ||||
4668 | AddrModeInsts.resize(OldSize); | ||||
4669 | return false; | ||||
4670 | } | ||||
4671 | AddrMode.HasBaseReg = true; | ||||
4672 | AddrMode.BaseReg = AddrInst->getOperand(0); | ||||
4673 | } | ||||
4674 | |||||
4675 | // Match the remaining variable portion of the GEP. | ||||
4676 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, | ||||
4677 | Depth)) { | ||||
4678 | // If it couldn't be matched, try stuffing the base into a register | ||||
4679 | // instead of matching it, and retrying the match of the scale. | ||||
4680 | AddrMode = BackupAddrMode; | ||||
4681 | AddrModeInsts.resize(OldSize); | ||||
4682 | if (AddrMode.HasBaseReg) | ||||
4683 | return false; | ||||
4684 | AddrMode.HasBaseReg = true; | ||||
4685 | AddrMode.BaseReg = AddrInst->getOperand(0); | ||||
4686 | AddrMode.BaseOffs += ConstantOffset; | ||||
4687 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), | ||||
4688 | VariableScale, Depth)) { | ||||
4689 | // If even that didn't work, bail. | ||||
4690 | AddrMode = BackupAddrMode; | ||||
4691 | AddrModeInsts.resize(OldSize); | ||||
4692 | return false; | ||||
4693 | } | ||||
4694 | } | ||||
4695 | |||||
4696 | return true; | ||||
4697 | } | ||||
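// Illustrative sketch (added annotation; hypothetical IR): for
//   %p = getelementptr inbounds i32, ptr %base, i64 %i
// the element size gives VariableScale = 4 with %i as the variable operand,
// while a GEP whose indices are all constants (e.g. a struct field access)
// folds entirely into AddrMode.BaseOffs as a constant displacement.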
4698 | case Instruction::SExt: | ||||
4699 | case Instruction::ZExt: { | ||||
4700 | Instruction *Ext = dyn_cast<Instruction>(AddrInst); | ||||
4701 | if (!Ext) | ||||
4702 | return false; | ||||
4703 | |||||
4704 | // Try to move this ext out of the way of the addressing mode. | ||||
4705 | // Ask for a method for doing so. | ||||
4706 | TypePromotionHelper::Action TPH = | ||||
4707 | TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); | ||||
4708 | if (!TPH) | ||||
4709 | return false; | ||||
4710 | |||||
4711 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
4712 | TPT.getRestorationPoint(); | ||||
4713 | unsigned CreatedInstsCost = 0; | ||||
4714 | unsigned ExtCost = !TLI.isExtFree(Ext); | ||||
4715 | Value *PromotedOperand = | ||||
4716 | TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); | ||||
4717 | // SExt has been moved away. | ||||
4718 | // Thus either it will be rematched later in the recursive calls or it is | ||||
4719 | // gone. Anyway, we must not fold it into the addressing mode at this point. | ||||
4720 | // E.g., | ||||
4721 | // op = add opnd, 1 | ||||
4722 | // idx = ext op | ||||
4723 | // addr = gep base, idx | ||||
4724 | // is now: | ||||
4725 | // promotedOpnd = ext opnd <- no match here | ||||
4726 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) | ||||
4727 | // addr = gep base, op <- match | ||||
4728 | if (MovedAway) | ||||
4729 | *MovedAway = true; | ||||
4730 | |||||
4731 |     assert(PromotedOperand && | ||||
4732 |            "TypePromotionHelper should have filtered out those cases"); | ||||
4733 | |||||
4734 | ExtAddrMode BackupAddrMode = AddrMode; | ||||
4735 | unsigned OldSize = AddrModeInsts.size(); | ||||
4736 | |||||
4737 | if (!matchAddr(PromotedOperand, Depth) || | ||||
4738 | // The total of the new cost is equal to the cost of the created | ||||
4739 | // instructions. | ||||
4740 | // The total of the old cost is equal to the cost of the extension plus | ||||
4741 | // what we have saved in the addressing mode. | ||||
4742 | !isPromotionProfitable(CreatedInstsCost, | ||||
4743 | ExtCost + (AddrModeInsts.size() - OldSize), | ||||
4744 | PromotedOperand)) { | ||||
4745 | AddrMode = BackupAddrMode; | ||||
4746 | AddrModeInsts.resize(OldSize); | ||||
4747 |       LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); | ||||
4748 | TPT.rollback(LastKnownGood); | ||||
4749 | return false; | ||||
4750 | } | ||||
4751 | return true; | ||||
4752 | } | ||||
4753 | } | ||||
4754 | return false; | ||||
4755 | } | ||||
4756 | |||||
4757 | /// If we can, try to add the value of 'Addr' into the current addressing mode. | ||||
4758 | /// If Addr can't be added to AddrMode this returns false and leaves AddrMode | ||||
4759 | /// unmodified. This assumes that Addr is either a pointer type or intptr_t | ||||
4760 | /// for the target. | ||||
4761 | /// | ||||
4762 | bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { | ||||
4763 | // Start a transaction at this point that we will rollback if the matching | ||||
4764 | // fails. | ||||
4765 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
4766 | TPT.getRestorationPoint(); | ||||
4767 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { | ||||
4768 | if (CI->getValue().isSignedIntN(64)) { | ||||
4769 | // Fold in immediates if legal for the target. | ||||
4770 | AddrMode.BaseOffs += CI->getSExtValue(); | ||||
4771 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | ||||
4772 | return true; | ||||
4773 | AddrMode.BaseOffs -= CI->getSExtValue(); | ||||
4774 | } | ||||
4775 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { | ||||
4776 | // If this is a global variable, try to fold it into the addressing mode. | ||||
4777 | if (!AddrMode.BaseGV) { | ||||
4778 | AddrMode.BaseGV = GV; | ||||
4779 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | ||||
4780 | return true; | ||||
4781 | AddrMode.BaseGV = nullptr; | ||||
4782 | } | ||||
4783 | } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { | ||||
4784 | ExtAddrMode BackupAddrMode = AddrMode; | ||||
4785 | unsigned OldSize = AddrModeInsts.size(); | ||||
4786 | |||||
4787 | // Check to see if it is possible to fold this operation. | ||||
4788 | bool MovedAway = false; | ||||
4789 | if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { | ||||
4790 | // This instruction may have been moved away. If so, there is nothing | ||||
4791 | // to check here. | ||||
4792 | if (MovedAway) | ||||
4793 | return true; | ||||
4794 | // Okay, it's possible to fold this. Check to see if it is actually | ||||
4795 | // *profitable* to do so. We use a simple cost model to avoid increasing | ||||
4796 | // register pressure too much. | ||||
4797 | if (I->hasOneUse() || | ||||
4798 | isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { | ||||
4799 | AddrModeInsts.push_back(I); | ||||
4800 | return true; | ||||
4801 | } | ||||
4802 | |||||
4803 | // It isn't profitable to do this, roll back. | ||||
4804 | AddrMode = BackupAddrMode; | ||||
4805 | AddrModeInsts.resize(OldSize); | ||||
4806 | TPT.rollback(LastKnownGood); | ||||
4807 | } | ||||
4808 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { | ||||
4809 | if (matchOperationAddr(CE, CE->getOpcode(), Depth)) | ||||
4810 | return true; | ||||
4811 | TPT.rollback(LastKnownGood); | ||||
4812 | } else if (isa<ConstantPointerNull>(Addr)) { | ||||
4813 | // Null pointer gets folded without affecting the addressing mode. | ||||
4814 | return true; | ||||
4815 | } | ||||
4816 | |||||
4817 |   // Worst case, the target should support [reg] addressing modes. :) | ||||
4818 | if (!AddrMode.HasBaseReg) { | ||||
4819 | AddrMode.HasBaseReg = true; | ||||
4820 | AddrMode.BaseReg = Addr; | ||||
4821 | // Still check for legality in case the target supports [imm] but not [i+r]. | ||||
4822 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | ||||
4823 | return true; | ||||
4824 | AddrMode.HasBaseReg = false; | ||||
4825 | AddrMode.BaseReg = nullptr; | ||||
4826 | } | ||||
4827 | |||||
4828 | // If the base register is already taken, see if we can do [r+r]. | ||||
4829 | if (AddrMode.Scale == 0) { | ||||
4830 | AddrMode.Scale = 1; | ||||
4831 | AddrMode.ScaledReg = Addr; | ||||
4832 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | ||||
4833 | return true; | ||||
4834 | AddrMode.Scale = 0; | ||||
4835 | AddrMode.ScaledReg = nullptr; | ||||
4836 | } | ||||
4837 | // Couldn't match. | ||||
4838 | TPT.rollback(LastKnownGood); | ||||
4839 | return false; | ||||
4840 | } | ||||
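// Illustrative note (added annotation): the fallback order above means a
// plain pointer %p is first tried as a base register ([%p]); if the base
// register is already occupied, it is retried as a scaled register with
// Scale = 1 ([base + 1*%p]) before the whole match is rolled back.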
4841 | |||||
4842 | /// Check to see if all uses of OpVal by the specified inline asm call are due | ||||
4843 | /// to memory operands. If so, return true, otherwise return false. | ||||
4844 | static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, | ||||
4845 | const TargetLowering &TLI, | ||||
4846 | const TargetRegisterInfo &TRI) { | ||||
4847 | const Function *F = CI->getFunction(); | ||||
4848 | TargetLowering::AsmOperandInfoVector TargetConstraints = | ||||
4849 | TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI); | ||||
4850 | |||||
4851 | for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) { | ||||
4852 | // Compute the constraint code and ConstraintType to use. | ||||
4853 | TLI.ComputeConstraintToUse(OpInfo, SDValue()); | ||||
4854 | |||||
4855 | // If this asm operand is our Value*, and if it isn't an indirect memory | ||||
4856 | // operand, we can't fold it! TODO: Also handle C_Address? | ||||
4857 | if (OpInfo.CallOperandVal == OpVal && | ||||
4858 | (OpInfo.ConstraintType != TargetLowering::C_Memory || | ||||
4859 | !OpInfo.isIndirect)) | ||||
4860 | return false; | ||||
4861 | } | ||||
4862 | |||||
4863 | return true; | ||||
4864 | } | ||||
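// Illustrative sketch (added annotation; hypothetical C++): for inline asm
// such as
//   asm volatile("incl %0" : "+m"(*ptr));
// the "m" constraint is an indirect C_Memory operand, so the address
// computation feeding *ptr can still be folded; a register constraint like
// "r" on the same value would make this function return false.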
4865 | |||||
4866 | // Max number of memory uses to look at before aborting the search to conserve | ||||
4867 | // compile time. | ||||
4868 | static constexpr int MaxMemoryUsesToScan = 20; | ||||
4869 | |||||
4870 | /// Recursively walk all the uses of I until we find a memory use. | ||||
4871 | /// If we find an obviously non-foldable instruction, return true. | ||||
4872 | /// Add accessed addresses and types to MemoryUses. | ||||
4873 | static bool FindAllMemoryUses( | ||||
4874 | Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses, | ||||
4875 | SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, | ||||
4876 | const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, | ||||
4877 | BlockFrequencyInfo *BFI, int SeenInsts = 0) { | ||||
4878 | // If we already considered this instruction, we're done. | ||||
4879 | if (!ConsideredInsts.insert(I).second) | ||||
4880 | return false; | ||||
4881 | |||||
4882 | // If this is an obviously unfoldable instruction, bail out. | ||||
4883 | if (!MightBeFoldableInst(I)) | ||||
4884 | return true; | ||||
4885 | |||||
4886 | // Loop over all the uses, recursively processing them. | ||||
4887 | for (Use &U : I->uses()) { | ||||
4888 | // Conservatively return true if we're seeing a large number or a deep chain | ||||
4889 | // of users. This avoids excessive compilation times in pathological cases. | ||||
4890 | if (SeenInsts++ >= MaxMemoryUsesToScan) | ||||
4891 | return true; | ||||
4892 | |||||
4893 | Instruction *UserI = cast<Instruction>(U.getUser()); | ||||
4894 | if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { | ||||
4895 | MemoryUses.push_back({U.get(), LI->getType()}); | ||||
4896 | continue; | ||||
4897 | } | ||||
4898 | |||||
4899 | if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { | ||||
4900 | if (U.getOperandNo() != StoreInst::getPointerOperandIndex()) | ||||
4901 | return true; // Storing addr, not into addr. | ||||
4902 | MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()}); | ||||
4903 | continue; | ||||
4904 | } | ||||
4905 | |||||
4906 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { | ||||
4907 | if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex()) | ||||
4908 | return true; // Storing addr, not into addr. | ||||
4909 | MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()}); | ||||
4910 | continue; | ||||
4911 | } | ||||
4912 | |||||
4913 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { | ||||
4914 | if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex()) | ||||
4915 | return true; // Storing addr, not into addr. | ||||
4916 | MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()}); | ||||
4917 | continue; | ||||
4918 | } | ||||
4919 | |||||
4920 | if (CallInst *CI = dyn_cast<CallInst>(UserI)) { | ||||
4921 | if (CI->hasFnAttr(Attribute::Cold)) { | ||||
4922 | // If this is a cold call, we can sink the addressing calculation into | ||||
4924 |         // the cold path. See optimizeCallInst. | ||||
4924 | bool OptForSize = | ||||
4925 | OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI); | ||||
4926 | if (!OptForSize) | ||||
4927 | continue; | ||||
4928 | } | ||||
4929 | |||||
4930 | InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand()); | ||||
4931 | if (!IA) | ||||
4932 | return true; | ||||
4933 | |||||
4934 | // If this is a memory operand, we're cool, otherwise bail out. | ||||
4935 | if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) | ||||
4936 | return true; | ||||
4937 | continue; | ||||
4938 | } | ||||
4939 | |||||
4940 | if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, | ||||
4941 | PSI, BFI, SeenInsts)) | ||||
4942 | return true; | ||||
4943 | } | ||||
4944 | |||||
4945 | return false; | ||||
4946 | } | ||||
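// Illustrative note (added annotation; hypothetical IR): for an address %a
// used by "load %a" and "store %v, %a" (as the pointer operand), both uses
// land in MemoryUses; a use such as "store %a, %q" (storing the address
// itself) or a compare on %a makes the walk return true and aborts the
// caller's profitability check.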
4947 | |||||
4948 | /// Return true if Val is already known to be live at the use site that we're | ||||
4949 | /// folding it into. If so, there is no cost to include it in the addressing | ||||
4950 | /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the | ||||
4951 | /// instruction already. | ||||
4952 | bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val, | ||||
4953 | Value *KnownLive1, | ||||
4954 | Value *KnownLive2) { | ||||
4955 | // If Val is either of the known-live values, we know it is live! | ||||
4956 | if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) | ||||
4957 | return true; | ||||
4958 | |||||
4959 | // All values other than instructions and arguments (e.g. constants) are live. | ||||
4960 | if (!isa<Instruction>(Val) && !isa<Argument>(Val)) | ||||
4961 | return true; | ||||
4962 | |||||
4963 |   // If Val is a constant sized alloca in the entry block, it is live; this is | ||||
4964 | // true because it is just a reference to the stack/frame pointer, which is | ||||
4965 | // live for the whole function. | ||||
4966 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) | ||||
4967 | if (AI->isStaticAlloca()) | ||||
4968 | return true; | ||||
4969 | |||||
4970 | // Check to see if this value is already used in the memory instruction's | ||||
4971 | // block. If so, it's already live into the block at the very least, so we | ||||
4972 | // can reasonably fold it. | ||||
4973 | return Val->isUsedInBasicBlock(MemoryInst->getParent()); | ||||
4974 | } | ||||
4975 | |||||
4976 | /// It is possible for the addressing mode of the machine to fold the specified | ||||
4977 | /// instruction into a load or store that ultimately uses it. | ||||
4978 | /// However, the specified instruction has multiple uses. | ||||
4979 | /// Given this, it may actually increase register pressure to fold it | ||||
4980 | /// into the load. For example, consider this code: | ||||
4981 | /// | ||||
4982 | /// X = ... | ||||
4983 | /// Y = X+1 | ||||
4984 | /// use(Y) -> nonload/store | ||||
4985 | /// Z = Y+1 | ||||
4986 | /// load Z | ||||
4987 | /// | ||||
4988 | /// In this case, Y has multiple uses, and can be folded into the load of Z | ||||
4989 | /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to | ||||
4990 | /// be live at the use(Y) line. If we don't fold Y into load Z, we use one | ||||
4991 | /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the | ||||
4992 | /// number of computations either. | ||||
4993 | /// | ||||
4994 | /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If | ||||
4995 | /// X was live across 'load Z' for other reasons, we actually *would* want to | ||||
4996 | /// fold the addressing mode in the Z case. This would make Y die earlier. | ||||
4997 | bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode( | ||||
4998 | Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) { | ||||
4999 | if (IgnoreProfitability) | ||||
5000 | return true; | ||||
5001 | |||||
5002 | // AMBefore is the addressing mode before this instruction was folded into it, | ||||
5003 | // and AMAfter is the addressing mode after the instruction was folded. Get | ||||
5004 | // the set of registers referenced by AMAfter and subtract out those | ||||
5005 | // referenced by AMBefore: this is the set of values which folding in this | ||||
5006 | // address extends the lifetime of. | ||||
5007 | // | ||||
5008 | // Note that there are only two potential values being referenced here, | ||||
5009 | // BaseReg and ScaleReg (global addresses are always available, as are any | ||||
5010 | // folded immediates). | ||||
5011 | Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; | ||||
5012 | |||||
5013 | // If the BaseReg or ScaledReg was referenced by the previous addrmode, their | ||||
5014 | // lifetime wasn't extended by adding this instruction. | ||||
5015 | if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | ||||
5016 | BaseReg = nullptr; | ||||
5017 | if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | ||||
5018 | ScaledReg = nullptr; | ||||
5019 | |||||
5020 |   // If folding this instruction (and its subexprs) didn't extend any live | ||||
5021 | // ranges, we're ok with it. | ||||
5022 | if (!BaseReg && !ScaledReg) | ||||
5023 | return true; | ||||
5024 | |||||
5025 | // If all uses of this instruction can have the address mode sunk into them, | ||||
5026 | // we can remove the addressing mode and effectively trade one live register | ||||
5027 | // for another (at worst.) In this context, folding an addressing mode into | ||||
5028 | // the use is just a particularly nice way of sinking it. | ||||
5029 | SmallVector<std::pair<Value *, Type *>, 16> MemoryUses; | ||||
5030 | SmallPtrSet<Instruction *, 16> ConsideredInsts; | ||||
5031 | if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, PSI, | ||||
5032 | BFI)) | ||||
5033 | return false; // Has a non-memory, non-foldable use! | ||||
5034 | |||||
5035 | // Now that we know that all uses of this instruction are part of a chain of | ||||
5036 | // computation involving only operations that could theoretically be folded | ||||
5037 | // into a memory use, loop over each of these memory operation uses and see | ||||
5038 | // if they could *actually* fold the instruction. The assumption is that | ||||
5039 | // addressing modes are cheap and that duplicating the computation involved | ||||
5040 | // many times is worthwhile, even on a fastpath. For sinking candidates | ||||
5041 | // (i.e. cold call sites), this serves as a way to prevent excessive code | ||||
5042 |   // growth since most architectures have some reasonably small and fast way to | ||||
5043 |   // compute an effective address. (e.g., LEA on x86) | ||||
5044 | SmallVector<Instruction *, 32> MatchedAddrModeInsts; | ||||
5045 | for (const std::pair<Value *, Type *> &Pair : MemoryUses) { | ||||
5046 | Value *Address = Pair.first; | ||||
5047 | Type *AddressAccessTy = Pair.second; | ||||
5048 | unsigned AS = Address->getType()->getPointerAddressSpace(); | ||||
5049 | |||||
5050 | // Do a match against the root of this address, ignoring profitability. This | ||||
5051 | // will tell us if the addressing mode for the memory operation will | ||||
5052 | // *actually* cover the shared instruction. | ||||
5053 | ExtAddrMode Result; | ||||
5054 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | ||||
5055 | 0); | ||||
5056 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
5057 | TPT.getRestorationPoint(); | ||||
5058 | AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn, | ||||
5059 | AddressAccessTy, AS, MemoryInst, Result, | ||||
5060 | InsertedInsts, PromotedInsts, TPT, | ||||
5061 | LargeOffsetGEP, OptSize, PSI, BFI); | ||||
5062 | Matcher.IgnoreProfitability = true; | ||||
5063 | bool Success = Matcher.matchAddr(Address, 0); | ||||
5064 | (void)Success; | ||||
5065 |     assert(Success && "Couldn't select *anything*?"); | ||||
5066 | |||||
5067 |     // The match was only to check profitability; the changes made are not | ||||
5068 |     // part of the original matcher. Therefore, they should be dropped, | ||||
5069 |     // otherwise the original matcher will not present the right state. | ||||
5070 | TPT.rollback(LastKnownGood); | ||||
5071 | |||||
5072 | // If the match didn't cover I, then it won't be shared by it. | ||||
5073 | if (!is_contained(MatchedAddrModeInsts, I)) | ||||
5074 | return false; | ||||
5075 | |||||
5076 | MatchedAddrModeInsts.clear(); | ||||
5077 | } | ||||
5078 | |||||
5079 | return true; | ||||
5080 | } | ||||
5081 | |||||
5082 | /// Return true if the specified values are defined in a | ||||
5083 | /// different basic block than BB. | ||||
5084 | static bool IsNonLocalValue(Value *V, BasicBlock *BB) { | ||||
5085 | if (Instruction *I = dyn_cast<Instruction>(V)) | ||||
5086 | return I->getParent() != BB; | ||||
5087 | return false; | ||||
5088 | } | ||||
5089 | |||||
5090 | /// Sink addressing mode computation immediately before MemoryInst if doing so | ||||
5091 | /// can be done without increasing register pressure. The need for the | ||||
5092 | /// register pressure constraint means this can end up being an all or nothing | ||||
5093 | /// decision for all uses of the same addressing computation. | ||||
5094 | /// | ||||
5095 | /// Load and Store Instructions often have addressing modes that can do | ||||
5096 | /// significant amounts of computation. As such, instruction selection will try | ||||
5097 | /// to get the load or store to do as much computation as possible for the | ||||
5098 | /// program. The problem is that isel can only see within a single block. As | ||||
5099 | /// such, we sink as much legal addressing mode work into the block as possible. | ||||
5100 | /// | ||||
5101 | /// This method is used to optimize both load/store and inline asms with memory | ||||
5102 | /// operands. It's also used to sink addressing computations feeding into cold | ||||
5103 | /// call sites into their (cold) basic block. | ||||
5104 | /// | ||||
5105 | /// The motivation for handling sinking into cold blocks is that doing so can | ||||
5106 | /// both enable other address mode sinking (by satisfying the register pressure | ||||
5107 | /// constraint above), and reduce register pressure globally (by removing the | ||||
5108 | /// addressing mode computation from the fast path entirely.). | ||||
5109 | bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, | ||||
5110 | Type *AccessTy, unsigned AddrSpace) { | ||||
5111 | Value *Repl = Addr; | ||||
5112 | |||||
5113 | // Try to collapse single-value PHI nodes. This is necessary to undo | ||||
5114 | // unprofitable PRE transformations. | ||||
5115 | SmallVector<Value *, 8> worklist; | ||||
5116 | SmallPtrSet<Value *, 16> Visited; | ||||
5117 | worklist.push_back(Addr); | ||||
5118 | |||||
5119 | // Use a worklist to iteratively look through PHI and select nodes, and | ||||
5120 |   // ensure that the addressing modes obtained from the non-PHI/select roots of | ||||
5121 | // the graph are compatible. | ||||
5122 | bool PhiOrSelectSeen = false; | ||||
5123 | SmallVector<Instruction *, 16> AddrModeInsts; | ||||
5124 | const SimplifyQuery SQ(*DL, TLInfo); | ||||
5125 | AddressingModeCombiner AddrModes(SQ, Addr); | ||||
5126 | TypePromotionTransaction TPT(RemovedInsts); | ||||
5127 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
5128 | TPT.getRestorationPoint(); | ||||
5129 | while (!worklist.empty()) { | ||||
5130 | Value *V = worklist.pop_back_val(); | ||||
5131 | |||||
5132 | // We allow traversing cyclic Phi nodes. | ||||
5133 |     // In case of success, after this loop we ensure that traversing through | ||||
5134 |     // Phi nodes ends up with all cases computing an address of the form | ||||
5135 |     //   BaseGV + Base + Scale * Index + Offset | ||||
5136 |     // where Scale and Offset are constants and BaseGV, Base and Index | ||||
5137 | // are exactly the same Values in all cases. | ||||
5138 |     // It means that BaseGV, Scale and Offset dominate our memory instruction | ||||
5139 |     // and have the same values as they had in the address computation | ||||
5140 |     // represented as a Phi, so we can safely sink it to the memory instruction. | ||||
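    // Illustrative sketch (added annotation; hypothetical IR): a cyclic Phi
    // such as
    //   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
    // is traversed here; the combine below succeeds only if every incoming
    // value decomposes to the same BaseGV/Base/Index with constant Scale
    // and Offset.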
5141 | if (!Visited.insert(V).second) | ||||
5142 | continue; | ||||
5143 | |||||
5144 | // For a PHI node, push all of its incoming values. | ||||
5145 | if (PHINode *P = dyn_cast<PHINode>(V)) { | ||||
5146 | append_range(worklist, P->incoming_values()); | ||||
5147 | PhiOrSelectSeen = true; | ||||
5148 | continue; | ||||
5149 | } | ||||
5150 | // Similar for select. | ||||
5151 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { | ||||
5152 | worklist.push_back(SI->getFalseValue()); | ||||
5153 | worklist.push_back(SI->getTrueValue()); | ||||
5154 | PhiOrSelectSeen = true; | ||||
5155 | continue; | ||||
5156 | } | ||||
5157 | |||||
5158 | // For non-PHIs, determine the addressing mode being computed. Note that | ||||
5159 | // the result may differ depending on what other uses our candidate | ||||
5160 | // addressing instructions might have. | ||||
5161 | AddrModeInsts.clear(); | ||||
5162 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | ||||
5163 | 0); | ||||
5164 |     // Defer the query (and possible computation) of the dom tree to the point | ||||
5165 |     // of actual use. It's expected that most address matches don't actually | ||||
5166 |     // need the domtree. | ||||
5167 | auto getDTFn = [MemoryInst, this]() -> const DominatorTree & { | ||||
5168 | Function *F = MemoryInst->getParent()->getParent(); | ||||
5169 | return this->getDT(*F); | ||||
5170 | }; | ||||
5171 | ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( | ||||
5172 | V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn, | ||||
5173 | *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, | ||||
5174 | BFI.get()); | ||||
5175 | |||||
5176 | GetElementPtrInst *GEP = LargeOffsetGEP.first; | ||||
5177 | if (GEP && !NewGEPBases.count(GEP)) { | ||||
5178 | // If splitting the underlying data structure can reduce the offset of a | ||||
5179 | // GEP, collect the GEP. Skip the GEPs that are the new bases of | ||||
5180 | // previously split data structures. | ||||
5181 | LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); | ||||
5182 | LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size())); | ||||
5183 | } | ||||
5184 | |||||
5185 | NewAddrMode.OriginalValue = V; | ||||
5186 | if (!AddrModes.addNewAddrMode(NewAddrMode)) | ||||
5187 | break; | ||||
5188 | } | ||||
5189 | |||||
5190 | // Try to combine the AddrModes we've collected. If we couldn't collect any, | ||||
5191 | // or we have multiple but either couldn't combine them or combining them | ||||
5192 | // wouldn't do anything useful, bail out now. | ||||
5193 | if (!AddrModes.combineAddrModes()) { | ||||
5194 | TPT.rollback(LastKnownGood); | ||||
5195 | return false; | ||||
5196 | } | ||||
5197 | bool Modified = TPT.commit(); | ||||
5198 | |||||
5199 | // Get the combined AddrMode (or the only AddrMode, if we only had one). | ||||
5200 | ExtAddrMode AddrMode = AddrModes.getAddrMode(); | ||||
5201 | |||||
5202 | // If all the instructions matched are already in this BB, don't do anything. | ||||
5203 |   // If we saw a Phi node then it is definitely not local, and if we saw a | ||||
5204 | // select then we want to push the address calculation past it even if it's | ||||
5205 | // already in this BB. | ||||
5206 | if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { | ||||
5207 | return IsNonLocalValue(V, MemoryInst->getParent()); | ||||
5208 | })) { | ||||
5209 |     LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode | ||||
5210 |                       << "\n"); | ||||
5211 | return Modified; | ||||
5212 | } | ||||
5213 | |||||
5214 | // Insert this computation right after this user. Since our caller is | ||||
5215 |   // scanning from the top of the BB to the bottom, reuses of the expr are | ||||
5216 | // guaranteed to happen later. | ||||
5217 | IRBuilder<> Builder(MemoryInst); | ||||
5218 | |||||
5219 |   // Now that we have determined the addressing expression we want to use and | ||||
5220 |   // know that we have to sink it into this block, check to see if we have | ||||
5221 |   // already done this for some other load/store instr in this block. If so, reuse | ||||
5222 | // the computation. Before attempting reuse, check if the address is valid | ||||
5223 | // as it may have been erased. | ||||
5224 | |||||
5225 | WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; | ||||
5226 | |||||
5227 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; | ||||
5228 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | ||||
5229 | if (SunkAddr) { | ||||
5230 |     LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode | ||||
5231 |                       << " for " << *MemoryInst << "\n"); | ||||
5232 | if (SunkAddr->getType() != Addr->getType()) { | ||||
5233 | if (SunkAddr->getType()->getPointerAddressSpace() != | ||||
5234 | Addr->getType()->getPointerAddressSpace() && | ||||
5235 | !DL->isNonIntegralPointerType(Addr->getType())) { | ||||
5236 | // There are two reasons the address spaces might not match: a no-op | ||||
5237 | // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a | ||||
5238 | // ptrtoint/inttoptr pair to ensure we match the original semantics. | ||||
5239 | // TODO: allow bitcast between different address space pointers with the | ||||
5240 | // same size. | ||||
5241 | SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr"); | ||||
5242 | SunkAddr = | ||||
5243 | Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr"); | ||||
5244 | } else | ||||
5245 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | ||||
5246 | } | ||||
5247 | } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && | ||||
5248 | SubtargetInfo->addrSinkUsingGEPs())) { | ||||
5249 | // By default, we use the GEP-based method when AA is used later. This | ||||
5250 | // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. | ||||
5251 |     LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | ||||
5252 |                       << " for " << *MemoryInst << "\n"); | ||||
5253 | Value *ResultPtr = nullptr, *ResultIndex = nullptr; | ||||
5254 | |||||
5255 | // First, find the pointer. | ||||
5256 | if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { | ||||
5257 | ResultPtr = AddrMode.BaseReg; | ||||
5258 | AddrMode.BaseReg = nullptr; | ||||
5259 | } | ||||
5260 | |||||
5261 | if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { | ||||
5262 | // We can't add more than one pointer together, nor can we scale a | ||||
5263 | // pointer (both of which seem meaningless). | ||||
5264 | if (ResultPtr || AddrMode.Scale != 1) | ||||
5265 | return Modified; | ||||
5266 | |||||
5267 | ResultPtr = AddrMode.ScaledReg; | ||||
5268 | AddrMode.Scale = 0; | ||||
5269 | } | ||||
5270 | |||||
5271 | // It is only safe to sign extend the BaseReg if we know that the math | ||||
5272 | // required to create it did not overflow before we extend it. Since | ||||
5273 | // the original IR value was tossed in favor of a constant back when | ||||
5274 | // the AddrMode was created we need to bail out gracefully if widths | ||||
5275 | // do not match instead of extending it. | ||||
5276 | // | ||||
5277 | // (See below for code to add the scale.) | ||||
5278 | if (AddrMode.Scale) { | ||||
5279 | Type *ScaledRegTy = AddrMode.ScaledReg->getType(); | ||||
5280 | if (cast<IntegerType>(IntPtrTy)->getBitWidth() > | ||||
5281 | cast<IntegerType>(ScaledRegTy)->getBitWidth()) | ||||
5282 | return Modified; | ||||
5283 | } | ||||
5284 | |||||
5285 | if (AddrMode.BaseGV) { | ||||
5286 | if (ResultPtr) | ||||
5287 | return Modified; | ||||
5288 | |||||
5289 | ResultPtr = AddrMode.BaseGV; | ||||
5290 | } | ||||
5291 | |||||
5292 | // If the real base value actually came from an inttoptr, then the matcher | ||||
5293 | // will look through it and provide only the integer value. In that case, | ||||
5294 | // use it here. | ||||
5295 | if (!DL->isNonIntegralPointerType(Addr->getType())) { | ||||
5296 | if (!ResultPtr && AddrMode.BaseReg) { | ||||
5297 | ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), | ||||
5298 | "sunkaddr"); | ||||
5299 | AddrMode.BaseReg = nullptr; | ||||
5300 | } else if (!ResultPtr && AddrMode.Scale == 1) { | ||||
5301 | ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), | ||||
5302 | "sunkaddr"); | ||||
5303 | AddrMode.Scale = 0; | ||||
5304 | } | ||||
5305 | } | ||||
5306 | |||||
5307 | if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale && | ||||
5308 | !AddrMode.BaseOffs) { | ||||
5309 | SunkAddr = Constant::getNullValue(Addr->getType()); | ||||
5310 | } else if (!ResultPtr) { | ||||
5311 | return Modified; | ||||
5312 | } else { | ||||
5313 | Type *I8PtrTy = | ||||
5314 | Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); | ||||
5315 | Type *I8Ty = Builder.getInt8Ty(); | ||||
5316 | |||||
5317 | // Start with the base register. Do this first so that subsequent address | ||||
5318 | // matching finds it last, which will prevent it from trying to match it | ||||
5319 | // as the scaled value in case it happens to be a mul. That would be | ||||
5320 | // problematic if we've sunk a different mul for the scale, because then | ||||
5321 | // we'd end up sinking both muls. | ||||
5322 | if (AddrMode.BaseReg) { | ||||
5323 | Value *V = AddrMode.BaseReg; | ||||
5324 | if (V->getType() != IntPtrTy) | ||||
5325 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | ||||
5326 | |||||
5327 | ResultIndex = V; | ||||
5328 | } | ||||
5329 | |||||
5330 | // Add the scale value. | ||||
5331 | if (AddrMode.Scale) { | ||||
5332 | Value *V = AddrMode.ScaledReg; | ||||
5333 | if (V->getType() == IntPtrTy) { | ||||
5334 | // done. | ||||
5335 | } else { | ||||
5336 |           assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < | ||||
5337 |                      cast<IntegerType>(V->getType())->getBitWidth() && | ||||
5338 |                  "We can't transform if ScaledReg is too narrow"); | ||||
5339 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | ||||
5340 | } | ||||
5341 | |||||
5342 | if (AddrMode.Scale != 1) | ||||
5343 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | ||||
5344 | "sunkaddr"); | ||||
5345 | if (ResultIndex) | ||||
5346 | ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); | ||||
5347 | else | ||||
5348 | ResultIndex = V; | ||||
5349 | } | ||||
5350 | |||||
5351 | // Add in the Base Offset if present. | ||||
5352 | if (AddrMode.BaseOffs) { | ||||
5353 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | ||||
5354 | if (ResultIndex) { | ||||
5355 | // We need to add this separately from the scale above to help with | ||||
5356 | // SDAG consecutive load/store merging. | ||||
5357 | if (ResultPtr->getType() != I8PtrTy) | ||||
5358 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | ||||
5359 | ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, | ||||
5360 | "sunkaddr", AddrMode.InBounds); | ||||
5361 | } | ||||
5362 | |||||
5363 | ResultIndex = V; | ||||
5364 | } | ||||
5365 | |||||
5366 | if (!ResultIndex) { | ||||
5367 | SunkAddr = ResultPtr; | ||||
5368 | } else { | ||||
5369 | if (ResultPtr->getType() != I8PtrTy) | ||||
5370 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | ||||
5371 | SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr", | ||||
5372 | AddrMode.InBounds); | ||||
5373 | } | ||||
5374 | |||||
5375 | if (SunkAddr->getType() != Addr->getType()) { | ||||
5376 | if (SunkAddr->getType()->getPointerAddressSpace() != | ||||
5377 | Addr->getType()->getPointerAddressSpace() && | ||||
5378 | !DL->isNonIntegralPointerType(Addr->getType())) { | ||||
5379 | // There are two reasons the address spaces might not match: a no-op | ||||
5380 | // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a | ||||
5381 | // ptrtoint/inttoptr pair to ensure we match the original semantics. | ||||
5382 | // TODO: allow bitcast between different address space pointers with | ||||
5383 | // the same size. | ||||
5384 | SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr"); | ||||
5385 | SunkAddr = | ||||
5386 | Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr"); | ||||
5387 | } else | ||||
5388 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | ||||
5389 | } | ||||
5390 | } | ||||
5391 | } else { | ||||
5392 | // We'd require a ptrtoint/inttoptr down the line, which we can't do for | ||||
5393 | // non-integral pointers, so in that case bail out now. | ||||
5394 | Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; | ||||
5395 | Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; | ||||
5396 | PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); | ||||
5397 | PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); | ||||
5398 | if (DL->isNonIntegralPointerType(Addr->getType()) || | ||||
5399 | (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || | ||||
5400 | (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || | ||||
5401 | (AddrMode.BaseGV && | ||||
5402 | DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) | ||||
5403 | return Modified; | ||||
5404 | |||||
5405 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5406 | << " for " << *MemoryInst << "\n");
5407 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | ||||
5408 | Value *Result = nullptr; | ||||
5409 | |||||
5410 | // Start with the base register. Do this first so that subsequent address | ||||
5411 | // matching finds it last, which will prevent it from trying to match it | ||||
5412 | // as the scaled value in case it happens to be a mul. That would be | ||||
5413 | // problematic if we've sunk a different mul for the scale, because then | ||||
5414 | // we'd end up sinking both muls. | ||||
5415 | if (AddrMode.BaseReg) { | ||||
5416 | Value *V = AddrMode.BaseReg; | ||||
5417 | if (V->getType()->isPointerTy()) | ||||
5418 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | ||||
5419 | if (V->getType() != IntPtrTy) | ||||
5420 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | ||||
5421 | Result = V; | ||||
5422 | } | ||||
5423 | |||||
5424 | // Add the scale value. | ||||
5425 | if (AddrMode.Scale) { | ||||
5426 | Value *V = AddrMode.ScaledReg; | ||||
5427 | if (V->getType() == IntPtrTy) { | ||||
5428 | // done. | ||||
5429 | } else if (V->getType()->isPointerTy()) { | ||||
5430 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | ||||
5431 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | ||||
5432 | cast<IntegerType>(V->getType())->getBitWidth()) { | ||||
5433 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | ||||
5434 | } else { | ||||
5435 | // It is only safe to sign extend the BaseReg if we know that the math
5436 | // required to create it did not overflow before we extend it. Since
5437 | // the original IR value was tossed in favor of a constant back when
5438 | // the AddrMode was created, we need to bail out gracefully if the
5439 | // widths do not match instead of extending it.
5440 | Instruction *I = dyn_cast_or_null<Instruction>(Result); | ||||
5441 | if (I && (Result != AddrMode.BaseReg)) | ||||
5442 | I->eraseFromParent(); | ||||
5443 | return Modified; | ||||
5444 | } | ||||
5445 | if (AddrMode.Scale != 1) | ||||
5446 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | ||||
5447 | "sunkaddr"); | ||||
5448 | if (Result) | ||||
5449 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | ||||
5450 | else | ||||
5451 | Result = V; | ||||
5452 | } | ||||
5453 | |||||
5454 | // Add in the BaseGV if present. | ||||
5455 | if (AddrMode.BaseGV) { | ||||
5456 | Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); | ||||
5457 | if (Result) | ||||
5458 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | ||||
5459 | else | ||||
5460 | Result = V; | ||||
5461 | } | ||||
5462 | |||||
5463 | // Add in the Base Offset if present. | ||||
5464 | if (AddrMode.BaseOffs) { | ||||
5465 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | ||||
5466 | if (Result) | ||||
5467 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | ||||
5468 | else | ||||
5469 | Result = V; | ||||
5470 | } | ||||
5471 | |||||
5472 | if (!Result) | ||||
5473 | SunkAddr = Constant::getNullValue(Addr->getType()); | ||||
5474 | else | ||||
5475 | SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); | ||||
5476 | } | ||||
5477 | |||||
5478 | MemoryInst->replaceUsesOfWith(Repl, SunkAddr); | ||||
5479 | // Store the newly computed address into the cache. In the case we reused a | ||||
5480 | // value, this should be idempotent. | ||||
5481 | SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); | ||||
5482 | |||||
5483 | // If we have no uses, recursively delete the value and all dead instructions | ||||
5484 | // using it. | ||||
5485 | if (Repl->use_empty()) { | ||||
5486 | resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() { | ||||
5487 | RecursivelyDeleteTriviallyDeadInstructions( | ||||
5488 | Repl, TLInfo, nullptr, | ||||
5489 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | ||||
5490 | }); | ||||
5491 | } | ||||
5492 | ++NumMemoryInsts; | ||||
5493 | return true; | ||||
5494 | } | ||||
5495 | |||||
5496 | /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find | ||||
5497 | /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can | ||||
5498 | /// only handle a 2 operand GEP in the same basic block or a splat constant | ||||
5499 | /// vector. The 2 operands to the GEP must have a scalar pointer and a vector | ||||
5500 | /// index. | ||||
5501 | /// | ||||
5502 | /// If the existing GEP has a vector base pointer that is splat, we can look | ||||
5503 | /// through the splat to find the scalar pointer. If we can't find a scalar | ||||
5504 | /// pointer there's nothing we can do. | ||||
5505 | /// | ||||
5506 | /// If we have a GEP with more than 2 indices where the middle indices are all | ||||
5507 | /// zeroes, we can replace it with 2 GEPs where the second has 2 operands. | ||||
5508 | /// | ||||
5509 | /// If the final index isn't a vector or is a splat, we can emit a scalar GEP | ||||
5510 | /// followed by a GEP with an all zeroes vector index. This will enable | ||||
5511 | /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a | ||||
5512 | /// zero index. | ||||
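/// For illustration, a minimal IR sketch of the last case (hypothetical
/// names): a splat final index such as
/// \code
///   %addrs = getelementptr i32, ptr %base, <4 x i64> <i64 5, i64 5, i64 5, i64 5>
/// \endcode
/// can be rewritten as a scalar GEP feeding a vector GEP with an all-zero
/// index:
/// \code
///   %scalar = getelementptr i32, ptr %base, i64 5
///   %addrs  = getelementptr i32, ptr %scalar, <4 x i64> zeroinitializer
/// \endcode
/// so SelectionDAGBuilder can use %scalar as the uniform base.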
5513 | bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst, | ||||
5514 | Value *Ptr) { | ||||
5515 | Value *NewAddr; | ||||
5516 | |||||
5517 | if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { | ||||
5518 | // Don't optimize GEPs that don't have indices. | ||||
5519 | if (!GEP->hasIndices()) | ||||
5520 | return false; | ||||
5521 | |||||
5522 | // If the GEP and the gather/scatter aren't in the same BB, don't optimize. | ||||
5523 | // FIXME: We should support this by sinking the GEP. | ||||
5524 | if (MemoryInst->getParent() != GEP->getParent()) | ||||
5525 | return false; | ||||
5526 | |||||
5527 | SmallVector<Value *, 2> Ops(GEP->operands()); | ||||
5528 | |||||
5529 | bool RewriteGEP = false; | ||||
5530 | |||||
5531 | if (Ops[0]->getType()->isVectorTy()) { | ||||
5532 | Ops[0] = getSplatValue(Ops[0]); | ||||
5533 | if (!Ops[0]) | ||||
5534 | return false; | ||||
5535 | RewriteGEP = true; | ||||
5536 | } | ||||
5537 | |||||
5538 | unsigned FinalIndex = Ops.size() - 1; | ||||
5539 | |||||
5540 | // Ensure all but the last index is 0. | ||||
5541 | // FIXME: This isn't strictly required. All that's required is that they are | ||||
5542 | // all scalars or splats. | ||||
5543 | for (unsigned i = 1; i < FinalIndex; ++i) { | ||||
5544 | auto *C = dyn_cast<Constant>(Ops[i]); | ||||
5545 | if (!C) | ||||
5546 | return false; | ||||
5547 | if (isa<VectorType>(C->getType())) | ||||
5548 | C = C->getSplatValue(); | ||||
5549 | auto *CI = dyn_cast_or_null<ConstantInt>(C); | ||||
5550 | if (!CI || !CI->isZero()) | ||||
5551 | return false; | ||||
5552 | // Scalarize the index if needed. | ||||
5553 | Ops[i] = CI; | ||||
5554 | } | ||||
5555 | |||||
5556 | // Try to scalarize the final index. | ||||
5557 | if (Ops[FinalIndex]->getType()->isVectorTy()) { | ||||
5558 | if (Value *V = getSplatValue(Ops[FinalIndex])) { | ||||
5559 | auto *C = dyn_cast<ConstantInt>(V); | ||||
5560 | // Don't scalarize all zeros vector. | ||||
5561 | if (!C || !C->isZero()) { | ||||
5562 | Ops[FinalIndex] = V; | ||||
5563 | RewriteGEP = true; | ||||
5564 | } | ||||
5565 | } | ||||
5566 | } | ||||
5567 | |||||
5568 | // If we made any changes or we have extra operands, we need to generate
5569 | // new instructions.
5570 | if (!RewriteGEP && Ops.size() == 2) | ||||
5571 | return false; | ||||
5572 | |||||
5573 | auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); | ||||
5574 | |||||
5575 | IRBuilder<> Builder(MemoryInst); | ||||
5576 | |||||
5577 | Type *SourceTy = GEP->getSourceElementType(); | ||||
5578 | Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType()); | ||||
5579 | |||||
5580 | // If the final index isn't a vector, emit a scalar GEP containing all ops | ||||
5581 | // and a vector GEP with all zeroes final index. | ||||
5582 | if (!Ops[FinalIndex]->getType()->isVectorTy()) { | ||||
5583 | NewAddr = | ||||
5584 | Builder.CreateGEP(SourceTy, Ops[0], makeArrayRef(Ops).drop_front()); | ||||
5585 | auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); | ||||
5586 | auto *SecondTy = GetElementPtrInst::getIndexedType( | ||||
5587 | SourceTy, makeArrayRef(Ops).drop_front()); | ||||
5588 | NewAddr = | ||||
5589 | Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy)); | ||||
5590 | } else { | ||||
5591 | Value *Base = Ops[0]; | ||||
5592 | Value *Index = Ops[FinalIndex]; | ||||
5593 | |||||
5594 | // Create a scalar GEP if there are more than 2 operands. | ||||
5595 | if (Ops.size() != 2) { | ||||
5596 | // Replace the last index with 0. | ||||
5597 | Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy); | ||||
5598 | Base = | ||||
5599 | Builder.CreateGEP(SourceTy, Base, makeArrayRef(Ops).drop_front()); | ||||
5600 | SourceTy = GetElementPtrInst::getIndexedType( | ||||
5601 | SourceTy, makeArrayRef(Ops).drop_front()); | ||||
5602 | } | ||||
5603 | |||||
5604 | // Now create the GEP with scalar pointer and vector index. | ||||
5605 | NewAddr = Builder.CreateGEP(SourceTy, Base, Index); | ||||
5606 | } | ||||
5607 | } else if (!isa<Constant>(Ptr)) { | ||||
5608 | // Not a GEP; maybe it's a splat and we can create a GEP to enable
5609 | // SelectionDAGBuilder to use it as a uniform base. | ||||
5610 | Value *V = getSplatValue(Ptr); | ||||
5611 | if (!V) | ||||
5612 | return false; | ||||
5613 | |||||
5614 | auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); | ||||
5615 | |||||
5616 | IRBuilder<> Builder(MemoryInst); | ||||
5617 | |||||
5618 | // Emit a vector GEP with a scalar pointer and all 0s vector index. | ||||
5619 | Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType()); | ||||
5620 | auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); | ||||
5621 | Type *ScalarTy; | ||||
5622 | if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() == | ||||
5623 | Intrinsic::masked_gather) { | ||||
5624 | ScalarTy = MemoryInst->getType()->getScalarType(); | ||||
5625 | } else { | ||||
5626 | assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5627 | Intrinsic::masked_scatter);
5628 | ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType(); | ||||
5629 | } | ||||
5630 | NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy)); | ||||
5631 | } else { | ||||
5632 | // Constant; SelectionDAGBuilder knows to check whether it's a splat.
5633 | return false; | ||||
5634 | } | ||||
5635 | |||||
5636 | MemoryInst->replaceUsesOfWith(Ptr, NewAddr); | ||||
5637 | |||||
5638 | // If we have no uses, recursively delete the value and all dead instructions | ||||
5639 | // using it. | ||||
5640 | if (Ptr->use_empty()) | ||||
5641 | RecursivelyDeleteTriviallyDeadInstructions( | ||||
5642 | Ptr, TLInfo, nullptr, | ||||
5643 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | ||||
5644 | |||||
5645 | return true; | ||||
5646 | } | ||||
5647 | |||||
5648 | /// If there are any memory operands, use OptimizeMemoryInst to sink their | ||||
5649 | /// address computing into the block when possible / profitable. | ||||
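/// For illustration (a hedged sketch; operands hypothetical), an indirect
/// memory constraint such as
/// \code
///   call void asm "movl $1, $0", "=*m,r"(ptr elementtype(i32) %gep, i32 %v)
/// \endcode
/// exposes %gep as an address that can be sunk here just like a load/store
/// address.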
5650 | bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { | ||||
5651 | bool MadeChange = false; | ||||
5652 | |||||
5653 | const TargetRegisterInfo *TRI = | ||||
5654 | TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); | ||||
5655 | TargetLowering::AsmOperandInfoVector TargetConstraints = | ||||
5656 | TLI->ParseConstraints(*DL, TRI, *CS); | ||||
5657 | unsigned ArgNo = 0; | ||||
5658 | for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) { | ||||
5659 | // Compute the constraint code and ConstraintType to use. | ||||
5660 | TLI->ComputeConstraintToUse(OpInfo, SDValue()); | ||||
5661 | |||||
5662 | // TODO: Also handle C_Address? | ||||
5663 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && | ||||
5664 | OpInfo.isIndirect) { | ||||
5665 | Value *OpVal = CS->getArgOperand(ArgNo++); | ||||
5666 | MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); | ||||
5667 | } else if (OpInfo.Type == InlineAsm::isInput) | ||||
5668 | ArgNo++; | ||||
5669 | } | ||||
5670 | |||||
5671 | return MadeChange; | ||||
5672 | } | ||||
5673 | |||||
5674 | /// Check if all the uses of \p Val are equivalent (or free) zero or | ||||
5675 | /// sign extensions. | ||||
5676 | static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { | ||||
5677 | assert(!Val->use_empty() && "Input must have at least one use");
5678 | const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); | ||||
5679 | bool IsSExt = isa<SExtInst>(FirstUser); | ||||
5680 | Type *ExtTy = FirstUser->getType(); | ||||
5681 | for (const User *U : Val->users()) { | ||||
5682 | const Instruction *UI = cast<Instruction>(U); | ||||
5683 | if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) | ||||
5684 | return false; | ||||
5685 | Type *CurTy = UI->getType(); | ||||
5686 | // Same input and output types: Same instruction after CSE. | ||||
5687 | if (CurTy == ExtTy) | ||||
5688 | continue; | ||||
5689 | |||||
5690 | // If IsSExt is true, we are in this situation: | ||||
5691 | // a = Val | ||||
5692 | // b = sext ty1 a to ty2 | ||||
5693 | // c = sext ty1 a to ty3 | ||||
5694 | // Assuming ty2 is shorter than ty3, this could be turned into: | ||||
5695 | // a = Val | ||||
5696 | // b = sext ty1 a to ty2 | ||||
5697 | // c = sext ty2 b to ty3 | ||||
5698 | // However, the last sext is not free. | ||||
5699 | if (IsSExt) | ||||
5700 | return false; | ||||
5701 | |||||
5702 | // This is a ZExt, maybe this is free to extend from one type to another. | ||||
5703 | // In that case, we would not account for a different use. | ||||
5704 | Type *NarrowTy; | ||||
5705 | Type *LargeTy; | ||||
5706 | if (ExtTy->getScalarType()->getIntegerBitWidth() > | ||||
5707 | CurTy->getScalarType()->getIntegerBitWidth()) { | ||||
5708 | NarrowTy = CurTy; | ||||
5709 | LargeTy = ExtTy; | ||||
5710 | } else { | ||||
5711 | NarrowTy = ExtTy; | ||||
5712 | LargeTy = CurTy; | ||||
5713 | } | ||||
5714 | |||||
5715 | if (!TLI.isZExtFree(NarrowTy, LargeTy)) | ||||
5716 | return false; | ||||
5717 | } | ||||
5718 | // All uses are the same or can be derived from one another for free. | ||||
5719 | return true; | ||||
5720 | } | ||||
5721 | |||||
5722 | /// Try to speculatively promote extensions in \p Exts and continue | ||||
5723 | /// promoting through newly promoted operands recursively as far as doing so is | ||||
5724 | /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. | ||||
5725 | /// When some promotion happened, \p TPT contains the proper state to revert | ||||
5726 | /// them. | ||||
5727 | /// | ||||
5728 | /// \return true if some promotion happened, false otherwise. | ||||
5729 | bool CodeGenPrepare::tryToPromoteExts( | ||||
5730 | TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, | ||||
5731 | SmallVectorImpl<Instruction *> &ProfitablyMovedExts, | ||||
5732 | unsigned CreatedInstsCost) { | ||||
5733 | bool Promoted = false; | ||||
5734 | |||||
5735 | // Iterate over all the extensions to try to promote them. | ||||
5736 | for (auto *I : Exts) { | ||||
5737 | // Early check if we directly have ext(load). | ||||
5738 | if (isa<LoadInst>(I->getOperand(0))) { | ||||
5739 | ProfitablyMovedExts.push_back(I); | ||||
5740 | continue; | ||||
5741 | } | ||||
5742 | |||||
5743 | // Check whether or not we want to do any promotion. The reason we have
5744 | // this check inside the for loop is to catch the case where an extension
5745 | // is directly fed by a load, because in that case the extension can be
5746 | // moved up without any promotion of its operands.
5747 | if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion) | ||||
5748 | return false; | ||||
5749 | |||||
5750 | // Get the action to perform the promotion. | ||||
5751 | TypePromotionHelper::Action TPH = | ||||
5752 | TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); | ||||
5753 | // Check if we can promote. | ||||
5754 | if (!TPH) { | ||||
5755 | // Save the current extension as we cannot move up through its operand. | ||||
5756 | ProfitablyMovedExts.push_back(I); | ||||
5757 | continue; | ||||
5758 | } | ||||
5759 | |||||
5760 | // Save the current state. | ||||
5761 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
5762 | TPT.getRestorationPoint(); | ||||
5763 | SmallVector<Instruction *, 4> NewExts; | ||||
5764 | unsigned NewCreatedInstsCost = 0; | ||||
5765 | unsigned ExtCost = !TLI->isExtFree(I); | ||||
5766 | // Promote. | ||||
5767 | Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, | ||||
5768 | &NewExts, nullptr, *TLI); | ||||
5769 | assert(PromotedVal &&
5770 | "TypePromotionHelper should have filtered out those cases");
5771 | |||||
5772 | // We would be able to merge only one extension in a load. | ||||
5773 | // Therefore, if we have more than 1 new extension we heuristically | ||||
5774 | // cut this search path, because it means we degrade the code quality. | ||||
5775 | // With exactly 2, the transformation is neutral, because we will merge | ||||
5776 | // one extension but leave one. However, we optimistically keep going, | ||||
5777 | // because the new extension may be removed too. | ||||
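// Worked example of the arithmetic below (assuming CreatedInstsCost == 0
// and a non-free promoted ext, i.e. ExtCost == 1): a promotion creating
// two new extensions yields max(0, 2 - 1) = 1, which is kept; one creating
// three yields max(0, 3 - 1) = 2 > 1, which triggers the rollback.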
5778 | long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; | ||||
5779 | // FIXME: It would be possible to propagate a negative value instead of | ||||
5780 | // conservatively ceiling it to 0. | ||||
5781 | TotalCreatedInstsCost = | ||||
5782 | std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); | ||||
5783 | if (!StressExtLdPromotion && | ||||
5784 | (TotalCreatedInstsCost > 1 || | ||||
5785 | !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { | ||||
5786 | // This promotion is not profitable, rollback to the previous state, and | ||||
5787 | // save the current extension in ProfitablyMovedExts as the latest | ||||
5788 | // speculative promotion turned out to be unprofitable. | ||||
5789 | TPT.rollback(LastKnownGood); | ||||
5790 | ProfitablyMovedExts.push_back(I); | ||||
5791 | continue; | ||||
5792 | } | ||||
5793 | // Continue promoting NewExts as far as doing so is profitable. | ||||
5794 | SmallVector<Instruction *, 2> NewlyMovedExts; | ||||
5795 | (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); | ||||
5796 | bool NewPromoted = false; | ||||
5797 | for (auto *ExtInst : NewlyMovedExts) { | ||||
5798 | Instruction *MovedExt = cast<Instruction>(ExtInst); | ||||
5799 | Value *ExtOperand = MovedExt->getOperand(0); | ||||
5800 | // If we have reached a load, we need this extra profitability check
5801 | // as it could potentially be merged into an ext(load). | ||||
5802 | if (isa<LoadInst>(ExtOperand) && | ||||
5803 | !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || | ||||
5804 | (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) | ||||
5805 | continue; | ||||
5806 | |||||
5807 | ProfitablyMovedExts.push_back(MovedExt); | ||||
5808 | NewPromoted = true; | ||||
5809 | } | ||||
5810 | |||||
5811 | // If none of the speculative promotions for NewExts is profitable, roll
5812 | // back and save the current extension (I) as the last profitable extension.
5813 | if (!NewPromoted) { | ||||
5814 | TPT.rollback(LastKnownGood); | ||||
5815 | ProfitablyMovedExts.push_back(I); | ||||
5816 | continue; | ||||
5817 | } | ||||
5818 | // The promotion is profitable. | ||||
5819 | Promoted = true; | ||||
5820 | } | ||||
5821 | return Promoted; | ||||
5822 | } | ||||
5823 | |||||
5824 | /// Merge redundant sexts when one dominates the other.
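/// For illustration (hypothetical IR): given
/// \code
///   %s1 = sext i32 %a to i64   ; dominates %s2
///   ...
///   %s2 = sext i32 %a to i64
/// \endcode
/// the uses of %s2 are rewritten to %s1 and %s2 is removed; if neither
/// sext dominates the other, the pair is left alone.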
5825 | bool CodeGenPrepare::mergeSExts(Function &F) { | ||||
5826 | bool Changed = false; | ||||
5827 | for (auto &Entry : ValToSExtendedUses) { | ||||
5828 | SExts &Insts = Entry.second; | ||||
5829 | SExts CurPts; | ||||
5830 | for (Instruction *Inst : Insts) { | ||||
5831 | if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || | ||||
5832 | Inst->getOperand(0) != Entry.first) | ||||
5833 | continue; | ||||
5834 | bool inserted = false; | ||||
5835 | for (auto &Pt : CurPts) { | ||||
5836 | if (getDT(F).dominates(Inst, Pt)) { | ||||
5837 | Pt->replaceAllUsesWith(Inst); | ||||
5838 | RemovedInsts.insert(Pt); | ||||
5839 | Pt->removeFromParent(); | ||||
5840 | Pt = Inst; | ||||
5841 | inserted = true; | ||||
5842 | Changed = true; | ||||
5843 | break; | ||||
5844 | } | ||||
5845 | if (!getDT(F).dominates(Pt, Inst)) | ||||
5846 | // Give up if we need to merge in a common dominator as the | ||||
5847 | // experiments show it is not profitable. | ||||
5848 | continue; | ||||
5849 | Inst->replaceAllUsesWith(Pt); | ||||
5850 | RemovedInsts.insert(Inst); | ||||
5851 | Inst->removeFromParent(); | ||||
5852 | inserted = true; | ||||
5853 | Changed = true; | ||||
5854 | break; | ||||
5855 | } | ||||
5856 | if (!inserted) | ||||
5857 | CurPts.push_back(Inst); | ||||
5858 | } | ||||
5859 | } | ||||
5860 | return Changed; | ||||
5861 | } | ||||
5862 | |||||
5863 | // Split large data structures so that the GEPs accessing them can have
5864 | // smaller offsets, allowing them to be sunk to the same blocks as their users.
5865 | // For example, a large struct starting from %base is split into two parts | ||||
5866 | // where the second part starts from %new_base. | ||||
5867 | // | ||||
5868 | // Before: | ||||
5869 | // BB0: | ||||
5870 | // %base = | ||||
5871 | // | ||||
5872 | // BB1: | ||||
5873 | // %gep0 = gep %base, off0 | ||||
5874 | // %gep1 = gep %base, off1 | ||||
5875 | // %gep2 = gep %base, off2 | ||||
5876 | // | ||||
5877 | // BB2: | ||||
5878 | // %load1 = load %gep0 | ||||
5879 | // %load2 = load %gep1 | ||||
5880 | // %load3 = load %gep2 | ||||
5881 | // | ||||
5882 | // After: | ||||
5883 | // BB0: | ||||
5884 | // %base = | ||||
5885 | // %new_base = gep %base, off0 | ||||
5886 | // | ||||
5887 | // BB1: | ||||
5888 | // %new_gep0 = %new_base | ||||
5889 | // %new_gep1 = gep %new_base, off1 - off0 | ||||
5890 | // %new_gep2 = gep %new_base, off2 - off0 | ||||
5891 | // | ||||
5892 | // BB2: | ||||
5893 | // %load1 = load i32, i32* %new_gep0 | ||||
5894 | // %load2 = load i32, i32* %new_gep1 | ||||
5895 | // %load3 = load i32, i32* %new_gep2 | ||||
5896 | // | ||||
5897 | // %new_gep1 and %new_gep2 can now be sunk to BB2 after the split because
5898 | // their offsets are small enough to fit into the addressing mode.
5899 | bool CodeGenPrepare::splitLargeGEPOffsets() { | ||||
5900 | bool Changed = false; | ||||
5901 | for (auto &Entry : LargeOffsetGEPMap) { | ||||
5902 | Value *OldBase = Entry.first; | ||||
5903 | SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> | ||||
5904 | &LargeOffsetGEPs = Entry.second; | ||||
5905 | auto compareGEPOffset = | ||||
5906 | [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, | ||||
5907 | const std::pair<GetElementPtrInst *, int64_t> &RHS) { | ||||
5908 | if (LHS.first == RHS.first) | ||||
5909 | return false; | ||||
5910 | if (LHS.second != RHS.second) | ||||
5911 | return LHS.second < RHS.second; | ||||
5912 | return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; | ||||
5913 | }; | ||||
5914 | // Sort all the GEPs of the same data structure based on their offsets.
5915 | llvm::sort(LargeOffsetGEPs, compareGEPOffset); | ||||
5916 | LargeOffsetGEPs.erase( | ||||
5917 | std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), | ||||
5918 | LargeOffsetGEPs.end()); | ||||
5919 | // Skip if all the GEPs have the same offsets. | ||||
5920 | if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) | ||||
5921 | continue; | ||||
5922 | GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; | ||||
5923 | int64_t BaseOffset = LargeOffsetGEPs.begin()->second; | ||||
5924 | Value *NewBaseGEP = nullptr; | ||||
5925 | |||||
5926 | auto *LargeOffsetGEP = LargeOffsetGEPs.begin(); | ||||
5927 | while (LargeOffsetGEP != LargeOffsetGEPs.end()) { | ||||
5928 | GetElementPtrInst *GEP = LargeOffsetGEP->first; | ||||
5929 | int64_t Offset = LargeOffsetGEP->second; | ||||
5930 | if (Offset != BaseOffset) { | ||||
5931 | TargetLowering::AddrMode AddrMode; | ||||
5932 | AddrMode.BaseOffs = Offset - BaseOffset; | ||||
5933 | // The result type of the GEP might not be the type of the memory | ||||
5934 | // access. | ||||
5935 | if (!TLI->isLegalAddressingMode(*DL, AddrMode, | ||||
5936 | GEP->getResultElementType(), | ||||
5937 | GEP->getAddressSpace())) { | ||||
5938 | // We need to create a new base if the offset to the current base is | ||||
5939 | // too large to fit into the addressing mode. So, a very large struct | ||||
5940 | // may be split into several parts. | ||||
5941 | BaseGEP = GEP; | ||||
5942 | BaseOffset = Offset; | ||||
5943 | NewBaseGEP = nullptr; | ||||
5944 | } | ||||
5945 | } | ||||
5946 | |||||
5947 | // Generate a new GEP to replace the current one. | ||||
5948 | LLVMContext &Ctx = GEP->getContext(); | ||||
5949 | Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); | ||||
5950 | Type *I8PtrTy = | ||||
5951 | Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace()); | ||||
5952 | Type *I8Ty = Type::getInt8Ty(Ctx); | ||||
5953 | |||||
5954 | if (!NewBaseGEP) { | ||||
5955 | // Create a new base if we don't have one yet. Find the insertion | ||||
5956 | // pointer for the new base first. | ||||
5957 | BasicBlock::iterator NewBaseInsertPt; | ||||
5958 | BasicBlock *NewBaseInsertBB; | ||||
5959 | if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { | ||||
5960 | // If the base of the struct is an instruction, the new base will be | ||||
5961 | // inserted close to it. | ||||
5962 | NewBaseInsertBB = BaseI->getParent(); | ||||
5963 | if (isa<PHINode>(BaseI)) | ||||
5964 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | ||||
5965 | else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { | ||||
5966 | NewBaseInsertBB = | ||||
5967 | SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); | ||||
5968 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | ||||
5969 | } else | ||||
5970 | NewBaseInsertPt = std::next(BaseI->getIterator()); | ||||
5971 | } else { | ||||
5972 | // If the current base is an argument or global value, the new base | ||||
5973 | // will be inserted to the entry block. | ||||
5974 | NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); | ||||
5975 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | ||||
5976 | } | ||||
5977 | IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); | ||||
5978 | // Create a new base. | ||||
5979 | Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); | ||||
5980 | NewBaseGEP = OldBase; | ||||
5981 | if (NewBaseGEP->getType() != I8PtrTy) | ||||
5982 | NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); | ||||
5983 | NewBaseGEP = | ||||
5984 | NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); | ||||
5985 | NewGEPBases.insert(NewBaseGEP); | ||||
5986 | } | ||||
5987 | |||||
5988 | IRBuilder<> Builder(GEP); | ||||
5989 | Value *NewGEP = NewBaseGEP; | ||||
5990 | if (Offset == BaseOffset) { | ||||
5991 | if (GEP->getType() != I8PtrTy) | ||||
5992 | NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); | ||||
5993 | } else { | ||||
5994 | // Calculate the new offset for the new GEP. | ||||
5995 | Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); | ||||
5996 | NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); | ||||
5997 | |||||
5998 | if (GEP->getType() != I8PtrTy) | ||||
5999 | NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); | ||||
6000 | } | ||||
6001 | GEP->replaceAllUsesWith(NewGEP); | ||||
6002 | LargeOffsetGEPID.erase(GEP); | ||||
6003 | LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); | ||||
6004 | GEP->eraseFromParent(); | ||||
6005 | Changed = true; | ||||
6006 | } | ||||
6007 | } | ||||
6008 | return Changed; | ||||
6009 | } | ||||
6010 | |||||
6011 | bool CodeGenPrepare::optimizePhiType( | ||||
6012 | PHINode *I, SmallPtrSetImpl<PHINode *> &Visited, | ||||
6013 | SmallPtrSetImpl<Instruction *> &DeletedInstrs) { | ||||
6014 | // We are looking for a collection of interconnected phi nodes that together
6015 | // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6016 | // are of the same type. Convert the whole set of nodes to the type of the
6017 | // bitcast.
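// A minimal sketch of the pattern (hypothetical IR), retyping an i32 phi
// to float when its defs are loads/bitcasts and its uses stores/bitcasts:
//   %l   = load i32, ptr %p
//   %phi = phi i32 [ %l, %entry ], [ %phi, %loop ]
//   %f   = bitcast i32 %phi to float
// becomes
//   %l    = load i32, ptr %p
//   %l.bc = bitcast i32 %l to float
//   %phi  = phi float [ %l.bc, %entry ], [ %phi, %loop ]
// with %f's users now fed by the new float phi directly.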
6018 | Type *PhiTy = I->getType(); | ||||
6019 | Type *ConvertTy = nullptr; | ||||
6020 | if (Visited.count(I) || | ||||
6021 | (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy())) | ||||
6022 | return false; | ||||
6023 | |||||
6024 | SmallVector<Instruction *, 4> Worklist; | ||||
6025 | Worklist.push_back(cast<Instruction>(I)); | ||||
6026 | SmallPtrSet<PHINode *, 4> PhiNodes; | ||||
6027 | PhiNodes.insert(I); | ||||
6028 | Visited.insert(I); | ||||
6029 | SmallPtrSet<Instruction *, 4> Defs; | ||||
6030 | SmallPtrSet<Instruction *, 4> Uses; | ||||
6031 | // This works by adding extra bitcasts between load/stores and removing
6032 | // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi))
6033 | // we can get in the situation where we remove a bitcast in one iteration
6034 | // just to add it again in the next. We need to ensure that at least one
6035 | // bitcast we remove is anchored to something that will not change back.
6036 | bool AnyAnchored = false; | ||||
6037 | |||||
6038 | while (!Worklist.empty()) { | ||||
6039 | Instruction *II = Worklist.pop_back_val(); | ||||
6040 | |||||
6041 | if (auto *Phi = dyn_cast<PHINode>(II)) { | ||||
6042 | // Handle Defs, which might also be PHIs.
6043 | for (Value *V : Phi->incoming_values()) { | ||||
6044 | if (auto *OpPhi = dyn_cast<PHINode>(V)) { | ||||
6045 | if (!PhiNodes.count(OpPhi)) { | ||||
6046 | if (!Visited.insert(OpPhi).second) | ||||
6047 | return false; | ||||
6048 | PhiNodes.insert(OpPhi); | ||||
6049 | Worklist.push_back(OpPhi); | ||||
6050 | } | ||||
6051 | } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) { | ||||
6052 | if (!OpLoad->isSimple()) | ||||
6053 | return false; | ||||
6054 | if (Defs.insert(OpLoad).second) | ||||
6055 | Worklist.push_back(OpLoad); | ||||
6056 | } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) { | ||||
6057 | if (Defs.insert(OpEx).second) | ||||
6058 | Worklist.push_back(OpEx); | ||||
6059 | } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { | ||||
6060 | if (!ConvertTy) | ||||
6061 | ConvertTy = OpBC->getOperand(0)->getType(); | ||||
6062 | if (OpBC->getOperand(0)->getType() != ConvertTy) | ||||
6063 | return false; | ||||
6064 | if (Defs.insert(OpBC).second) { | ||||
6065 | Worklist.push_back(OpBC); | ||||
6066 | AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) && | ||||
6067 | !isa<ExtractElementInst>(OpBC->getOperand(0)); | ||||
6068 | } | ||||
6069 | } else if (!isa<UndefValue>(V)) { | ||||
6070 | return false; | ||||
6071 | } | ||||
6072 | } | ||||
6073 | } | ||||
6074 | |||||
6075 | // Handle uses, which might also be phis.
6076 | for (User *V : II->users()) { | ||||
6077 | if (auto *OpPhi = dyn_cast<PHINode>(V)) { | ||||
6078 | if (!PhiNodes.count(OpPhi)) { | ||||
6079 | if (Visited.count(OpPhi)) | ||||
6080 | return false; | ||||
6081 | PhiNodes.insert(OpPhi); | ||||
6082 | Visited.insert(OpPhi); | ||||
6083 | Worklist.push_back(OpPhi); | ||||
6084 | } | ||||
6085 | } else if (auto *OpStore = dyn_cast<StoreInst>(V)) { | ||||
6086 | if (!OpStore->isSimple() || OpStore->getOperand(0) != II) | ||||
6087 | return false; | ||||
6088 | Uses.insert(OpStore); | ||||
6089 | } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { | ||||
6090 | if (!ConvertTy) | ||||
6091 | ConvertTy = OpBC->getType(); | ||||
6092 | if (OpBC->getType() != ConvertTy) | ||||
6093 | return false; | ||||
6094 | Uses.insert(OpBC); | ||||
6095 | AnyAnchored |= | ||||
6096 | any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); }); | ||||
6097 | } else { | ||||
6098 | return false; | ||||
6099 | } | ||||
6100 | } | ||||
6101 | } | ||||
6102 | |||||
6103 | if (!ConvertTy || !AnyAnchored || | ||||
6104 | !TLI->shouldConvertPhiType(PhiTy, ConvertTy)) | ||||
6105 | return false; | ||||
6106 | |||||
6107 | LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to "
6108 | << *ConvertTy << "\n");
6109 | |||||
6110 | // Create all the new phi nodes of the new type, and bitcast any loads to the | ||||
6111 | // correct type. | ||||
6112 | ValueToValueMap ValMap; | ||||
6113 | ValMap[UndefValue::get(PhiTy)] = UndefValue::get(ConvertTy); | ||||
6114 | for (Instruction *D : Defs) { | ||||
6115 | if (isa<BitCastInst>(D)) { | ||||
6116 | ValMap[D] = D->getOperand(0); | ||||
6117 | DeletedInstrs.insert(D); | ||||
6118 | } else { | ||||
6119 | ValMap[D] = | ||||
6120 | new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode()); | ||||
6121 | } | ||||
6122 | } | ||||
6123 | for (PHINode *Phi : PhiNodes) | ||||
6124 | ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(), | ||||
6125 | Phi->getName() + ".tc", Phi); | ||||
6126 | // Pipe together all the PhiNodes. | ||||
6127 | for (PHINode *Phi : PhiNodes) { | ||||
6128 | PHINode *NewPhi = cast<PHINode>(ValMap[Phi]); | ||||
6129 | for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++) | ||||
6130 | NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)], | ||||
6131 | Phi->getIncomingBlock(i)); | ||||
6132 | Visited.insert(NewPhi); | ||||
6133 | } | ||||
6134 | // And finally pipe up the stores and bitcasts | ||||
6135 | for (Instruction *U : Uses) { | ||||
6136 | if (isa<BitCastInst>(U)) { | ||||
6137 | DeletedInstrs.insert(U); | ||||
6138 | U->replaceAllUsesWith(ValMap[U->getOperand(0)]); | ||||
6139 | } else { | ||||
6140 | U->setOperand(0, | ||||
6141 | new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U)); | ||||
6142 | } | ||||
6143 | } | ||||
6144 | |||||
6145 | // Save the removed phis to be deleted later. | ||||
6146 | for (PHINode *Phi : PhiNodes) | ||||
6147 | DeletedInstrs.insert(Phi); | ||||
6148 | return true; | ||||
6149 | } | ||||
6150 | |||||
6151 | bool CodeGenPrepare::optimizePhiTypes(Function &F) { | ||||
6152 | if (!OptimizePhiTypes) | ||||
6153 | return false; | ||||
6154 | |||||
6155 | bool Changed = false; | ||||
6156 | SmallPtrSet<PHINode *, 4> Visited; | ||||
6157 | SmallPtrSet<Instruction *, 4> DeletedInstrs; | ||||
6158 | |||||
6159 | // Attempt to optimize all the phis in the function to the correct type.
6160 | for (auto &BB : F) | ||||
6161 | for (auto &Phi : BB.phis()) | ||||
6162 | Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs); | ||||
6163 | |||||
6164 | // Remove any old phis that have been converted.
6165 | for (auto *I : DeletedInstrs) { | ||||
6166 | I->replaceAllUsesWith(PoisonValue::get(I->getType())); | ||||
6167 | I->eraseFromParent(); | ||||
6168 | } | ||||
6169 | |||||
6170 | return Changed; | ||||
6171 | } | ||||
6172 | |||||
6173 | /// Return true, if an ext(load) can be formed from an extension in | ||||
6174 | /// \p MovedExts. | ||||
6175 | bool CodeGenPrepare::canFormExtLd( | ||||
6176 | const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, | ||||
6177 | Instruction *&Inst, bool HasPromoted) { | ||||
6178 | for (auto *MovedExtInst : MovedExts) { | ||||
6179 | if (isa<LoadInst>(MovedExtInst->getOperand(0))) { | ||||
6180 | LI = cast<LoadInst>(MovedExtInst->getOperand(0)); | ||||
6181 | Inst = MovedExtInst; | ||||
6182 | break; | ||||
6183 | } | ||||
6184 | } | ||||
6185 | if (!LI) | ||||
6186 | return false; | ||||
6187 | |||||
6188 | // If they're already in the same block, there's nothing to do. | ||||
6189 | // Make the cheap checks first if we did not promote. | ||||
6190 | // If we promoted, we need to check if it is indeed profitable. | ||||
6191 | if (!HasPromoted && LI->getParent() == Inst->getParent()) | ||||
6192 | return false; | ||||
6193 | |||||
6194 | return TLI->isExtLoad(LI, Inst, *DL); | ||||
6195 | } | ||||
6196 | |||||
6197 | /// Move a zext or sext fed by a load into the same basic block as the load, | ||||
6198 | /// unless conditions are unfavorable. This allows SelectionDAG to fold the | ||||
6199 | /// extend into the load. | ||||
6200 | /// | ||||
6201 | /// E.g., | ||||
6202 | /// \code | ||||
6203 | /// %ld = load i32* %addr | ||||
6204 | /// %add = add nuw i32 %ld, 4 | ||||
6205 | /// %zext = zext i32 %add to i64 | ||||
6206 | /// \endcode
6207 | /// => | ||||
6208 | /// \code | ||||
6209 | /// %ld = load i32* %addr | ||||
6210 | /// %zext = zext i32 %ld to i64 | ||||
6211 | /// %add = add nuw i64 %zext, 4 | ||||
6212 | /// \endcode
6213 | /// Note that the promotion in %add to i64 is done in tryToPromoteExts(), which
6214 | /// allows us to match zext(load i32*) to i64.
6215 | /// | ||||
6216 | /// Also, try to promote the computations used to obtain a sign-extended
6217 | /// value used in memory accesses.
6218 | /// E.g., | ||||
6219 | /// \code | ||||
6220 | /// a = add nsw i32 b, 3 | ||||
6221 | /// d = sext i32 a to i64 | ||||
6222 | /// e = getelementptr ..., i64 d | ||||
6223 | /// \endcode | ||||
6224 | /// => | ||||
6225 | /// \code | ||||
6226 | /// f = sext i32 b to i64 | ||||
6227 | /// a = add nsw i64 f, 3 | ||||
6228 | /// e = getelementptr ..., i64 a | ||||
6229 | /// \endcode | ||||
6230 | /// | ||||
6231 | /// \p Inst[in/out] the extension may be modified during the process if some | ||||
6232 | /// promotions apply. | ||||
6233 | bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { | ||||
6234 | bool AllowPromotionWithoutCommonHeader = false; | ||||
6235 | /// See if it is an interesting sext operation for the address type
6236 | /// promotion before trying to promote it, e.g., the ones with the right | ||||
6237 | /// type and used in memory accesses. | ||||
6238 | bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( | ||||
6239 | *Inst, AllowPromotionWithoutCommonHeader); | ||||
6240 | TypePromotionTransaction TPT(RemovedInsts); | ||||
6241 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | ||||
6242 | TPT.getRestorationPoint(); | ||||
6243 | SmallVector<Instruction *, 1> Exts; | ||||
6244 | SmallVector<Instruction *, 2> SpeculativelyMovedExts; | ||||
6245 | Exts.push_back(Inst); | ||||
6246 | |||||
6247 | bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); | ||||
6248 | |||||
6249 | // Look for a load being extended. | ||||
6250 | LoadInst *LI = nullptr; | ||||
6251 | Instruction *ExtFedByLoad; | ||||
6252 | |||||
6253 | // Try to promote a chain of computation if doing so allows forming an
6254 | // extended load.
6255 | if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { | ||||
6256 | assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6257 | TPT.commit(); | ||||
6258 | // Move the extend into the same block as the load. | ||||
6259 | ExtFedByLoad->moveAfter(LI); | ||||
6260 | ++NumExtsMoved; | ||||
6261 | Inst = ExtFedByLoad; | ||||
6262 | return true; | ||||
6263 | } | ||||
6264 | |||||
6265 | // Continue promoting SExts if the target considers doing so worthwhile.
6266 | if (ATPConsiderable && | ||||
6267 | performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, | ||||
6268 | HasPromoted, TPT, SpeculativelyMovedExts)) | ||||
6269 | return true; | ||||
6270 | |||||
6271 | TPT.rollback(LastKnownGood); | ||||
6272 | return false; | ||||
6273 | } | ||||
6274 | |||||
6275 | // Perform address type promotion if doing so is profitable. | ||||
6276 | // If AllowPromotionWithoutCommonHeader == false, we should find other sext | ||||
6277 | // instructions that sign extended the same initial value. However, if | ||||
6278 | // AllowPromotionWithoutCommonHeader == true, we expect that promoting the
6279 | // extension is profitable on its own.
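// For illustration (hypothetical IR): two sext chains sharing header %b,
//   %a1 = add nsw i32 %b, 1        %a2 = add nsw i32 %b, 2
//   %s1 = sext i32 %a1 to i64      %s2 = sext i32 %a2 to i64
// Once the second chain is seen, promoting both hoists the sexts onto %b,
// and the resulting 'sext i32 %b to i64' copies can later be merged by
// mergeSExts().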
6280 | bool CodeGenPrepare::performAddressTypePromotion( | ||||
6281 | Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, | ||||
6282 | bool HasPromoted, TypePromotionTransaction &TPT, | ||||
6283 | SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { | ||||
6284 | bool Promoted = false; | ||||
6285 | SmallPtrSet<Instruction *, 1> UnhandledExts; | ||||
6286 | bool AllSeenFirst = true; | ||||
6287 | for (auto *I : SpeculativelyMovedExts) { | ||||
6288 | Value *HeadOfChain = I->getOperand(0); | ||||
6289 | DenseMap<Value *, Instruction *>::iterator AlreadySeen = | ||||
6290 | SeenChainsForSExt.find(HeadOfChain); | ||||
6291 | // If there is an unhandled SExt which has the same header, try to promote | ||||
6292 | // it as well. | ||||
6293 | if (AlreadySeen != SeenChainsForSExt.end()) { | ||||
6294 | if (AlreadySeen->second != nullptr) | ||||
6295 | UnhandledExts.insert(AlreadySeen->second); | ||||
6296 | AllSeenFirst = false; | ||||
6297 | } | ||||
6298 | } | ||||
6299 | |||||
6300 | if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && | ||||
6301 | SpeculativelyMovedExts.size() == 1)) { | ||||
6302 | TPT.commit(); | ||||
6303 | if (HasPromoted) | ||||
6304 | Promoted = true; | ||||
6305 | for (auto *I : SpeculativelyMovedExts) { | ||||
6306 | Value *HeadOfChain = I->getOperand(0); | ||||
6307 | SeenChainsForSExt[HeadOfChain] = nullptr; | ||||
6308 | ValToSExtendedUses[HeadOfChain].push_back(I); | ||||
6309 | } | ||||
6310 | // Update Inst as promotion happened.
6311 | Inst = SpeculativelyMovedExts.pop_back_val(); | ||||
6312 | } else { | ||||
6313 | // This is the first chain visited from this header; keep the current chain
6314 | // as unhandled. Defer promoting it until we encounter another SExt
6315 | // chain derived from the same header.
6316 | for (auto *I : SpeculativelyMovedExts) { | ||||
6317 | Value *HeadOfChain = I->getOperand(0); | ||||
6318 | SeenChainsForSExt[HeadOfChain] = Inst; | ||||
6319 | } | ||||
6320 | return false; | ||||
6321 | } | ||||
6322 | |||||
6323 | if (!AllSeenFirst && !UnhandledExts.empty()) | ||||
6324 | for (auto *VisitedSExt : UnhandledExts) { | ||||
6325 | if (RemovedInsts.count(VisitedSExt)) | ||||
6326 | continue; | ||||
6327 | TypePromotionTransaction TPT(RemovedInsts); | ||||
6328 | SmallVector<Instruction *, 1> Exts; | ||||
6329 | SmallVector<Instruction *, 2> Chains; | ||||
6330 | Exts.push_back(VisitedSExt); | ||||
6331 | bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); | ||||
6332 | TPT.commit(); | ||||
6333 | if (HasPromoted) | ||||
6334 | Promoted = true; | ||||
6335 | for (auto *I : Chains) { | ||||
6336 | Value *HeadOfChain = I->getOperand(0); | ||||
6337 | // Mark this as handled. | ||||
6338 | SeenChainsForSExt[HeadOfChain] = nullptr; | ||||
6339 | ValToSExtendedUses[HeadOfChain].push_back(I); | ||||
6340 | } | ||||
6341 | } | ||||
6342 | return Promoted; | ||||
6343 | } | ||||
6344 | |||||
6345 | bool CodeGenPrepare::optimizeExtUses(Instruction *I) { | ||||
6346 | BasicBlock *DefBB = I->getParent(); | ||||
6347 | |||||
6348 | // If the result of a {s|z}ext and its source are both live out, rewrite all | ||||
6349 | // other uses of the source with the result of the extension.
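// For illustration (a hedged IR sketch): with %src and %ext both live out,
//   DefBB:   %ext = zext i32 %src to i64
//   UserBB:  ... uses i32 %src ...
// the remote use is rewritten through a (free) trunc of the extension,
//   UserBB:  %t = trunc i64 %ext to i32
// so that only %ext, rather than both values, stays live across the edge.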
6350 | Value *Src = I->getOperand(0); | ||||
6351 | if (Src->hasOneUse()) | ||||
6352 | return false; | ||||
6353 | |||||
6354 | // Only do this xform if truncating is free. | ||||
6355 | if (!TLI->isTruncateFree(I->getType(), Src->getType())) | ||||
6356 | return false; | ||||
6357 | |||||
6358 | // Only safe to perform the optimization if the source is also defined in | ||||
6359 | // this block. | ||||
6360 | if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) | ||||
6361 | return false; | ||||
6362 | |||||
6363 | bool DefIsLiveOut = false; | ||||
6364 | for (User *U : I->users()) { | ||||
6365 | Instruction *UI = cast<Instruction>(U); | ||||
6366 | |||||
6367 | // Figure out which BB this ext is used in. | ||||
6368 | BasicBlock *UserBB = UI->getParent(); | ||||
6369 | if (UserBB == DefBB) | ||||
6370 | continue; | ||||
6371 | DefIsLiveOut = true; | ||||
6372 | break; | ||||
6373 | } | ||||
6374 | if (!DefIsLiveOut) | ||||
6375 | return false; | ||||
6376 | |||||
6377 | // Make sure none of the uses are PHI nodes. | ||||
6378 | for (User *U : Src->users()) { | ||||
6379 | Instruction *UI = cast<Instruction>(U); | ||||
6380 | BasicBlock *UserBB = UI->getParent(); | ||||
6381 | if (UserBB == DefBB) | ||||
6382 | continue; | ||||
6383 | // Be conservative. We don't want this xform to end up introducing | ||||
6384 | // reloads just before load / store instructions. | ||||
6385 | if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) | ||||
6386 | return false; | ||||
6387 | } | ||||
6388 | |||||
6389 | // InsertedTruncs - Insert at most one trunc in each block.
6390 | DenseMap<BasicBlock *, Instruction *> InsertedTruncs; | ||||
6391 | |||||
6392 | bool MadeChange = false; | ||||
6393 | for (Use &U : Src->uses()) { | ||||
6394 | Instruction *User = cast<Instruction>(U.getUser()); | ||||
6395 | |||||
6396 | // Figure out which BB this ext is used in. | ||||
6397 | BasicBlock *UserBB = User->getParent(); | ||||
6398 | if (UserBB == DefBB) | ||||
6399 | continue; | ||||
6400 | |||||
6401 | // Both src and def are live in this block. Rewrite the use. | ||||
6402 | Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; | ||||
6403 | |||||
6404 | if (!InsertedTrunc) { | ||||
6405 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | ||||
6406 | assert(InsertPt != UserBB->end());
6407 | InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); | ||||
6408 | InsertedInsts.insert(InsertedTrunc); | ||||
6409 | } | ||||
6410 | |||||
6411 | // Replace a use of the {s|z}ext source with a use of the result. | ||||
6412 | U = InsertedTrunc; | ||||
6413 | ++NumExtUses; | ||||
6414 | MadeChange = true; | ||||
6415 | } | ||||
6416 | |||||
6417 | return MadeChange; | ||||
6418 | } | ||||
6419 | |||||
6420 | // Find loads whose uses only use some of the loaded value's bits. Add an "and" | ||||
6421 | // just after the load if the target can fold this into one extload instruction, | ||||
6422 | // with the hope of eliminating some of the other later "and" instructions using | ||||
6423 | // the loaded value. "and"s that are made trivially redundant by the insertion | ||||
6424 | // of the new "and" are removed by this function, while others (e.g. those whose | ||||
6425 | // path from the load goes through a phi) are left for isel to potentially | ||||
6426 | // remove. | ||||
6427 | // | ||||
6428 | // For example: | ||||
6429 | // | ||||
6430 | // b0: | ||||
6431 | // x = load i32 | ||||
6432 | // ... | ||||
6433 | // b1: | ||||
6434 | // y = and x, 0xff | ||||
6435 | // z = use y | ||||
6436 | // | ||||
6437 | // becomes: | ||||
6438 | // | ||||
6439 | // b0: | ||||
6440 | // x = load i32 | ||||
6441 | // x' = and x, 0xff | ||||
6442 | // ... | ||||
6443 | // b1: | ||||
6444 | // z = use x' | ||||
6445 | // | ||||
6446 | // whereas: | ||||
6447 | // | ||||
6448 | // b0: | ||||
6449 | // x1 = load i32 | ||||
6450 | // ... | ||||
6451 | // b1: | ||||
6452 | // x2 = load i32 | ||||
6453 | // ... | ||||
6454 | // b2: | ||||
6455 | // x = phi x1, x2 | ||||
6456 | // y = and x, 0xff | ||||
6457 | // | ||||
6458 | // becomes (after a call to optimizeLoadExt for each load): | ||||
6459 | // | ||||
6460 | // b0: | ||||
6461 | // x1 = load i32 | ||||
6462 | // x1' = and x1, 0xff | ||||
6463 | // ... | ||||
6464 | // b1: | ||||
6465 | // x2 = load i32 | ||||
6466 | // x2' = and x2, 0xff | ||||
6467 | // ... | ||||
6468 | // b2: | ||||
6469 | // x = phi x1', x2' | ||||
6470 | // y = and x, 0xff | ||||
6471 | bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { | ||||
6472 | if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) | ||||
6473 | return false; | ||||
6474 | |||||
6475 | // Skip loads we've already transformed. | ||||
6476 | if (Load->hasOneUse() && | ||||
6477 | InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) | ||||
6478 | return false; | ||||
6479 | |||||
6480 | // Look at all uses of Load, looking through phis, to determine how many bits | ||||
6481 | // of the loaded value are needed. | ||||
6482 | SmallVector<Instruction *, 8> WorkList; | ||||
6483 | SmallPtrSet<Instruction *, 16> Visited; | ||||
6484 | SmallVector<Instruction *, 8> AndsToMaybeRemove; | ||||
6485 | for (auto *U : Load->users()) | ||||
6486 | WorkList.push_back(cast<Instruction>(U)); | ||||
6487 | |||||
6488 | EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); | ||||
6489 | unsigned BitWidth = LoadResultVT.getSizeInBits(); | ||||
6490 | // If the BitWidth is 0, do not try to optimize the type.
6491 | if (BitWidth == 0) | ||||
6492 | return false; | ||||
6493 | |||||
6494 | APInt DemandBits(BitWidth, 0); | ||||
6495 | APInt WidestAndBits(BitWidth, 0); | ||||
6496 | |||||
6497 | while (!WorkList.empty()) { | ||||
6498 | Instruction *I = WorkList.pop_back_val(); | ||||
6499 | |||||
6500 | // Break use-def graph loops. | ||||
6501 | if (!Visited.insert(I).second) | ||||
6502 | continue; | ||||
6503 | |||||
6504 | // For a PHI node, push all of its users. | ||||
6505 | if (auto *Phi = dyn_cast<PHINode>(I)) { | ||||
6506 | for (auto *U : Phi->users()) | ||||
6507 | WorkList.push_back(cast<Instruction>(U)); | ||||
6508 | continue; | ||||
6509 | } | ||||
6510 | |||||
6511 | switch (I->getOpcode()) { | ||||
6512 | case Instruction::And: { | ||||
6513 | auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); | ||||
6514 | if (!AndC) | ||||
6515 | return false; | ||||
6516 | APInt AndBits = AndC->getValue(); | ||||
6517 | DemandBits |= AndBits; | ||||
6518 | // Keep track of the widest and mask we see. | ||||
6519 | if (AndBits.ugt(WidestAndBits)) | ||||
6520 | WidestAndBits = AndBits; | ||||
6521 | if (AndBits == WidestAndBits && I->getOperand(0) == Load) | ||||
6522 | AndsToMaybeRemove.push_back(I); | ||||
6523 | break; | ||||
6524 | } | ||||
6525 | |||||
6526 | case Instruction::Shl: { | ||||
6527 | auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); | ||||
6528 | if (!ShlC) | ||||
6529 | return false; | ||||
6530 | uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); | ||||
6531 | DemandBits.setLowBits(BitWidth - ShiftAmt); | ||||
6532 | break; | ||||
6533 | } | ||||
6534 | |||||
6535 | case Instruction::Trunc: { | ||||
6536 | EVT TruncVT = TLI->getValueType(*DL, I->getType()); | ||||
6537 | unsigned TruncBitWidth = TruncVT.getSizeInBits(); | ||||
6538 | DemandBits.setLowBits(TruncBitWidth); | ||||
6539 | break; | ||||
6540 | } | ||||
6541 | |||||
6542 | default: | ||||
6543 | return false; | ||||
6544 | } | ||||
6545 | } | ||||
6546 | |||||
6547 | uint32_t ActiveBits = DemandBits.getActiveBits(); | ||||
6548 | // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the | ||||
6549 | // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, | ||||
6550 | // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but | ||||
6551 | // (and (load x) 1) is not matched as a single instruction, rather as a LDR | ||||
6552 | // followed by an AND. | ||||
6553 | // TODO: Look into removing this restriction by fixing backends to either | ||||
6554 | // return false for isLoadExtLegal for i1 or have them select this pattern to | ||||
6555 | // a single instruction. | ||||
6556 | // | ||||
6557 | // Also avoid hoisting if we didn't see any ands with the exact DemandBits | ||||
6558 | // mask, since these are the only ands that will be removed by isel. | ||||
6559 | if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || | ||||
6560 | WidestAndBits != DemandBits) | ||||
6561 | return false; | ||||
6562 | |||||
6563 | LLVMContext &Ctx = Load->getType()->getContext(); | ||||
6564 | Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); | ||||
6565 | EVT TruncVT = TLI->getValueType(*DL, TruncTy); | ||||
6566 | |||||
6567 | // Reject cases that won't be matched as extloads. | ||||
6568 | if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || | ||||
6569 | !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) | ||||
6570 | return false; | ||||
6571 | |||||
6572 | IRBuilder<> Builder(Load->getNextNode()); | ||||
6573 | auto *NewAnd = cast<Instruction>( | ||||
6574 | Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); | ||||
6575 | // Mark this instruction as "inserted by CGP", so that other | ||||
6576 | // optimizations don't touch it. | ||||
6577 | InsertedInsts.insert(NewAnd); | ||||
6578 | |||||
6579 | // Replace all uses of load with new and (except for the use of load in the | ||||
6580 | // new and itself). | ||||
6581 | Load->replaceAllUsesWith(NewAnd); | ||||
6582 | NewAnd->setOperand(0, Load); | ||||
6583 | |||||
6584 | // Remove any and instructions that are now redundant. | ||||
6585 | for (auto *And : AndsToMaybeRemove) | ||||
6586 | // Check that the and mask is the same as the one we decided to put on the | ||||
6587 | // new and. | ||||
6588 | if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { | ||||
6589 | And->replaceAllUsesWith(NewAnd); | ||||
6590 | if (&*CurInstIterator == And) | ||||
6591 | CurInstIterator = std::next(And->getIterator()); | ||||
6592 | And->eraseFromParent(); | ||||
6593 | ++NumAndUses; | ||||
6594 | } | ||||
6595 | |||||
6596 | ++NumAndsAdded; | ||||
6597 | return true; | ||||
6598 | } | ||||
6599 | |||||
6600 | /// Check if V (an operand of a select instruction) is an expensive instruction | ||||
6601 | /// that is only used once. | ||||
6602 | static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { | ||||
6603 | auto *I = dyn_cast<Instruction>(V); | ||||
6604 | // If it's safe to speculatively execute, then it should not have side | ||||
6605 | // effects; therefore, it's safe to sink and possibly *not* execute. | ||||
6606 | return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && | ||||
6607 | TTI->isExpensiveToSpeculativelyExecute(I); | ||||
6608 | } | ||||
6609 | |||||
6610 | /// Returns true if a SelectInst should be turned into an explicit branch. | ||||
6611 | static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, | ||||
6612 | const TargetLowering *TLI, | ||||
6613 | SelectInst *SI) { | ||||
6614 | // If even a predictable select is cheap, then a branch can't be cheaper. | ||||
6615 | if (!TLI->isPredictableSelectExpensive()) | ||||
6616 | return false; | ||||
6617 | |||||
6618 | // FIXME: This should use the same heuristics as IfConversion to determine | ||||
6619 | // whether a select is better represented as a branch. | ||||
6620 | |||||
6621 | // If metadata tells us that the select condition is obviously predictable, | ||||
6622 | // then we want to replace the select with a branch. | ||||
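     | // For example (illustrative metadata), a select annotated with | ||||
     | //   !prof !{!"branch_weights", i32 99, i32 1} | ||||
     | // has a ~99% taken probability, which typically exceeds the target's | ||||
     | // predictable-branch threshold. | ||||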
6623 | uint64_t TrueWeight, FalseWeight; | ||||
6624 | if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) { | ||||
6625 | uint64_t Max = std::max(TrueWeight, FalseWeight); | ||||
6626 | uint64_t Sum = TrueWeight + FalseWeight; | ||||
6627 | if (Sum != 0) { | ||||
6628 | auto Probability = BranchProbability::getBranchProbability(Max, Sum); | ||||
6629 | if (Probability > TTI->getPredictableBranchThreshold()) | ||||
6630 | return true; | ||||
6631 | } | ||||
6632 | } | ||||
6633 | |||||
6634 | CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); | ||||
6635 | |||||
6636 | // If a branch is predictable, an out-of-order CPU can avoid blocking on its | ||||
6637 | // comparison condition. If the compare has more than one use, there's | ||||
6638 | // probably another cmov or setcc around, so it's not worth emitting a branch. | ||||
6639 | if (!Cmp || !Cmp->hasOneUse()) | ||||
6640 | return false; | ||||
6641 | |||||
6642 | // If either operand of the select is expensive and only needed on one side | ||||
6643 | // of the select, we should form a branch. | ||||
6644 | if (sinkSelectOperand(TTI, SI->getTrueValue()) || | ||||
6645 | sinkSelectOperand(TTI, SI->getFalseValue())) | ||||
6646 | return true; | ||||
6647 | |||||
6648 | return false; | ||||
6649 | } | ||||
6650 | |||||
6651 | /// If \p isTrue is true, return the true value of \p SI, otherwise return the | ||||
6652 | /// false value of \p SI. If the true/false value of \p SI is defined by any | ||||
6653 | /// select instructions in \p Selects, look through the defining select | ||||
6654 | /// instruction until the true/false value is not defined in \p Selects. | ||||
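     | /// E.g. (illustrative IR), with %s1 = select i1 %c, i32 %a, i32 %b and | ||||
     | /// %s2 = select i1 %c, i32 %s1, i32 %d, both in \p Selects, the true value | ||||
     | /// of %s2 resolves through %s1 to %a. | ||||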
6655 | static Value * | ||||
6656 | getTrueOrFalseValue(SelectInst *SI, bool isTrue, | ||||
6657 | const SmallPtrSet<const Instruction *, 2> &Selects) { | ||||
6658 | Value *V = nullptr; | ||||
6659 | |||||
6660 | for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); | ||||
6661 | DefSI = dyn_cast<SelectInst>(V)) { | ||||
6662 | assert(DefSI->getCondition() == SI->getCondition() && | ||||
6663 | "The condition of DefSI does not match with SI"); | ||||
6664 | V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); | ||||
6665 | } | ||||
6666 | |||||
6667 | assert(V && "Failed to get select true/false value"); | ||||
6668 | return V; | ||||
6669 | } | ||||
6670 | |||||
6671 | bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) { | ||||
6672 | assert(Shift->isShift() && "Expected a shift"); | ||||
6673 | |||||
6674 | // If this is (1) a vector shift, (2) shifts by scalars are cheaper than | ||||
6675 | // general vector shifts, and (3) the shift amount is a select-of-splatted | ||||
6676 | // values, hoist the shifts before the select: | ||||
6677 | // shift Op0, (select Cond, TVal, FVal) --> | ||||
6678 | // select Cond, (shift Op0, TVal), (shift Op0, FVal) | ||||
6679 | // | ||||
6680 | // This is inverting a generic IR transform when we know that the cost of a | ||||
6681 | // general vector shift is more than the cost of 2 shift-by-scalars. | ||||
6682 | // We can't do this effectively in SDAG because we may not be able to | ||||
6683 | // determine if the select operands are splats from within a basic block. | ||||
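     | // For example (illustrative IR): | ||||
     | //   %amt = select i1 %c, <4 x i32> <i32 2, i32 2, i32 2, i32 2>, | ||||
     | //                       <4 x i32> <i32 3, i32 3, i32 3, i32 3> | ||||
     | //   %r   = shl <4 x i32> %x, %amt | ||||
     | // becomes two shifts by splat constants wrapped in a select on %c. | ||||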
6684 | Type *Ty = Shift->getType(); | ||||
6685 | if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) | ||||
6686 | return false; | ||||
6687 | Value *Cond, *TVal, *FVal; | ||||
6688 | if (!match(Shift->getOperand(1), | ||||
6689 | m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) | ||||
6690 | return false; | ||||
6691 | if (!isSplatValue(TVal) || !isSplatValue(FVal)) | ||||
6692 | return false; | ||||
6693 | |||||
6694 | IRBuilder<> Builder(Shift); | ||||
6695 | BinaryOperator::BinaryOps Opcode = Shift->getOpcode(); | ||||
6696 | Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal); | ||||
6697 | Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal); | ||||
6698 | Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); | ||||
6699 | Shift->replaceAllUsesWith(NewSel); | ||||
6700 | Shift->eraseFromParent(); | ||||
6701 | return true; | ||||
6702 | } | ||||
6703 | |||||
6704 | bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) { | ||||
6705 | Intrinsic::ID Opcode = Fsh->getIntrinsicID(); | ||||
6706 | assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) && | ||||
6707 | "Expected a funnel shift"); | ||||
6708 | |||||
6709 | // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper | ||||
6710 | // than general vector shifts, and (3) the shift amount is select-of-splatted | ||||
6711 | // values, hoist the funnel shifts before the select: | ||||
6712 | // fsh Op0, Op1, (select Cond, TVal, FVal) --> | ||||
6713 | // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal) | ||||
6714 | // | ||||
6715 | // This is inverting a generic IR transform when we know that the cost of a | ||||
6716 | // general vector shift is more than the cost of 2 shift-by-scalars. | ||||
6717 | // We can't do this effectively in SDAG because we may not be able to | ||||
6718 | // determine if the select operands are splats from within a basic block. | ||||
6719 | Type *Ty = Fsh->getType(); | ||||
6720 | if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) | ||||
6721 | return false; | ||||
6722 | Value *Cond, *TVal, *FVal; | ||||
6723 | if (!match(Fsh->getOperand(2), | ||||
6724 | m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) | ||||
6725 | return false; | ||||
6726 | if (!isSplatValue(TVal) || !isSplatValue(FVal)) | ||||
6727 | return false; | ||||
6728 | |||||
6729 | IRBuilder<> Builder(Fsh); | ||||
6730 | Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1); | ||||
6731 | Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal}); | ||||
6732 | Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal}); | ||||
6733 | Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); | ||||
6734 | Fsh->replaceAllUsesWith(NewSel); | ||||
6735 | Fsh->eraseFromParent(); | ||||
6736 | return true; | ||||
6737 | } | ||||
6738 | |||||
6739 | /// If we have a SelectInst that will likely profit from branch prediction, | ||||
6740 | /// turn it into a branch. | ||||
6741 | bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { | ||||
6742 | if (DisableSelectToBranch) | ||||
6743 | return false; | ||||
6744 | |||||
6745 | // If the SelectOptimize pass is enabled, selects have already been optimized. | ||||
6746 | if (!getCGPassBuilderOption().DisableSelectOptimize) | ||||
6747 | return false; | ||||
6748 | |||||
6749 | // Find all consecutive select instructions that share the same condition. | ||||
6750 | SmallVector<SelectInst *, 2> ASI; | ||||
6751 | ASI.push_back(SI); | ||||
6752 | for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); | ||||
6753 | It != SI->getParent()->end(); ++It) { | ||||
6754 | SelectInst *I = dyn_cast<SelectInst>(&*It); | ||||
6755 | if (I && SI->getCondition() == I->getCondition()) { | ||||
6756 | ASI.push_back(I); | ||||
6757 | } else { | ||||
6758 | break; | ||||
6759 | } | ||||
6760 | } | ||||
6761 | |||||
6762 | SelectInst *LastSI = ASI.back(); | ||||
6763 | // Increment the current iterator to skip the rest of the select instructions, | ||||
6764 | // because they will either all be lowered to branches or none will be. | ||||
6765 | CurInstIterator = std::next(LastSI->getIterator()); | ||||
6766 | |||||
6767 | bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); | ||||
6768 | |||||
6769 | // Can we convert the 'select' to CF? | ||||
6770 | if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) | ||||
6771 | return false; | ||||
6772 | |||||
6773 | TargetLowering::SelectSupportKind SelectKind; | ||||
6774 | if (VectorCond) | ||||
6775 | SelectKind = TargetLowering::VectorMaskSelect; | ||||
6776 | else if (SI->getType()->isVectorTy()) | ||||
6777 | SelectKind = TargetLowering::ScalarCondVectorVal; | ||||
6778 | else | ||||
6779 | SelectKind = TargetLowering::ScalarValSelect; | ||||
6780 | |||||
6781 | if (TLI->isSelectSupported(SelectKind) && | ||||
6782 | (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize || | ||||
6783 | llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))) | ||||
6784 | return false; | ||||
6785 | |||||
6786 | // The DominatorTree needs to be rebuilt by any consumers after this | ||||
6787 | // transformation. We simply reset here rather than setting the ModifiedDT | ||||
6788 | // flag to avoid restarting the function walk in runOnFunction for each | ||||
6789 | // select optimized. | ||||
6790 | DT.reset(); | ||||
6791 | |||||
6792 | // Transform a sequence like this: | ||||
6793 | // start: | ||||
6794 | // %cmp = cmp uge i32 %a, %b | ||||
6795 | // %sel = select i1 %cmp, i32 %c, i32 %d | ||||
6796 | // | ||||
6797 | // Into: | ||||
6798 | // start: | ||||
6799 | // %cmp = cmp uge i32 %a, %b | ||||
6800 | // %cmp.frozen = freeze %cmp | ||||
6801 | // br i1 %cmp.frozen, label %select.true, label %select.false | ||||
6802 | // select.true: | ||||
6803 | // br label %select.end | ||||
6804 | // select.false: | ||||
6805 | // br label %select.end | ||||
6806 | // select.end: | ||||
6807 | // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] | ||||
6808 | // | ||||
6809 | // %cmp should be frozen, otherwise it may introduce undefined behavior. | ||||
6810 | // In addition, we may sink instructions that produce %c or %d from | ||||
6811 | // the entry block into the destination(s) of the new branch. | ||||
6812 | // If the true or false blocks do not contain a sunken instruction, that | ||||
6813 | // block and its branch may be optimized away. In that case, one side of the | ||||
6814 | // first branch will point directly to select.end, and the corresponding PHI | ||||
6815 | // predecessor block will be the start block. | ||||
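     | // For example, if %c were defined by an expensive single-use instruction | ||||
     | // such as %c = sdiv i32 %x, %y (whether sdiv qualifies is a target cost | ||||
     | // question), the sdiv would be moved into select.true.sink so it executes | ||||
     | // only when %cmp is true. | ||||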
6816 | |||||
6817 | // First, we split the block containing the select into 2 blocks. | ||||
6818 | BasicBlock *StartBlock = SI->getParent(); | ||||
6819 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); | ||||
6820 | BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); | ||||
6821 | BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency()); | ||||
6822 | |||||
6823 | // Delete the unconditional branch that was just created by the split. | ||||
6824 | StartBlock->getTerminator()->eraseFromParent(); | ||||
6825 | |||||
6826 | // These are the new basic blocks for the conditional branch. | ||||
6827 | // At least one will become an actual new basic block. | ||||
6828 | BasicBlock *TrueBlock = nullptr; | ||||
6829 | BasicBlock *FalseBlock = nullptr; | ||||
6830 | BranchInst *TrueBranch = nullptr; | ||||
6831 | BranchInst *FalseBranch = nullptr; | ||||
6832 | |||||
6833 | // Sink expensive instructions into the conditional blocks to avoid executing | ||||
6834 | // them speculatively. | ||||
6835 | for (SelectInst *SI : ASI) { | ||||
6836 | if (sinkSelectOperand(TTI, SI->getTrueValue())) { | ||||
6837 | if (TrueBlock == nullptr) { | ||||
6838 | TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", | ||||
6839 | EndBlock->getParent(), EndBlock); | ||||
6840 | TrueBranch = BranchInst::Create(EndBlock, TrueBlock); | ||||
6841 | TrueBranch->setDebugLoc(SI->getDebugLoc()); | ||||
6842 | } | ||||
6843 | auto *TrueInst = cast<Instruction>(SI->getTrueValue()); | ||||
6844 | TrueInst->moveBefore(TrueBranch); | ||||
6845 | } | ||||
6846 | if (sinkSelectOperand(TTI, SI->getFalseValue())) { | ||||
6847 | if (FalseBlock == nullptr) { | ||||
6848 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", | ||||
6849 | EndBlock->getParent(), EndBlock); | ||||
6850 | FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | ||||
6851 | FalseBranch->setDebugLoc(SI->getDebugLoc()); | ||||
6852 | } | ||||
6853 | auto *FalseInst = cast<Instruction>(SI->getFalseValue()); | ||||
6854 | FalseInst->moveBefore(FalseBranch); | ||||
6855 | } | ||||
6856 | } | ||||
6857 | |||||
6858 | // If there was nothing to sink, then arbitrarily choose the 'false' side | ||||
6859 | // for a new input value to the PHI. | ||||
6860 | if (TrueBlock == FalseBlock) { | ||||
6861 | assert(TrueBlock == nullptr && | ||||
6862 | "Unexpected basic block transform while optimizing select"); | ||||
6863 | |||||
6864 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", | ||||
6865 | EndBlock->getParent(), EndBlock); | ||||
6866 | auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | ||||
6867 | FalseBranch->setDebugLoc(SI->getDebugLoc()); | ||||
6868 | } | ||||
6869 | |||||
6870 | // Insert the real conditional branch based on the original condition. | ||||
6871 | // If we did not create a new block for one of the 'true' or 'false' paths | ||||
6872 | // of the condition, it means that side of the branch goes to the end block | ||||
6873 | // directly and the path originates from the start block from the point of | ||||
6874 | // view of the new PHI. | ||||
6875 | BasicBlock *TT, *FT; | ||||
6876 | if (TrueBlock == nullptr) { | ||||
6877 | TT = EndBlock; | ||||
6878 | FT = FalseBlock; | ||||
6879 | TrueBlock = StartBlock; | ||||
6880 | } else if (FalseBlock == nullptr) { | ||||
6881 | TT = TrueBlock; | ||||
6882 | FT = EndBlock; | ||||
6883 | FalseBlock = StartBlock; | ||||
6884 | } else { | ||||
6885 | TT = TrueBlock; | ||||
6886 | FT = FalseBlock; | ||||
6887 | } | ||||
6888 | IRBuilder<> IB(SI); | ||||
6889 | auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen"); | ||||
6890 | IB.CreateCondBr(CondFr, TT, FT, SI); | ||||
6891 | |||||
6892 | SmallPtrSet<const Instruction *, 2> INS; | ||||
6893 | INS.insert(ASI.begin(), ASI.end()); | ||||
6894 | // Use a reverse iterator because a later select may use the value of an | ||||
6895 | // earlier select, and we need to propagate the value through the earlier | ||||
6896 | // select to get the PHI operand. | ||||
6897 | for (SelectInst *SI : llvm::reverse(ASI)) { | ||||
6898 | // The select itself is replaced with a PHI Node. | ||||
6899 | PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); | ||||
6900 | PN->takeName(SI); | ||||
6901 | PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); | ||||
6902 | PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); | ||||
6903 | PN->setDebugLoc(SI->getDebugLoc()); | ||||
6904 | |||||
6905 | SI->replaceAllUsesWith(PN); | ||||
6906 | SI->eraseFromParent(); | ||||
6907 | INS.erase(SI); | ||||
6908 | ++NumSelectsExpanded; | ||||
6909 | } | ||||
6910 | |||||
6911 | // Instruct OptimizeBlock to skip to the next block. | ||||
6912 | CurInstIterator = StartBlock->end(); | ||||
6913 | return true; | ||||
6914 | } | ||||
6915 | |||||
6916 | /// Some targets only accept certain types for splat inputs. For example a VDUP | ||||
6917 | /// in MVE takes a GPR (integer) register, and instructions that incorporate | ||||
6918 | /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register. | ||||
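     | /// For example (illustrative IR), a float splat | ||||
     | ///   %i = insertelement <4 x float> poison, float %f, i64 0 | ||||
     | ///   %s = shufflevector <4 x float> %i, <4 x float> poison, zeroinitializer | ||||
     | /// may be rebuilt as: bitcast %f to i32, splat the i32, then bitcast the | ||||
     | /// splat vector back to <4 x float>, so the scalar input can live in a GPR. | ||||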
6919 | bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { | ||||
6920 | // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only | ||||
6921 | if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), | ||||
6922 | m_Undef(), m_ZeroMask()))) | ||||
6923 | return false; | ||||
6924 | Type *NewType = TLI->shouldConvertSplatType(SVI); | ||||
6925 | if (!NewType) | ||||
6926 | return false; | ||||
6927 | |||||
6928 | auto *SVIVecType = cast<FixedVectorType>(SVI->getType()); | ||||
6929 | assert(!NewType->isVectorTy() && "Expected a scalar type!"); | ||||
6930 | assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() && | ||||
6931 | "Expected a type of the same size!"); | ||||
6932 | auto *NewVecType = | ||||
6933 | FixedVectorType::get(NewType, SVIVecType->getNumElements()); | ||||
6934 | |||||
6935 | // Create a bitcast (shuffle (insert (bitcast(..)))) | ||||
6936 | IRBuilder<> Builder(SVI->getContext()); | ||||
6937 | Builder.SetInsertPoint(SVI); | ||||
6938 | Value *BC1 = Builder.CreateBitCast( | ||||
6939 | cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType); | ||||
6940 | Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1); | ||||
6941 | Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType); | ||||
6942 | |||||
6943 | SVI->replaceAllUsesWith(BC2); | ||||
6944 | RecursivelyDeleteTriviallyDeadInstructions( | ||||
6945 | SVI, TLInfo, nullptr, | ||||
6946 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | ||||
6947 | |||||
6948 | // Also hoist the bitcast up to its operand if they are not in the same | ||||
6949 | // block. | ||||
6950 | if (auto *BCI = dyn_cast<Instruction>(BC1)) | ||||
6951 | if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0))) | ||||
6952 | if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) && | ||||
6953 | !Op->isTerminator() && !Op->isEHPad()) | ||||
6954 | BCI->moveAfter(Op); | ||||
6955 | |||||
6956 | return true; | ||||
6957 | } | ||||
6958 | |||||
6959 | bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { | ||||
6960 | // If the operands of I can be folded into a target instruction together with | ||||
6961 | // I, duplicate and sink them. | ||||
6962 | SmallVector<Use *, 4> OpsToSink; | ||||
6963 | if (!TLI->shouldSinkOperands(I, OpsToSink)) | ||||
6964 | return false; | ||||
6965 | |||||
6966 | // OpsToSink can contain multiple uses in a use chain (e.g. | ||||
6967 | // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating | ||||
6968 | // uses must come first, so we process the ops in reverse order so as to not | ||||
6969 | // create invalid IR. | ||||
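     | // E.g., if both %u1 and %u2 above are to be sunk, %u1 must be re-created | ||||
     | // in this block before %u2; the reverse walk together with the | ||||
     | // NewInstructions map below preserves that order. | ||||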
6970 | BasicBlock *TargetBB = I->getParent(); | ||||
6971 | bool Changed = false; | ||||
6972 | SmallVector<Use *, 4> ToReplace; | ||||
6973 | Instruction *InsertPoint = I; | ||||
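     | // Number the instructions in this block once, so that the program-order | ||||
     | // queries when picking the insertion point below are cheap map lookups | ||||
     | // rather than repeated block scans. | ||||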
6974 | DenseMap<const Instruction *, unsigned long> InstOrdering; | ||||
6975 | unsigned long InstNumber = 0; | ||||
6976 | for (const auto &I : *TargetBB) | ||||
6977 | InstOrdering[&I] = InstNumber++; | ||||
6978 | |||||
6979 | for (Use *U : reverse(OpsToSink)) { | ||||
6980 | auto *UI = cast<Instruction>(U->get()); | ||||
6981 | if (isa<PHINode>(UI)) | ||||
6982 | continue; | ||||
6983 | if (UI->getParent() == TargetBB) { | ||||
6984 | if (InstOrdering[UI] < InstOrdering[InsertPoint]) | ||||
6985 | InsertPoint = UI; | ||||
6986 | continue; | ||||
6987 | } | ||||
6988 | ToReplace.push_back(U); | ||||
6989 | } | ||||
6990 | |||||
6991 | SetVector<Instruction *> MaybeDead; | ||||
6992 | DenseMap<Instruction *, Instruction *> NewInstructions; | ||||
6993 | for (Use *U : ToReplace) { | ||||
6994 | auto *UI = cast<Instruction>(U->get()); | ||||
6995 | Instruction *NI = UI->clone(); | ||||
6996 | NewInstructions[UI] = NI; | ||||
6997 | MaybeDead.insert(UI); | ||||
6998 | LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); | ||||
6999 | NI->insertBefore(InsertPoint); | ||||
7000 | InsertPoint = NI; | ||||
7001 | InsertedInsts.insert(NI); | ||||
7002 | |||||
7003 | // Update the use for the new instruction, making sure that we update the | ||||
7004 | // sunk instruction uses, if it is part of a chain that has already been | ||||
7005 | // sunk. | ||||
7006 | Instruction *OldI = cast<Instruction>(U->getUser()); | ||||
7007 | if (NewInstructions.count(OldI)) | ||||
7008 | NewInstructions[OldI]->setOperand(U->getOperandNo(), NI); | ||||
7009 | else | ||||
7010 | U->set(NI); | ||||
7011 | Changed = true; | ||||
7012 | } | ||||
7013 | |||||
7014 | // Remove instructions that are dead after sinking. | ||||
7015 | for (auto *I : MaybeDead) { | ||||
7016 | if (!I->hasNUsesOrMore(1)) { | ||||
7017 | LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n"); | ||||
7018 | I->eraseFromParent(); | ||||
7019 | } | ||||
7020 | } | ||||
7021 | |||||
7022 | return Changed; | ||||
7023 | } | ||||
7024 | |||||
7025 | bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) { | ||||
7026 | Value *Cond = SI->getCondition(); | ||||
7027 | Type *OldType = Cond->getType(); | ||||
7028 | LLVMContext &Context = Cond->getContext(); | ||||
7029 | EVT OldVT = TLI->getValueType(*DL, OldType); | ||||
7030 | MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT); | ||||
7031 | unsigned RegWidth = RegType.getSizeInBits(); | ||||
7032 | |||||
7033 | if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) | ||||
7034 | return false; | ||||
7035 | |||||
7036 | // If the register width is greater than the type width, expand the condition | ||||
7037 | // of the switch instruction and each case constant to the width of the | ||||
7038 | // register. By widening the type of the switch condition, subsequent | ||||
7039 | // comparisons (for case comparisons) will not need to be extended to the | ||||
7040 | // preferred register width, so we will potentially eliminate N-1 extends, | ||||
7041 | // where N is the number of cases in the switch. | ||||
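     | // For example (illustrative IR), on a target whose preferred switch | ||||
     | // condition type is i64: | ||||
     | //   switch i8 %x, label %def [ i8 1, label %a ] | ||||
     | // becomes: | ||||
     | //   %x.ext = zext i8 %x to i64 | ||||
     | //   switch i64 %x.ext, label %def [ i64 1, label %a ] | ||||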
7042 | auto *NewType = Type::getIntNTy(Context, RegWidth); | ||||
7043 | |||||
7044 | // Extend the switch condition and case constants using the target preferred | ||||
7045 | // extend unless the switch condition is a function argument with an extend | ||||
7046 | // attribute. In that case, we can avoid an unnecessary mask/extension by | ||||
7047 | // matching the argument extension instead. | ||||
7048 | Instruction::CastOps ExtType = Instruction::ZExt; | ||||
7049 | // Some targets prefer SExt over ZExt. | ||||
7050 | if (TLI->isSExtCheaperThanZExt(OldVT, RegType)) | ||||
7051 | ExtType = Instruction::SExt; | ||||
7052 | |||||
7053 | if (auto *Arg = dyn_cast<Argument>(Cond)) { | ||||
7054 | if (Arg->hasSExtAttr()) | ||||
7055 | ExtType = Instruction::SExt; | ||||
7056 | if (Arg->hasZExtAttr()) | ||||
7057 | ExtType = Instruction::ZExt; | ||||
7058 | } | ||||
7059 | |||||
7060 | auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); | ||||
7061 | ExtInst->insertBefore(SI); | ||||
7062 | ExtInst->setDebugLoc(SI->getDebugLoc()); | ||||
7063 | SI->setCondition(ExtInst); | ||||
7064 | for (auto Case : SI->cases()) { | ||||
7065 | const APInt &NarrowConst = Case.getCaseValue()->getValue(); | ||||
7066 | APInt WideConst = (ExtType == Instruction::ZExt) | ||||
7067 | ? NarrowConst.zext(RegWidth) | ||||
7068 | : NarrowConst.sext(RegWidth); | ||||
7069 | Case.setValue(ConstantInt::get(Context, WideConst)); | ||||
7070 | } | ||||
7071 | |||||
7072 | return true; | ||||
7073 | } | ||||
7074 | |||||
7075 | bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) { | ||||
7076 | // The SCCP optimization tends to produce code like this: | ||||
7077 | // switch(x) { case 42: phi(42, ...) } | ||||
7078 | // Materializing the constant for the phi-argument needs instructions, so we | ||||
7079 | // change the code to: | ||||
7080 | // switch(x) { case 42: phi(x, ...) } | ||||
7081 | |||||
7082 | Value *Condition = SI->getCondition(); | ||||
7083 | // Avoid endless loop in degenerate case. | ||||
7084 | if (isa<ConstantInt>(*Condition)) | ||||
7085 | return false; | ||||
7086 | |||||
7087 | bool Changed = false; | ||||
7088 | BasicBlock *SwitchBB = SI->getParent(); | ||||
7089 | Type *ConditionType = Condition->getType(); | ||||
7090 | |||||
7091 | for (const SwitchInst::CaseHandle &Case : SI->cases()) { | ||||
7092 | ConstantInt *CaseValue = Case.getCaseValue(); | ||||
7093 | BasicBlock *CaseBB = Case.getCaseSuccessor(); | ||||
7094 | // Set to true if we previously checked that `CaseBB` is only reached by | ||||
7095 | // a single case from this switch. | ||||
7096 | bool CheckedForSinglePred = false; | ||||
7097 | for (PHINode &PHI : CaseBB->phis()) { | ||||
7098 | Type *PHIType = PHI.getType(); | ||||
7099 | // If ZExt is free then we can also catch patterns like this: | ||||
7100 | // switch((i32)x) { case 42: phi((i64)42, ...); } | ||||
7101 | // and replace `(i64)42` with `zext i32 %x to i64`. | ||||
7102 | bool TryZExt = | ||||
7103 | PHIType->isIntegerTy() && | ||||
7104 | PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() && | ||||
7105 | TLI->isZExtFree(ConditionType, PHIType); | ||||
7106 | if (PHIType == ConditionType || TryZExt) { | ||||
7107 | // Set to true to skip this case because of multiple preds. | ||||
7108 | bool SkipCase = false; | ||||
7109 | Value *Replacement = nullptr; | ||||
7110 | for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) { | ||||
7111 | Value *PHIValue = PHI.getIncomingValue(I); | ||||
7112 | if (PHIValue != CaseValue) { | ||||
7113 | if (!TryZExt) | ||||
7114 | continue; | ||||
7115 | ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue); | ||||
7116 | if (!PHIValueInt || | ||||
7117 | PHIValueInt->getValue() != | ||||
7118 | CaseValue->getValue().zext(PHIType->getIntegerBitWidth())) | ||||
7119 | continue; | ||||
7120 | } | ||||
7121 | if (PHI.getIncomingBlock(I) != SwitchBB) | ||||
7122 | continue; | ||||
7123 | // We cannot optimize if there are multiple case labels jumping to | ||||
7124 | // this block. This check may get expensive when there are many | ||||
7125 | // case labels so we test for it last. | ||||
7126 | if (!CheckedForSinglePred) { | ||||
7127 | CheckedForSinglePred = true; | ||||
7128 | if (SI->findCaseDest(CaseBB) == nullptr) { | ||||
7129 | SkipCase = true; | ||||
7130 | break; | ||||
7131 | } | ||||
7132 | } | ||||
7133 | |||||
7134 | if (Replacement == nullptr) { | ||||
7135 | if (PHIValue == CaseValue) { | ||||
7136 | Replacement = Condition; | ||||
7137 | } else { | ||||
7138 | IRBuilder<> Builder(SI); | ||||
7139 | Replacement = Builder.CreateZExt(Condition, PHIType); | ||||
7140 | } | ||||
7141 | } | ||||
7142 | PHI.setIncomingValue(I, Replacement); | ||||
7143 | Changed = true; | ||||
7144 | } | ||||
7145 | if (SkipCase) | ||||
7146 | break; | ||||
7147 | } | ||||
7148 | } | ||||
7149 | } | ||||
7150 | return Changed; | ||||
7151 | } | ||||
7152 | |||||
7153 | bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { | ||||
7154 | bool Changed = optimizeSwitchType(SI); | ||||
7155 | Changed |= optimizeSwitchPhiConstants(SI); | ||||
7156 | return Changed; | ||||
7157 | } | ||||
7158 | |||||
7159 | namespace { | ||||
7160 | |||||
7161 | /// Helper class to promote a scalar operation to a vector one. | ||||
7162 | /// This class is used to move an extractelement transition downward. | ||||
7163 | /// E.g., | ||||
7164 | /// a = vector_op <2 x i32> | ||||
7165 | /// b = extractelement <2 x i32> a, i32 0 | ||||
7166 | /// c = scalar_op b | ||||
7167 | /// store c | ||||
7168 | /// | ||||
7169 | /// => | ||||
7170 | /// a = vector_op <2 x i32> | ||||
7171 | /// c = vector_op a (equivalent to scalar_op on the related lane) | ||||
7172 | /// * d = extractelement <2 x i32> c, i32 0 | ||||
7173 | /// * store d | ||||
7174 | /// Assuming both extractelement and store can be combined, we get rid of the | ||||
7175 | /// transition. | ||||
7176 | class VectorPromoteHelper { | ||||
7177 | /// DataLayout associated with the current module. | ||||
7178 | const DataLayout &DL; | ||||
7179 | |||||
7180 | /// Used to perform some checks on the legality of vector operations. | ||||
7181 | const TargetLowering &TLI; | ||||
7182 | |||||
7183 | /// Used to estimate the cost of the promoted chain. | ||||
7184 | const TargetTransformInfo &TTI; | ||||
7185 | |||||
7186 | /// The transition being moved downwards. | ||||
7187 | Instruction *Transition; | ||||
7188 | |||||
7189 | /// The sequence of instructions to be promoted. | ||||
7190 | SmallVector<Instruction *, 4> InstsToBePromoted; | ||||
7191 | |||||
7192 | /// Cost of combining a store and an extract. | ||||
7193 | unsigned StoreExtractCombineCost; | ||||
7194 | |||||
7195 | /// Instruction that will be combined with the transition. | ||||
7196 | Instruction *CombineInst = nullptr; | ||||
7197 | |||||
7198 | /// The instruction that represents the current end of the transition. | ||||
7199 | /// Since we are faking the promotion until we reach the end of the chain | ||||
7200 | /// of computation, we need a way to get the current end of the transition. | ||||
7201 | Instruction *getEndOfTransition() const { | ||||
7202 | if (InstsToBePromoted.empty()) | ||||
7203 | return Transition; | ||||
7204 | return InstsToBePromoted.back(); | ||||
7205 | } | ||||
7206 | |||||
7207 | /// Return the index of the original value in the transition. | ||||
7208 | /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, | ||||
7209 | /// c, is at index 0. | ||||
7210 | unsigned getTransitionOriginalValueIdx() const { | ||||
7211 | assert(isa<ExtractElementInst>(Transition) && | ||||
7212 | "Other kind of transitions are not supported yet"); | ||||
7213 | return 0; | ||||
7214 | } | ||||
7215 | |||||
7216 | /// Return the index of the index in the transition. | ||||
7217 | /// E.g., for "extractelement <2 x i32> c, i32 0" the index | ||||
7218 | /// is at index 1. | ||||
7219 | unsigned getTransitionIdx() const { | ||||
7220 | assert(isa<ExtractElementInst>(Transition) && | ||||
7221 | "Other kind of transitions are not supported yet"); | ||||
7222 | return 1; | ||||
7223 | } | ||||
7224 | |||||
7225 | /// Get the type of the transition. | ||||
7226 | /// This is the type of the original value. | ||||
7227 | /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the | ||||
7228 | /// transition is <2 x i32>. | ||||
7229 | Type *getTransitionType() const { | ||||
7230 | return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); | ||||
7231 | } | ||||
7232 | |||||
7233 | /// Promote \p ToBePromoted by moving \p Def downward through it. | ||||
7234 | /// I.e., we have the following sequence: | ||||
7235 | /// Def = Transition <ty1> a to <ty2> | ||||
7236 | /// b = ToBePromoted <ty2> Def, ... | ||||
7237 | /// => | ||||
7238 | /// b = ToBePromoted <ty1> a, ... | ||||
7239 | /// Def = Transition <ty1> ToBePromoted to <ty2> | ||||
7240 | void promoteImpl(Instruction *ToBePromoted); | ||||
7241 | |||||
7242 | /// Check whether or not it is profitable to promote all the | ||||
7243 | /// instructions enqueued to be promoted. | ||||
7244 | bool isProfitableToPromote() { | ||||
7245 | Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); | ||||
7246 | unsigned Index = isa<ConstantInt>(ValIdx) | ||||
7247 | ? cast<ConstantInt>(ValIdx)->getZExtValue() | ||||
7248 | : -1; | ||||
7249 | Type *PromotedType = getTransitionType(); | ||||
7250 | |||||
7251 | StoreInst *ST = cast<StoreInst>(CombineInst); | ||||
7252 | unsigned AS = ST->getPointerAddressSpace(); | ||||
7253 | // Check if this store is supported. | ||||
7254 | if (!TLI.allowsMisalignedMemoryAccesses( | ||||
7255 | TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, | ||||
7256 | ST->getAlign())) { | ||||
7257 | // If this is not supported, there is no way we can combine | ||||
7258 | // the extract with the store. | ||||
7259 | return false; | ||||
7260 | } | ||||
7261 | |||||
7262 | // The scalar chain of computation has to pay for the transition | ||||
7263 | // scalar to vector. | ||||
7264 | // The vector chain has to account for the combining cost. | ||||
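     | // E.g., with a single add in the chain: ScalarCost covers the | ||||
     | // extractelement plus a scalar add, while VectorCost covers the | ||||
     | // (presumed-foldable) store+extract combine plus a vector add; promotion | ||||
     | // proceeds only when ScalarCost > VectorCost. | ||||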
7265 | InstructionCost ScalarCost = | ||||
7266 | TTI.getVectorInstrCost(*Transition, PromotedType, Index); | ||||
7267 | InstructionCost VectorCost = StoreExtractCombineCost; | ||||
7268 | enum TargetTransformInfo::TargetCostKind CostKind = | ||||
7269 | TargetTransformInfo::TCK_RecipThroughput; | ||||
7270 | for (const auto &Inst : InstsToBePromoted) { | ||||
7271 | // Compute the cost. | ||||
7272 | // By construction, all instructions being promoted are arithmetic ones. | ||||
7273 | // Moreover, one argument is a constant that can be viewed as a splat | ||||
7274 | // constant. | ||||
7275 | Value *Arg0 = Inst->getOperand(0); | ||||
7276 | bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || | ||||
7277 | isa<ConstantFP>(Arg0); | ||||
7278 | TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info; | ||||
7279 | if (IsArg0Constant) | ||||
7280 | Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue; | ||||
7281 | else | ||||
7282 | Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue; | ||||
7283 | |||||
7284 | ScalarCost += TTI.getArithmeticInstrCost( | ||||
7285 | Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info); | ||||
7286 | VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, | ||||
7287 | CostKind, Arg0Info, Arg1Info); | ||||
7288 | } | ||||
7289 | LLVM_DEBUG( | ||||
7290 | dbgs() << "Estimated cost of computation to be promoted:\nScalar: " | ||||
7291 | << ScalarCost << "\nVector: " << VectorCost << '\n'); | ||||
7292 | return ScalarCost > VectorCost; | ||||
7293 | } | ||||
7294 | |||||
7295 | /// Generate a constant vector with \p Val with the same | ||||
7296 | /// number of elements as the transition. | ||||
7297 | /// \p UseSplat defines whether or not \p Val should be replicated | ||||
7298 | /// across the whole vector. | ||||
7299 | /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, | ||||
7300 | /// otherwise we generate a vector with as many undef as possible: | ||||
7301 | /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only | ||||
7302 | /// used at the index of the extract. | ||||
7303 | Value *getConstantVector(Constant *Val, bool UseSplat) const { | ||||
7304 | unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); | ||||
7305 | if (!UseSplat) { | ||||
7306 | // If we cannot determine where the constant must be, we have to | ||||
7307 | // use a splat constant. | ||||
7308 | Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); | ||||
7309 | if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) | ||||
7310 | ExtractIdx = CstVal->getSExtValue(); | ||||
7311 | else | ||||
7312 | UseSplat = true; | ||||
7313 | } | ||||
7314 | |||||
7315 | ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount(); | ||||
7316 | if (UseSplat) | ||||
7317 | return ConstantVector::getSplat(EC, Val); | ||||
7318 | |||||
7319 | if (!EC.isScalable()) { | ||||
7320 | SmallVector<Constant *, 4> ConstVec; | ||||
7321 | UndefValue *UndefVal = UndefValue::get(Val->getType()); | ||||
7322 | for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) { | ||||
7323 | if (Idx == ExtractIdx) | ||||
7324 | ConstVec.push_back(Val); | ||||
7325 | else | ||||
7326 | ConstVec.push_back(UndefVal); | ||||
7327 | } | ||||
7328 | return ConstantVector::get(ConstVec); | ||||
7329 | } else | ||||
7330 | llvm_unreachable( | ||||
7331 | "Generate scalable vector for non-splat is unimplemented"); | ||||
7332 | } | ||||
7333 | |||||
7334 | /// Check if promoting the operand at \p OperandIdx of \p Use to a vector | ||||
7335 | /// type can trigger undefined behavior. | ||||
7336 | static bool canCauseUndefinedBehavior(const Instruction *Use, | ||||
7337 | unsigned OperandIdx) { | ||||
7338 | // It is not safe to introduce undef when the operand is on | ||||
7339 | // the right-hand side of a division-like instruction. | ||||
7340 | if (OperandIdx != 1) | ||||
7341 | return false; | ||||
7342 | switch (Use->getOpcode()) { | ||||
7343 | default: | ||||
7344 | return false; | ||||
7345 | case Instruction::SDiv: | ||||
7346 | case Instruction::UDiv: | ||||
7347 | case Instruction::SRem: | ||||
7348 | case Instruction::URem: | ||||
7349 | return true; | ||||
7350 | case Instruction::FDiv: | ||||
7351 | case Instruction::FRem: | ||||
7352 | return !Use->hasNoNaNs(); | ||||
7353 | } | ||||
7354 | llvm_unreachable(nullptr); | ||||
7355 | } | ||||
7356 | |||||
7357 | public: | ||||
7358 | VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, | ||||
7359 | const TargetTransformInfo &TTI, Instruction *Transition, | ||||
7360 | unsigned CombineCost) | ||||
7361 | : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), | ||||
7362 | StoreExtractCombineCost(CombineCost) { | ||||
7363 | assert(Transition && "Do not know how to promote null"); | ||||
7364 | } | ||||
7365 | |||||
7366 | /// Check if we can promote \p ToBePromoted to \p Type. | ||||
7367 | bool canPromote(const Instruction *ToBePromoted) const { | ||||
7368 | // We could support CastInst too. | ||||
7369 | return isa<BinaryOperator>(ToBePromoted); | ||||
7370 | } | ||||
7371 | |||||
7372 | /// Check if it is profitable to promote \p ToBePromoted | ||||
7373 | /// by moving the transition downward through it. | ||||
7374 | bool shouldPromote(const Instruction *ToBePromoted) const { | ||||
7375 | // Promote only if all the operands can be statically expanded. | ||||
7376 | // Indeed, we do not want to introduce any new kind of transitions. | ||||
7377 | for (const Use &U : ToBePromoted->operands()) { | ||||
7378 | const Value *Val = U.get(); | ||||
7379 | if (Val == getEndOfTransition()) { | ||||
7380 | // If the use is a division and the transition is on the rhs, | ||||
7381 | // we cannot promote the operation, otherwise we may create a | ||||
7382 | // division by zero. | ||||
7383 | if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) | ||||
7384 | return false; | ||||
7385 | continue; | ||||
7386 | } | ||||
7387 | if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && | ||||
7388 | !isa<ConstantFP>(Val)) | ||||
7389 | return false; | ||||
7390 | } | ||||
7391 | // Check that the resulting operation is legal. | ||||
7392 | int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); | ||||
7393 | if (!ISDOpcode) | ||||
7394 | return false; | ||||
7395 | return StressStoreExtract || | ||||
7396 | TLI.isOperationLegalOrCustom( | ||||
7397 | ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); | ||||
7398 | } | ||||
7399 | |||||
7400 | /// Check whether or not \p Use can be combined | ||||
7401 | /// with the transition. | ||||
7402 | /// I.e., is it possible to do Use(Transition) => AnotherUse? | ||||
7403 | bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } | ||||
7404 | |||||
7405 | /// Record \p ToBePromoted as part of the chain to be promoted. | ||||
7406 | void enqueueForPromotion(Instruction *ToBePromoted) { | ||||
7407 | InstsToBePromoted.push_back(ToBePromoted); | ||||
7408 | } | ||||
7409 | |||||
7410 | /// Set the instruction that will be combined with the transition. | ||||
7411 | void recordCombineInstruction(Instruction *ToBeCombined) { | ||||
7412 | assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); | ||||
7413 | CombineInst = ToBeCombined; | ||||
7414 | } | ||||
7415 | |||||
7416 | /// Promote all the instructions enqueued for promotion if it is | ||||
7417 | /// profitable. | ||||
7418 | /// \return True if the promotion happened, false otherwise. | ||||
7419 | bool promote() { | ||||
7420 | // Check if there is something to promote. | ||||
7421 | // Right now, if we do not have anything to combine with, | ||||
7422 | // we assume the promotion is not profitable. | ||||
7423 | if (InstsToBePromoted.empty() || !CombineInst) | ||||
7424 | return false; | ||||
7425 | |||||
7426 | // Check cost. | ||||
7427 | if (!StressStoreExtract && !isProfitableToPromote()) | ||||
7428 | return false; | ||||
7429 | |||||
7430 | // Promote. | ||||
7431 | for (auto &ToBePromoted : InstsToBePromoted) | ||||
7432 | promoteImpl(ToBePromoted); | ||||
7433 | InstsToBePromoted.clear(); | ||||
7434 | return true; | ||||
7435 | } | ||||
7436 | }; | ||||
7437 | |||||
7438 | } // end anonymous namespace | ||||
7439 | |||||
7440 | void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { | ||||
7441 | // At this point, we know that all the operands of ToBePromoted but Def | ||||
7442 | // can be statically promoted. | ||||
7443 | // For Def, we need to use its parameter in ToBePromoted: | ||||
7444 | // b = ToBePromoted ty1 a | ||||
7445 | // Def = Transition ty1 b to ty2 | ||||
7446 | // Move the transition down. | ||||
7447 | // 1. Replace all uses of the promoted operation by the transition. | ||||
7448 | // = ... b => = ... Def. | ||||
7449 | assert(ToBePromoted->getType() == Transition->getType() && | ||||
7450 | "The type of the result of the transition does not match " | ||||
7451 | "the final type"); | ||||
7452 | ToBePromoted->replaceAllUsesWith(Transition); | ||||
7453 | // 2. Update the type of the uses. | ||||
7454 | // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. | ||||
7455 | Type *TransitionTy = getTransitionType(); | ||||
7456 | ToBePromoted->mutateType(TransitionTy); | ||||
7457 | // 3. Update all the operands of the promoted operation with promoted | ||||
7458 | // operands. | ||||
7459 | // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. | ||||
7460 | for (Use &U : ToBePromoted->operands()) { | ||||
7461 | Value *Val = U.get(); | ||||
7462 | Value *NewVal = nullptr; | ||||
7463 | if (Val == Transition) | ||||
7464 | NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); | ||||
7465 | else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || | ||||
7466 | isa<ConstantFP>(Val)) { | ||||
7467 | // Use a splat constant if it is not safe to use undef. | ||||
7468 | NewVal = getConstantVector( | ||||
7469 | cast<Constant>(Val), | ||||
7470 | isa<UndefValue>(Val) || | ||||
7471 | canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); | ||||
7472 | } else | ||||
7473 | llvm_unreachable("Did you modified shouldPromote and forgot to update "::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "llvm/lib/CodeGen/CodeGenPrepare.cpp", 7474) | ||||
7474 | "this?")::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "llvm/lib/CodeGen/CodeGenPrepare.cpp", 7474); | ||||
7475 | ToBePromoted->setOperand(U.getOperandNo(), NewVal); | ||||
7476 | } | ||||
7477 | Transition->moveAfter(ToBePromoted); | ||||
7478 | Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); | ||||
7479 | } | ||||
7480 | |||||
7481 | /// Some targets can do store(extractelement) with one instruction. | ||||
7482 | /// Try to push the extractelement towards the stores when the target | ||||
7483 | /// has this feature and this is profitable. | ||||
7484 | bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { | ||||
7485 | unsigned CombineCost = std::numeric_limits<unsigned>::max(); | ||||
7486 | if (DisableStoreExtract || | ||||
7487 | (!StressStoreExtract && | ||||
7488 | !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), | ||||
7489 | Inst->getOperand(1), CombineCost))) | ||||
7490 | return false; | ||||
7491 | |||||
7492 | // At this point we know that Inst is a vector to scalar transition. | ||||
7493 | // Try to move it down the def-use chain, until: | ||||
7494 | // - We can combine the transition with its single use | ||||
7495 | // => we got rid of the transition. | ||||
7496 | // - We escape the current basic block | ||||
7497 | //   => we would need to check that we are moving it to a cheaper place, and | ||||
7498 | // we do not do that for now. | ||||
7499 | BasicBlock *Parent = Inst->getParent(); | ||||
7500 | LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); | ||||
7501 | VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); | ||||
7502 | // If the transition has more than one use, assume this is not going to be | ||||
7503 | // beneficial. | ||||
7504 | while (Inst->hasOneUse()) { | ||||
7505 | Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); | ||||
7506 | LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); | ||||
7507 | |||||
7508 | if (ToBePromoted->getParent() != Parent) { | ||||
7509 | LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" | ||||
7510 | << ToBePromoted->getParent()->getName() | ||||
7511 | << ") than the transition (" << Parent->getName() | ||||
7512 | << ").\n"); | ||||
7513 | return false; | ||||
7514 | } | ||||
7515 | |||||
7516 | if (VPH.canCombine(ToBePromoted)) { | ||||
7517 | LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' | ||||
7518 | << "will be combined with: " << *ToBePromoted << '\n'); | ||||
7519 | VPH.recordCombineInstruction(ToBePromoted); | ||||
7520 | bool Changed = VPH.promote(); | ||||
7521 | NumStoreExtractExposed += Changed; | ||||
7522 | return Changed; | ||||
7523 | } | ||||
7524 | |||||
7525 | LLVM_DEBUG(dbgs() << "Try promoting.\n"); | ||||
7526 | if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) | ||||
7527 | return false; | ||||
7528 | |||||
7529 | LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); | ||||
7530 | |||||
7531 | VPH.enqueueForPromotion(ToBePromoted); | ||||
7532 | Inst = ToBePromoted; | ||||
7533 | } | ||||
7534 | return false; | ||||
7535 | } | ||||
7536 | |||||
7537 | /// For the store instruction sequence below, the F and I values | ||||
7538 | /// are bundled together as an i64 value before being stored into memory. | ||||
7539 | /// Sometimes it is more efficient to generate separate stores for F and I, | ||||
7540 | /// which can remove the bitwise instructions or sink them to colder places. | ||||
7541 | /// | ||||
7542 | /// (store (or (zext (bitcast F to i32) to i64), | ||||
7543 | /// (shl (zext I to i64), 32)), addr) --> | ||||
7544 | /// (store F, addr) and (store I, addr+4) | ||||
7545 | /// | ||||
7546 | /// Similarly, splitting other merged stores can also be beneficial, for example: | ||||
7547 | /// For pair of {i32, i32}, i64 store --> two i32 stores. | ||||
7548 | /// For pair of {i32, i16}, i64 store --> two i32 stores. | ||||
7549 | /// For pair of {i16, i16}, i32 store --> two i16 stores. | ||||
7550 | /// For pair of {i16, i8}, i32 store --> two i16 stores. | ||||
7551 | /// For pair of {i8, i8}, i16 store --> two i8 stores. | ||||
7552 | /// | ||||
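/// As a sketch of the {i32, i32} case (hypothetical IR, assuming a
/// little-endian target; %lo, %hi and %addr are illustrative names):
/// \code
///   %lz = zext i32 %lo to i64
///   %hz = zext i32 %hi to i64
///   %hs = shl i64 %hz, 32
///   %v  = or i64 %lz, %hs
///   store i64 %v, i64* %addr
///     -->
///   store i32 %lo, i32* %addr
///   store i32 %hi, i32* %addr.1   ; %addr advanced by one i32 (4 bytes)
/// \endcode
///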
7553 | /// We allow each target to determine specifically which kind of splitting is | ||||
7554 | /// supported. | ||||
7555 | /// | ||||
7556 | /// The store patterns are commonly seen in code like the snippet below, when | ||||
7557 | /// only std::make_pair(...) is SROA-transformed before being inlined into hoo. | ||||
7558 | /// void goo(const std::pair<int, float> &); | ||||
7559 | /// void hoo() { | ||||
7560 | /// ... | ||||
7561 | /// goo(std::make_pair(tmp, ftmp)); | ||||
7562 | /// ... | ||||
7563 | /// } | ||||
7564 | /// | ||||
7565 | /// Although we already have similar splitting in DAG Combine, we duplicate | ||||
7566 | /// it in CodeGenPrepare to catch the case in which the pattern spans | ||||
7567 | /// multiple BBs. The logic in DAG Combine is kept to catch cases generated | ||||
7568 | /// during code expansion. | ||||
7569 | static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, | ||||
7570 | const TargetLowering &TLI) { | ||||
7571 | // Handle simple but common cases only. | ||||
7572 | Type *StoreType = SI.getValueOperand()->getType(); | ||||
7573 | |||||
7574 | // The code below assumes shifting a value by <number of bits>, | ||||
7575 | // whereas scalable vectors would have to be shifted by | ||||
7576 |   // <log2(vscale) + number of bits> in order to store the | ||||
7577 | // low/high parts. Bailing out for now. | ||||
7578 | if (isa<ScalableVectorType>(StoreType)) | ||||
7579 | return false; | ||||
7580 | |||||
7581 | if (!DL.typeSizeEqualsStoreSize(StoreType) || | ||||
7582 | DL.getTypeSizeInBits(StoreType) == 0) | ||||
7583 | return false; | ||||
7584 | |||||
7585 | unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; | ||||
7586 | Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); | ||||
7587 | if (!DL.typeSizeEqualsStoreSize(SplitStoreType)) | ||||
7588 | return false; | ||||
7589 | |||||
7590 | // Don't split the store if it is volatile. | ||||
7591 | if (SI.isVolatile()) | ||||
7592 | return false; | ||||
7593 | |||||
7594 | // Match the following patterns: | ||||
7595 | // (store (or (zext LValue to i64), | ||||
7596 | // (shl (zext HValue to i64), 32)), HalfValBitSize) | ||||
7597 | // or | ||||
7598 |   // (store (or (shl (zext HValue to i64), 32), | ||||
7599 |   //            (zext LValue to i64)), HalfValBitSize) | ||||
7600 |   // Expect both operands of the OR and the first operand of the SHL to | ||||
7601 |   // have only one use. | ||||
7602 | Value *LValue, *HValue; | ||||
7603 | if (!match(SI.getValueOperand(), | ||||
7604 | m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), | ||||
7605 | m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), | ||||
7606 | m_SpecificInt(HalfValBitSize)))))) | ||||
7607 | return false; | ||||
7608 | |||||
7609 |   // Check that LValue and HValue are integers no wider than HalfValBitSize. | ||||
7610 | if (!LValue->getType()->isIntegerTy() || | ||||
7611 | DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || | ||||
7612 | !HValue->getType()->isIntegerTy() || | ||||
7613 | DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) | ||||
7614 | return false; | ||||
7615 | |||||
7616 | // If LValue/HValue is a bitcast instruction, use the EVT before bitcast | ||||
7617 | // as the input of target query. | ||||
7618 | auto *LBC = dyn_cast<BitCastInst>(LValue); | ||||
7619 | auto *HBC = dyn_cast<BitCastInst>(HValue); | ||||
7620 | EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) | ||||
7621 | : EVT::getEVT(LValue->getType()); | ||||
7622 | EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) | ||||
7623 | : EVT::getEVT(HValue->getType()); | ||||
7624 | if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) | ||||
7625 | return false; | ||||
7626 | |||||
7627 | // Start to split store. | ||||
7628 | IRBuilder<> Builder(SI.getContext()); | ||||
7629 | Builder.SetInsertPoint(&SI); | ||||
7630 | |||||
7631 |   // If LValue/HValue is a bitcast in another BB, create a new one in the | ||||
7632 |   // current BB so it may be merged with the split stores by the DAG combiner. | ||||
7633 | if (LBC && LBC->getParent() != SI.getParent()) | ||||
7634 | LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); | ||||
7635 | if (HBC && HBC->getParent() != SI.getParent()) | ||||
7636 | HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); | ||||
7637 | |||||
7638 | bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); | ||||
7639 | auto CreateSplitStore = [&](Value *V, bool Upper) { | ||||
7640 | V = Builder.CreateZExtOrBitCast(V, SplitStoreType); | ||||
7641 | Value *Addr = Builder.CreateBitCast( | ||||
7642 | SI.getOperand(1), | ||||
7643 | SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); | ||||
7644 | Align Alignment = SI.getAlign(); | ||||
7645 | const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper); | ||||
7646 | if (IsOffsetStore) { | ||||
7647 | Addr = Builder.CreateGEP( | ||||
7648 | SplitStoreType, Addr, | ||||
7649 | ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); | ||||
7650 | |||||
7651 | // When splitting the store in half, naturally one half will retain the | ||||
7652 | // alignment of the original wider store, regardless of whether it was | ||||
7653 | // over-aligned or not, while the other will require adjustment. | ||||
7654 | Alignment = commonAlignment(Alignment, HalfValBitSize / 8); | ||||
7655 | } | ||||
7656 | Builder.CreateAlignedStore(V, Addr, Alignment); | ||||
7657 | }; | ||||
7658 | |||||
7659 | CreateSplitStore(LValue, false); | ||||
7660 | CreateSplitStore(HValue, true); | ||||
7661 | |||||
7662 | // Delete the old store. | ||||
7663 | SI.eraseFromParent(); | ||||
7664 | return true; | ||||
7665 | } | ||||
7666 | |||||
7667 | // Return true if the GEP has two operands, its first indexed type is | ||||
7668 | // sequential, and its second operand is a constant. | ||||
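// For example, a GEP of the following shape (hypothetical IR) qualifies:
//   %g = getelementptr i32, i32* %p, i64 4
// It has exactly two operands, the indexed type is sequential, and the
// index is a ConstantInt.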
7669 | static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { | ||||
7670 | gep_type_iterator I = gep_type_begin(*GEP); | ||||
7671 | return GEP->getNumOperands() == 2 && I.isSequential() && | ||||
7672 | isa<ConstantInt>(GEP->getOperand(1)); | ||||
7673 | } | ||||
7674 | |||||
7675 | // Try unmerging GEPs to reduce liveness interference (register pressure) across | ||||
7676 | // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, | ||||
7677 | // reducing liveness interference across those edges benefits global register | ||||
7678 | // allocation. Currently handles only certain cases. | ||||
7679 | // | ||||
7680 | // For example, unmerge %GEPI and %UGEPI as below. | ||||
7681 | // | ||||
7682 | // ---------- BEFORE ---------- | ||||
7683 | // SrcBlock: | ||||
7684 | // ... | ||||
7685 | // %GEPIOp = ... | ||||
7686 | // ... | ||||
7687 | // %GEPI = gep %GEPIOp, Idx | ||||
7688 | // ... | ||||
7689 | // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] | ||||
7690 | // (* %GEPI is alive on the indirectbr edges due to other uses ahead) | ||||
7691 | // (* %GEPIOp is alive on the indirectbr edges only because it's used by | ||||
7692 | // %UGEPI) | ||||
7693 | // | ||||
7694 | // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) | ||||
7695 | // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) | ||||
7696 | // ... | ||||
7697 | // | ||||
7698 | // DstBi: | ||||
7699 | // ... | ||||
7700 | // %UGEPI = gep %GEPIOp, UIdx | ||||
7701 | // ... | ||||
7702 | // --------------------------- | ||||
7703 | // | ||||
7704 | // ---------- AFTER ---------- | ||||
7705 | // SrcBlock: | ||||
7706 | // ... (same as above) | ||||
7707 | // (* %GEPI is still alive on the indirectbr edges) | ||||
7708 | // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the | ||||
7709 | // unmerging) | ||||
7710 | // ... | ||||
7711 | // | ||||
7712 | // DstBi: | ||||
7713 | // ... | ||||
7714 | // %UGEPI = gep %GEPI, (UIdx-Idx) | ||||
7715 | // ... | ||||
7716 | // --------------------------- | ||||
7717 | // | ||||
7718 | // The register pressure on the IndirectBr edges is reduced because %GEPIOp is | ||||
7719 | // no longer alive on them. | ||||
7720 | // | ||||
7721 | // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging | ||||
7722 | // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as | ||||
7723 | // not to disable further simplifications and optimizations as a result of GEP | ||||
7724 | // merging. | ||||
7725 | // | ||||
7726 | // Note this unmerging may increase the length of the data-flow critical path | ||||
7727 | // (the path from %GEPIOp to %UGEPI would go through %GEPI), so there is a | ||||
7728 | // tradeoff between register pressure and the length of the data-flow critical | ||||
7729 | // path. Restricting this to the uncommon IndirectBr case minimizes the impact | ||||
7730 | // of a potentially longer critical path, if any, as well as the impact on | ||||
7731 | // compile time. | ||||
7732 | static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, | ||||
7733 | const TargetTransformInfo *TTI) { | ||||
7734 | BasicBlock *SrcBlock = GEPI->getParent(); | ||||
7735 | // Check that SrcBlock ends with an IndirectBr. If not, give up. The common | ||||
7736 | // (non-IndirectBr) cases exit early here. | ||||
7737 | if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) | ||||
7738 | return false; | ||||
7739 | // Check that GEPI is a simple gep with a single constant index. | ||||
7740 | if (!GEPSequentialConstIndexed(GEPI)) | ||||
7741 | return false; | ||||
7742 | ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); | ||||
7743 | // Check that GEPI is a cheap one. | ||||
7744 | if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(), | ||||
7745 | TargetTransformInfo::TCK_SizeAndLatency) > | ||||
7746 | TargetTransformInfo::TCC_Basic) | ||||
7747 | return false; | ||||
7748 | Value *GEPIOp = GEPI->getOperand(0); | ||||
7749 | // Check that GEPIOp is an instruction that's also defined in SrcBlock. | ||||
7750 | if (!isa<Instruction>(GEPIOp)) | ||||
7751 | return false; | ||||
7752 | auto *GEPIOpI = cast<Instruction>(GEPIOp); | ||||
7753 | if (GEPIOpI->getParent() != SrcBlock) | ||||
7754 | return false; | ||||
7755 |   // Check that GEPI is used outside the block, meaning it's alive on the | ||||
7756 | // IndirectBr edge(s). | ||||
7757 | if (llvm::none_of(GEPI->users(), [&](User *Usr) { | ||||
7758 | if (auto *I = dyn_cast<Instruction>(Usr)) { | ||||
7759 | if (I->getParent() != SrcBlock) { | ||||
7760 | return true; | ||||
7761 | } | ||||
7762 | } | ||||
7763 | return false; | ||||
7764 | })) | ||||
7765 | return false; | ||||
7766 | // The second elements of the GEP chains to be unmerged. | ||||
7767 | std::vector<GetElementPtrInst *> UGEPIs; | ||||
7768 |   // Check each user of GEPIOp to see if unmerging would make GEPIOp no longer | ||||
7769 |   // alive on the IndirectBr edges. | ||||
7770 | for (User *Usr : GEPIOp->users()) { | ||||
7771 | if (Usr == GEPI) | ||||
7772 | continue; | ||||
7773 | // Check if Usr is an Instruction. If not, give up. | ||||
7774 | if (!isa<Instruction>(Usr)) | ||||
7775 | return false; | ||||
7776 | auto *UI = cast<Instruction>(Usr); | ||||
7777 |     // If Usr is in the same block as GEPIOp, that is fine; skip it. | ||||
7778 | if (UI->getParent() == SrcBlock) | ||||
7779 | continue; | ||||
7780 | // Check if Usr is a GEP. If not, give up. | ||||
7781 | if (!isa<GetElementPtrInst>(Usr)) | ||||
7782 | return false; | ||||
7783 | auto *UGEPI = cast<GetElementPtrInst>(Usr); | ||||
7784 | // Check if UGEPI is a simple gep with a single constant index and GEPIOp is | ||||
7785 | // the pointer operand to it. If so, record it in the vector. If not, give | ||||
7786 | // up. | ||||
7787 | if (!GEPSequentialConstIndexed(UGEPI)) | ||||
7788 | return false; | ||||
7789 | if (UGEPI->getOperand(0) != GEPIOp) | ||||
7790 | return false; | ||||
7791 | if (GEPIIdx->getType() != | ||||
7792 | cast<ConstantInt>(UGEPI->getOperand(1))->getType()) | ||||
7793 | return false; | ||||
7794 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | ||||
7795 | if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(), | ||||
7796 | TargetTransformInfo::TCK_SizeAndLatency) > | ||||
7797 | TargetTransformInfo::TCC_Basic) | ||||
7798 | return false; | ||||
7799 | UGEPIs.push_back(UGEPI); | ||||
7800 | } | ||||
7801 |   if (UGEPIs.empty()) | ||||
7802 | return false; | ||||
7803 |   // Check the materialization cost of (UIdx-Idx). | ||||
7804 | for (GetElementPtrInst *UGEPI : UGEPIs) { | ||||
7805 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | ||||
7806 | APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); | ||||
7807 | InstructionCost ImmCost = TTI->getIntImmCost( | ||||
7808 | NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency); | ||||
7809 | if (ImmCost > TargetTransformInfo::TCC_Basic) | ||||
7810 | return false; | ||||
7811 | } | ||||
7812 | // Now unmerge between GEPI and UGEPIs. | ||||
7813 | for (GetElementPtrInst *UGEPI : UGEPIs) { | ||||
7814 | UGEPI->setOperand(0, GEPI); | ||||
7815 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | ||||
7816 | Constant *NewUGEPIIdx = ConstantInt::get( | ||||
7817 | GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue()); | ||||
7818 | UGEPI->setOperand(1, NewUGEPIIdx); | ||||
7819 | // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not | ||||
7820 | // inbounds to avoid UB. | ||||
7821 | if (!GEPI->isInBounds()) { | ||||
7822 | UGEPI->setIsInBounds(false); | ||||
7823 | } | ||||
7824 | } | ||||
7825 | // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not | ||||
7826 | // alive on IndirectBr edges). | ||||
7827 |   assert(llvm::none_of(GEPIOp->users(), | ||||
7828 |                        [&](User *Usr) { | ||||
7829 |                          return cast<Instruction>(Usr)->getParent() != SrcBlock; | ||||
7830 |                        }) && | ||||
7831 |          "GEPIOp is used outside SrcBlock"); | ||||
7832 | return true; | ||||
7833 | } | ||||
7834 | |||||
7835 | static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI) { | ||||
7836 | // Try and convert | ||||
7837 | // %c = icmp ult %x, 8 | ||||
7838 | // br %c, bla, blb | ||||
7839 | // %tc = lshr %x, 3 | ||||
7840 | // to | ||||
7841 | // %tc = lshr %x, 3 | ||||
7842 | // %c = icmp eq %tc, 0 | ||||
7843 | // br %c, bla, blb | ||||
7844 | // Creating the cmp to zero can be better for the backend, especially if the | ||||
7845 | // lshr produces flags that can be used automatically. | ||||
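// The equality case handled below performs the analogous rewrite; for
// example (hypothetical values):
//   %c = icmp eq %x, 5
//   %s = sub %x, 5
// becomes
//   %s = sub %x, 5
//   %c = icmp eq %s, 0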
7846 | if (!TLI.preferZeroCompareBranch() || !Branch->isConditional()) | ||||
7847 | return false; | ||||
7848 | |||||
7849 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition()); | ||||
7850 | if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse()) | ||||
7851 | return false; | ||||
7852 | |||||
7853 | Value *X = Cmp->getOperand(0); | ||||
7854 | APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue(); | ||||
7855 | |||||
7856 | for (auto *U : X->users()) { | ||||
7857 | Instruction *UI = dyn_cast<Instruction>(U); | ||||
7858 | // A quick dominance check | ||||
7859 | if (!UI || | ||||
7860 | (UI->getParent() != Branch->getParent() && | ||||
7861 | UI->getParent() != Branch->getSuccessor(0) && | ||||
7862 | UI->getParent() != Branch->getSuccessor(1)) || | ||||
7863 | (UI->getParent() != Branch->getParent() && | ||||
7864 | !UI->getParent()->getSinglePredecessor())) | ||||
7865 | continue; | ||||
7866 | |||||
7867 | if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT && | ||||
7868 | match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) { | ||||
7869 | IRBuilder<> Builder(Branch); | ||||
7870 | if (UI->getParent() != Branch->getParent()) | ||||
7871 | UI->moveBefore(Branch); | ||||
7872 | Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI, | ||||
7873 | ConstantInt::get(UI->getType(), 0)); | ||||
7874 | LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n"); | ||||
7875 | LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n"); | ||||
7876 | Cmp->replaceAllUsesWith(NewCmp); | ||||
7877 | return true; | ||||
7878 | } | ||||
7879 | if (Cmp->isEquality() && | ||||
7880 | (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) || | ||||
7881 | match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) { | ||||
7882 | IRBuilder<> Builder(Branch); | ||||
7883 | if (UI->getParent() != Branch->getParent()) | ||||
7884 | UI->moveBefore(Branch); | ||||
7885 | Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI, | ||||
7886 | ConstantInt::get(UI->getType(), 0)); | ||||
7887 | LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n"); | ||||
7888 | LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n"); | ||||
7889 | Cmp->replaceAllUsesWith(NewCmp); | ||||
7890 | return true; | ||||
7891 | } | ||||
7892 | } | ||||
7893 | return false; | ||||
7894 | } | ||||
7895 | |||||
7896 | bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) { | ||||
7897 | // Bail out if we inserted the instruction to prevent optimizations from | ||||
7898 | // stepping on each other's toes. | ||||
7899 | if (InsertedInsts.count(I)) | ||||
7900 | return false; | ||||
7901 | |||||
7902 | // TODO: Move into the switch on opcode below here. | ||||
7903 | if (PHINode *P = dyn_cast<PHINode>(I)) { | ||||
7904 | // It is possible for very late stage optimizations (such as SimplifyCFG) | ||||
7905 | // to introduce PHI nodes too late to be cleaned up. If we detect such a | ||||
7906 | // trivial PHI, go ahead and zap it here. | ||||
7907 | if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) { | ||||
7908 | LargeOffsetGEPMap.erase(P); | ||||
7909 | P->replaceAllUsesWith(V); | ||||
7910 | P->eraseFromParent(); | ||||
7911 | ++NumPHIsElim; | ||||
7912 | return true; | ||||
7913 | } | ||||
7914 | return false; | ||||
7915 | } | ||||
7916 | |||||
7917 | if (CastInst *CI = dyn_cast<CastInst>(I)) { | ||||
7918 | // If the source of the cast is a constant, then this should have | ||||
7919 | // already been constant folded. The only reason NOT to constant fold | ||||
7920 | // it is if something (e.g. LSR) was careful to place the constant | ||||
7921 |     // evaluation in a block other than the one that uses it (e.g. to hoist | ||||
7922 | // the address of globals out of a loop). If this is the case, we don't | ||||
7923 | // want to forward-subst the cast. | ||||
7924 | if (isa<Constant>(CI->getOperand(0))) | ||||
7925 | return false; | ||||
7926 | |||||
7927 | if (OptimizeNoopCopyExpression(CI, *TLI, *DL)) | ||||
7928 | return true; | ||||
7929 | |||||
7930 | if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { | ||||
7931 |       // Sink a zext or sext into its user blocks if the target type doesn't | ||||
7932 |       // fit in one register. | ||||
7933 | if (TLI->getTypeAction(CI->getContext(), | ||||
7934 | TLI->getValueType(*DL, CI->getType())) == | ||||
7935 | TargetLowering::TypeExpandInteger) { | ||||
7936 | return SinkCast(CI); | ||||
7937 | } else { | ||||
7938 | bool MadeChange = optimizeExt(I); | ||||
7939 | return MadeChange | optimizeExtUses(I); | ||||
7940 | } | ||||
7941 | } | ||||
7942 | return false; | ||||
7943 | } | ||||
7944 | |||||
7945 | if (auto *Cmp = dyn_cast<CmpInst>(I)) | ||||
7946 | if (optimizeCmp(Cmp, ModifiedDT)) | ||||
7947 | return true; | ||||
7948 | |||||
7949 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | ||||
7950 | LI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | ||||
7951 | bool Modified = optimizeLoadExt(LI); | ||||
7952 | unsigned AS = LI->getPointerAddressSpace(); | ||||
7953 | Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); | ||||
7954 | return Modified; | ||||
7955 | } | ||||
7956 | |||||
7957 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) { | ||||
7958 | if (splitMergedValStore(*SI, *DL, *TLI)) | ||||
7959 | return true; | ||||
7960 | SI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | ||||
7961 | unsigned AS = SI->getPointerAddressSpace(); | ||||
7962 | return optimizeMemoryInst(I, SI->getOperand(1), | ||||
7963 | SI->getOperand(0)->getType(), AS); | ||||
7964 | } | ||||
7965 | |||||
7966 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { | ||||
7967 | unsigned AS = RMW->getPointerAddressSpace(); | ||||
7968 | return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS); | ||||
7969 | } | ||||
7970 | |||||
7971 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) { | ||||
7972 | unsigned AS = CmpX->getPointerAddressSpace(); | ||||
7973 | return optimizeMemoryInst(I, CmpX->getPointerOperand(), | ||||
7974 | CmpX->getCompareOperand()->getType(), AS); | ||||
7975 | } | ||||
7976 | |||||
7977 | BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); | ||||
7978 | |||||
7979 | if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking && | ||||
7980 | sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts)) | ||||
7981 | return true; | ||||
7982 | |||||
7983 | // TODO: Move this into the switch on opcode - it handles shifts already. | ||||
7984 | if (BinOp && (BinOp->getOpcode() == Instruction::AShr || | ||||
7985 | BinOp->getOpcode() == Instruction::LShr)) { | ||||
7986 | ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); | ||||
7987 | if (CI && TLI->hasExtractBitsInsn()) | ||||
7988 | if (OptimizeExtractBits(BinOp, CI, *TLI, *DL)) | ||||
7989 | return true; | ||||
7990 | } | ||||
7991 | |||||
7992 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { | ||||
7993 | if (GEPI->hasAllZeroIndices()) { | ||||
7994 |       // The GEP operand must be a pointer, so must its result -> BitCast. | ||||
7995 | Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), | ||||
7996 | GEPI->getName(), GEPI); | ||||
7997 | NC->setDebugLoc(GEPI->getDebugLoc()); | ||||
7998 | GEPI->replaceAllUsesWith(NC); | ||||
7999 | GEPI->eraseFromParent(); | ||||
8000 | ++NumGEPsElim; | ||||
8001 | optimizeInst(NC, ModifiedDT); | ||||
8002 | return true; | ||||
8003 | } | ||||
8004 | if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { | ||||
8005 | return true; | ||||
8006 | } | ||||
8007 | return false; | ||||
8008 | } | ||||
8009 | |||||
8010 | if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) { | ||||
8011 |     // freeze(icmp(a, const)) -> icmp(freeze(a), const) | ||||
8012 | // This helps generate efficient conditional jumps. | ||||
8013 | Instruction *CmpI = nullptr; | ||||
8014 | if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0))) | ||||
8015 | CmpI = II; | ||||
8016 | else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0))) | ||||
8017 | CmpI = F->getFastMathFlags().none() ? F : nullptr; | ||||
8018 | |||||
8019 | if (CmpI && CmpI->hasOneUse()) { | ||||
8020 | auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1); | ||||
8021 | bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) || | ||||
8022 | isa<ConstantPointerNull>(Op0); | ||||
8023 | bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) || | ||||
8024 | isa<ConstantPointerNull>(Op1); | ||||
8025 | if (Const0 || Const1) { | ||||
8026 | if (!Const0 || !Const1) { | ||||
8027 | auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI); | ||||
8028 | F->takeName(FI); | ||||
8029 | CmpI->setOperand(Const0 ? 1 : 0, F); | ||||
8030 | } | ||||
8031 | FI->replaceAllUsesWith(CmpI); | ||||
8032 | FI->eraseFromParent(); | ||||
8033 | return true; | ||||
8034 | } | ||||
8035 | } | ||||
8036 | return false; | ||||
8037 | } | ||||
8038 | |||||
8039 | if (tryToSinkFreeOperands(I)) | ||||
8040 | return true; | ||||
8041 | |||||
8042 | switch (I->getOpcode()) { | ||||
8043 | case Instruction::Shl: | ||||
8044 | case Instruction::LShr: | ||||
8045 | case Instruction::AShr: | ||||
8046 | return optimizeShiftInst(cast<BinaryOperator>(I)); | ||||
8047 | case Instruction::Call: | ||||
8048 | return optimizeCallInst(cast<CallInst>(I), ModifiedDT); | ||||
8049 | case Instruction::Select: | ||||
8050 | return optimizeSelectInst(cast<SelectInst>(I)); | ||||
8051 | case Instruction::ShuffleVector: | ||||
8052 | return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I)); | ||||
8053 | case Instruction::Switch: | ||||
8054 | return optimizeSwitchInst(cast<SwitchInst>(I)); | ||||
8055 | case Instruction::ExtractElement: | ||||
8056 | return optimizeExtractElementInst(cast<ExtractElementInst>(I)); | ||||
8057 | case Instruction::Br: | ||||
8058 | return optimizeBranch(cast<BranchInst>(I), *TLI); | ||||
8059 | } | ||||
8060 | |||||
8061 | return false; | ||||
8062 | } | ||||
8063 | |||||
8064 | /// Given an OR instruction, check to see if this is a bitreverse | ||||
8065 | /// idiom. If so, insert the new intrinsic and return true. | ||||
8066 | bool CodeGenPrepare::makeBitReverse(Instruction &I) { | ||||
8067 | if (!I.getType()->isIntegerTy() || | ||||
8068 | !TLI->isOperationLegalOrCustom(ISD::BITREVERSE, | ||||
8069 | TLI->getValueType(*DL, I.getType(), true))) | ||||
8070 | return false; | ||||
8071 | |||||
8072 | SmallVector<Instruction *, 4> Insts; | ||||
8073 | if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts)) | ||||
8074 | return false; | ||||
8075 | Instruction *LastInst = Insts.back(); | ||||
8076 | I.replaceAllUsesWith(LastInst); | ||||
8077 | RecursivelyDeleteTriviallyDeadInstructions( | ||||
8078 | &I, TLInfo, nullptr, | ||||
8079 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | ||||
8080 | return true; | ||||
8081 | } | ||||
8082 | |||||
8083 | // In this pass we look for GEP and cast instructions that are used | ||||
8084 | // across basic blocks and rewrite them to improve basic-block-at-a-time | ||||
8085 | // selection. | ||||
8086 | bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) { | ||||
8087 | SunkAddrs.clear(); | ||||
8088 | bool MadeChange = false; | ||||
8089 | |||||
8090 | CurInstIterator = BB.begin(); | ||||
8091 | while (CurInstIterator != BB.end()) { | ||||
8092 | MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT); | ||||
8093 | if (ModifiedDT) | ||||
8094 | return true; | ||||
8095 | } | ||||
8096 | |||||
8097 | bool MadeBitReverse = true; | ||||
8098 | while (MadeBitReverse) { | ||||
8099 | MadeBitReverse = false; | ||||
8100 | for (auto &I : reverse(BB)) { | ||||
8101 | if (makeBitReverse(I)) { | ||||
8102 | MadeBitReverse = MadeChange = true; | ||||
8103 | break; | ||||
8104 | } | ||||
8105 | } | ||||
8106 | } | ||||
8107 | MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT); | ||||
8108 | |||||
8109 | return MadeChange; | ||||
8110 | } | ||||
8111 | |||||
8112 | // Some CGP optimizations may move or alter what's computed in a block. Check | ||||
8113 | // whether a dbg.value intrinsic could be pointed at a more appropriate operand. | ||||
8114 | bool CodeGenPrepare::fixupDbgValue(Instruction *I) { | ||||
8115 |   assert(isa<DbgValueInst>(I)); | ||||
8116 | DbgValueInst &DVI = *cast<DbgValueInst>(I); | ||||
8117 | |||||
8118 | // Does this dbg.value refer to a sunk address calculation? | ||||
8119 | bool AnyChange = false; | ||||
8120 | SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(), | ||||
8121 | DVI.location_ops().end()); | ||||
8122 | for (Value *Location : LocationOps) { | ||||
8123 | WeakTrackingVH SunkAddrVH = SunkAddrs[Location]; | ||||
8124 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; | ||||
8125 | if (SunkAddr) { | ||||
8126 | // Point dbg.value at locally computed address, which should give the best | ||||
8127 | // opportunity to be accurately lowered. This update may change the type | ||||
8128 | // of pointer being referred to; however this makes no difference to | ||||
8129 | // debugging information, and we can't generate bitcasts that may affect | ||||
8130 | // codegen. | ||||
8131 | DVI.replaceVariableLocationOp(Location, SunkAddr); | ||||
8132 | AnyChange = true; | ||||
8133 | } | ||||
8134 | } | ||||
8135 | return AnyChange; | ||||
8136 | } | ||||
8137 | |||||
8138 | // An llvm.dbg.value may be using a value before its definition, due to | ||||
8139 | // optimizations in this pass and others. Scan for such dbg.values, and rescue | ||||
8140 | // them by moving the dbg.value to immediately after the value definition. | ||||
8141 | // FIXME: Ideally this should never be necessary, and this has the potential | ||||
8142 | // to re-order dbg.value intrinsics. | ||||
8143 | bool CodeGenPrepare::placeDbgValues(Function &F) { | ||||
8144 | bool MadeChange = false; | ||||
8145 | DominatorTree DT(F); | ||||
8146 | |||||
8147 | for (BasicBlock &BB : F) { | ||||
8148 | for (Instruction &Insn : llvm::make_early_inc_range(BB)) { | ||||
8149 | DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn); | ||||
8150 | if (!DVI) | ||||
8151 | continue; | ||||
8152 | |||||
8153 | SmallVector<Instruction *, 4> VIs; | ||||
8154 | for (Value *V : DVI->getValues()) | ||||
8155 | if (Instruction *VI = dyn_cast_or_null<Instruction>(V)) | ||||
8156 | VIs.push_back(VI); | ||||
8157 | |||||
8158 | // This DVI may depend on multiple instructions, complicating any | ||||
8159 | // potential sink. This block takes the defensive approach, opting to | ||||
8160 | // "undef" the DVI if it has more than one instruction and any of them do | ||||
8161 | // not dominate DVI. | ||||
8162 | for (Instruction *VI : VIs) { | ||||
8163 | if (VI->isTerminator()) | ||||
8164 | continue; | ||||
8165 | |||||
8166 | // If VI is a phi in a block with an EHPad terminator, we can't insert | ||||
8167 | // after it. | ||||
8168 | if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad()) | ||||
8169 | continue; | ||||
8170 | |||||
8171 | // If the defining instruction dominates the dbg.value, we do not need | ||||
8172 | // to move the dbg.value. | ||||
8173 | if (DT.dominates(VI, DVI)) | ||||
8174 | continue; | ||||
8175 | |||||
8176 | // If we depend on multiple instructions and any of them doesn't | ||||
8177 | // dominate this DVI, we probably can't salvage it: moving it to | ||||
8178 | // after any of the instructions could cause us to lose the others. | ||||
8179 | if (VIs.size() > 1) { | ||||
8180 |           LLVM_DEBUG( | ||||
8181 |               dbgs() | ||||
8182 |               << "Unable to find valid location for Debug Value, undefing:\n" | ||||
8183 |               << *DVI); | ||||
8184 | DVI->setUndef(); | ||||
8185 | break; | ||||
8186 | } | ||||
8187 | |||||
8188 |         LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n" | ||||
8189 |                           << *DVI << ' ' << *VI); | ||||
8190 | DVI->removeFromParent(); | ||||
8191 | if (isa<PHINode>(VI)) | ||||
8192 | DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt()); | ||||
8193 | else | ||||
8194 | DVI->insertAfter(VI); | ||||
8195 | MadeChange = true; | ||||
8196 | ++NumDbgValueMoved; | ||||
8197 | } | ||||
8198 | } | ||||
8199 | } | ||||
8200 | return MadeChange; | ||||
8201 | } | ||||
8202 | |||||
8203 | // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered | ||||
8204 | // probes can be chained dependencies of other regular DAG nodes and block DAG | ||||
8205 | // combine optimizations. | ||||
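// For example (hypothetical block contents, p1/p2 being pseudo probes):
//   p1; %a = ...; p2; %b = ...
// is rewritten to
//   p1; p2; %a = ...; %b = ...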
8206 | bool CodeGenPrepare::placePseudoProbes(Function &F) { | ||||
8207 | bool MadeChange = false; | ||||
8208 | for (auto &Block : F) { | ||||
8209 |     // Move the remaining probes to the beginning of the block. | ||||
8210 | auto FirstInst = Block.getFirstInsertionPt(); | ||||
8211 | while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst()) | ||||
8212 | ++FirstInst; | ||||
8213 | BasicBlock::iterator I(FirstInst); | ||||
8214 | I++; | ||||
8215 | while (I != Block.end()) { | ||||
8216 | if (auto *II = dyn_cast<PseudoProbeInst>(I++)) { | ||||
8217 | II->moveBefore(&*FirstInst); | ||||
8218 | MadeChange = true; | ||||
8219 | } | ||||
8220 | } | ||||
8221 | } | ||||
8222 | return MadeChange; | ||||
8223 | } | ||||
8224 | |||||
8225 | /// Scale down both weights to fit into uint32_t. | ||||
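/// For example, with hypothetical inputs NewTrue = 10,000,000,000 and
/// NewFalse = 5,000,000,000: NewMax = 10,000,000,000, so
/// Scale = 10,000,000,000 / 4,294,967,295 + 1 = 3, and the scaled weights
/// become 3,333,333,333 and 1,666,666,666, both of which fit in uint32_t.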
8226 | static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { | ||||
8227 | uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; | ||||
8228 | uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; | ||||
8229 | NewTrue = NewTrue / Scale; | ||||
8230 | NewFalse = NewFalse / Scale; | ||||
8231 | } | ||||
8232 | |||||
8233 | /// Some targets prefer to split a conditional branch like: | ||||
8234 | /// \code | ||||
8235 | /// %0 = icmp ne i32 %a, 0 | ||||
8236 | /// %1 = icmp ne i32 %b, 0 | ||||
8237 | /// %or.cond = or i1 %0, %1 | ||||
8238 | /// br i1 %or.cond, label %TrueBB, label %FalseBB | ||||
8239 | /// \endcode | ||||
8240 | /// into multiple branch instructions like: | ||||
8241 | /// \code | ||||
8242 | /// bb1: | ||||
8243 | /// %0 = icmp ne i32 %a, 0 | ||||
8244 | /// br i1 %0, label %TrueBB, label %bb2 | ||||
8245 | /// bb2: | ||||
8246 | /// %1 = icmp ne i32 %b, 0 | ||||
8247 | /// br i1 %1, label %TrueBB, label %FalseBB | ||||
8248 | /// \endcode | ||||
8249 | /// This usually allows instruction selection to do even further optimizations | ||||
8250 | /// and combine the compare with the branch instruction. Currently this is | ||||
8251 | /// applied for targets which have "cheap" jump instructions. | ||||
8252 | /// | ||||
8253 | /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. | ||||
8254 | /// | ||||
8255 | bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) { | ||||
8256 | if (!TM->Options.EnableFastISel || TLI->isJumpExpensive()) | ||||
8257 | return false; | ||||
8258 | |||||
8259 | bool MadeChange = false; | ||||
8260 | for (auto &BB : F) { | ||||
8261 | // Does this BB end with the following? | ||||
8262 | // %cond1 = icmp|fcmp|binary instruction ... | ||||
8263 | // %cond2 = icmp|fcmp|binary instruction ... | ||||
8264 |     // %cond.or = or|and i1 %cond1, %cond2 | ||||
8265 |     // br i1 %cond.or, label %dest1, label %dest2 | ||||
8266 | Instruction *LogicOp; | ||||
8267 | BasicBlock *TBB, *FBB; | ||||
8268 | if (!match(BB.getTerminator(), | ||||
8269 | m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB))) | ||||
8270 | continue; | ||||
8271 | |||||
8272 | auto *Br1 = cast<BranchInst>(BB.getTerminator()); | ||||
8273 | if (Br1->getMetadata(LLVMContext::MD_unpredictable)) | ||||
8274 | continue; | ||||
8275 | |||||
8276 |     // The merging of mostly empty BBs can cause a degenerate branch. | ||||
8277 | if (TBB == FBB) | ||||
8278 | continue; | ||||
8279 | |||||
8280 | unsigned Opc; | ||||
8281 | Value *Cond1, *Cond2; | ||||
8282 | if (match(LogicOp, | ||||
8283 | m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2))))) | ||||
8284 | Opc = Instruction::And; | ||||
8285 | else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)), | ||||
8286 | m_OneUse(m_Value(Cond2))))) | ||||
8287 | Opc = Instruction::Or; | ||||
8288 | else | ||||
8289 | continue; | ||||
8290 | |||||
8291 | auto IsGoodCond = [](Value *Cond) { | ||||
8292 | return match( | ||||
8293 | Cond, | ||||
8294 | m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()), | ||||
8295 | m_LogicalOr(m_Value(), m_Value())))); | ||||
8296 | }; | ||||
8297 | if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2)) | ||||
8298 | continue; | ||||
8299 | |||||
8300 |     LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump()); | ||||
8301 | |||||
8302 | // Create a new BB. | ||||
8303 | auto *TmpBB = | ||||
8304 | BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split", | ||||
8305 | BB.getParent(), BB.getNextNode()); | ||||
8306 | |||||
8307 |     // Update the original basic block: use the first condition directly in the | ||||
8308 |     // branch instruction and remove the no-longer-needed and/or instruction. | ||||
8309 | Br1->setCondition(Cond1); | ||||
8310 | LogicOp->eraseFromParent(); | ||||
8311 | |||||
8312 | // Depending on the condition we have to either replace the true or the | ||||
8313 | // false successor of the original branch instruction. | ||||
8314 | if (Opc == Instruction::And) | ||||
8315 | Br1->setSuccessor(0, TmpBB); | ||||
8316 | else | ||||
8317 | Br1->setSuccessor(1, TmpBB); | ||||
8318 | |||||
8319 | // Fill in the new basic block. | ||||
8320 | auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); | ||||
8321 | if (auto *I = dyn_cast<Instruction>(Cond2)) { | ||||
8322 | I->removeFromParent(); | ||||
8323 | I->insertBefore(Br2); | ||||
8324 | } | ||||
8325 | |||||
8326 | // Update PHI nodes in both successors. The original BB needs to be | ||||
8327 |     // replaced in one successor's PHI nodes, because the branch now comes from | ||||
8328 |     // the newly generated BB (TmpBB). In the other successor we need to add one | ||||
8329 |     // incoming edge to the PHI nodes, because both branch instructions now | ||||
8330 |     // target the same successor. Depending on the original branch condition | ||||
8331 | // (and/or) we have to swap the successors (TrueDest, FalseDest), so that | ||||
8332 | // we perform the correct update for the PHI nodes. | ||||
8333 | // This doesn't change the successor order of the just created branch | ||||
8334 | // instruction (or any other instruction). | ||||
8335 | if (Opc == Instruction::Or) | ||||
8336 | std::swap(TBB, FBB); | ||||
8337 | |||||
8338 | // Replace the old BB with the new BB. | ||||
8339 | TBB->replacePhiUsesWith(&BB, TmpBB); | ||||
8340 | |||||
8341 |     // Add another incoming edge from the new BB. | ||||
8342 | for (PHINode &PN : FBB->phis()) { | ||||
8343 | auto *Val = PN.getIncomingValueForBlock(&BB); | ||||
8344 | PN.addIncoming(Val, TmpBB); | ||||
8345 | } | ||||
8346 | |||||
8347 | // Update the branch weights (from SelectionDAGBuilder:: | ||||
8348 | // FindMergedConditions). | ||||
8349 | if (Opc == Instruction::Or) { | ||||
8350 | // Codegen X | Y as: | ||||
8351 | // BB1: | ||||
8352 | // jmp_if_X TBB | ||||
8353 | // jmp TmpBB | ||||
8354 | // TmpBB: | ||||
8355 | // jmp_if_Y TBB | ||||
8356 | // jmp FBB | ||||
8357 | // | ||||
8358 | |||||
8359 |       // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | ||||
8360 | // The requirement is that | ||||
8361 | // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) | ||||
8362 | // = TrueProb for original BB. | ||||
8363 | // Assuming the original weights are A and B, one choice is to set BB1's | ||||
8364 | // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice | ||||
8365 | // assumes that | ||||
8366 | // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. | ||||
8367 | // Another choice is to assume TrueProb for BB1 equals to TrueProb for | ||||
8368 | // TmpBB, but the math is more complicated. | ||||
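// As a worked example with hypothetical weights A = 3 and B = 1: BB1 gets
// weights (3, 5) and TmpBB gets (3, 2), so
//   TrueProb for BB1 + FalseProb for BB1 * TrueProb for TmpBB
//   = 3/8 + (5/8) * (3/5) = 3/4,
// which matches the original TrueProb of 3/(3+1).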
8369 | uint64_t TrueWeight, FalseWeight; | ||||
8370 | if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) { | ||||
8371 | uint64_t NewTrueWeight = TrueWeight; | ||||
8372 | uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; | ||||
8373 | scaleWeights(NewTrueWeight, NewFalseWeight); | ||||
8374 | Br1->setMetadata(LLVMContext::MD_prof, | ||||
8375 | MDBuilder(Br1->getContext()) | ||||
8376 |                              .createBranchWeights(NewTrueWeight, NewFalseWeight)); | ||||
8377 | |||||
8378 | NewTrueWeight = TrueWeight; | ||||
8379 | NewFalseWeight = 2 * FalseWeight; | ||||
8380 | scaleWeights(NewTrueWeight, NewFalseWeight); | ||||
8381 | Br2->setMetadata(LLVMContext::MD_prof, | ||||
8382 | MDBuilder(Br2->getContext()) | ||||
8383 |                              .createBranchWeights(NewTrueWeight, NewFalseWeight)); | ||||
8384 | } | ||||
8385 | } else { | ||||
8386 | // Codegen X & Y as: | ||||
8387 | // BB1: | ||||
8388 | // jmp_if_X TmpBB | ||||
8389 | // jmp FBB | ||||
8390 | // TmpBB: | ||||
8391 | // jmp_if_Y TBB | ||||
8392 | // jmp FBB | ||||
8393 | // | ||||
8394 | // This requires creation of TmpBB after CurBB. | ||||
8395 | |||||
8396 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | ||||
8397 | // The requirement is that | ||||
8398 | // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) | ||||
8399 | // = FalseProb for original BB. | ||||
8400 | // Assuming the original weights are A and B, one choice is to set BB1's | ||||
8401 | // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice | ||||
8402 | // assumes that | ||||
8403 | // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. | ||||
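// As a worked example with hypothetical weights A = 3 and B = 1: BB1 gets
// weights (7, 1) and TmpBB gets (6, 1), so
//   FalseProb for BB1 + TrueProb for BB1 * FalseProb for TmpBB
//   = 1/8 + (7/8) * (1/7) = 1/4,
// which matches the original FalseProb of 1/(3+1).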
8404 | uint64_t TrueWeight, FalseWeight; | ||||
8405 | if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) { | ||||
8406 | uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; | ||||
8407 | uint64_t NewFalseWeight = FalseWeight; | ||||
8408 | scaleWeights(NewTrueWeight, NewFalseWeight); | ||||
8409 | Br1->setMetadata(LLVMContext::MD_prof, | ||||
8410 | MDBuilder(Br1->getContext()) | ||||
8411 |                              .createBranchWeights(NewTrueWeight, NewFalseWeight)); | ||||
8412 | |||||
8413 | NewTrueWeight = 2 * TrueWeight; | ||||
8414 | NewFalseWeight = FalseWeight; | ||||
8415 | scaleWeights(NewTrueWeight, NewFalseWeight); | ||||
8416 | Br2->setMetadata(LLVMContext::MD_prof, | ||||
8417 | MDBuilder(Br2->getContext()) | ||||
8418 |                              .createBranchWeights(NewTrueWeight, NewFalseWeight)); | ||||
8419 | } | ||||
8420 | } | ||||
8421 | |||||
8422 | ModifiedDT = true; | ||||
8423 | MadeChange = true; | ||||
8424 | |||||
8425 |     LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump(); | ||||
8426 |                TmpBB->dump()); | ||||
8427 | } | ||||
8428 | return MadeChange; | ||||
8429 | } |
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file exposes the class definitions of all of the subclasses of the |
10 | // Instruction class. This is meant to be an easy way to get access to all |
11 | // instruction subclasses. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_IR_INSTRUCTIONS_H |
16 | #define LLVM_IR_INSTRUCTIONS_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/Bitfields.h" |
20 | #include "llvm/ADT/MapVector.h" |
21 | #include "llvm/ADT/None.h" |
22 | #include "llvm/ADT/STLExtras.h" |
23 | #include "llvm/ADT/SmallVector.h" |
24 | #include "llvm/ADT/Twine.h" |
25 | #include "llvm/ADT/iterator.h" |
26 | #include "llvm/ADT/iterator_range.h" |
27 | #include "llvm/IR/CFG.h" |
28 | #include "llvm/IR/Constant.h" |
29 | #include "llvm/IR/DerivedTypes.h" |
30 | #include "llvm/IR/InstrTypes.h" |
31 | #include "llvm/IR/Instruction.h" |
32 | #include "llvm/IR/OperandTraits.h" |
33 | #include "llvm/IR/Use.h" |
34 | #include "llvm/IR/User.h" |
35 | #include "llvm/Support/AtomicOrdering.h" |
36 | #include "llvm/Support/ErrorHandling.h" |
37 | #include <cassert> |
38 | #include <cstddef> |
39 | #include <cstdint> |
40 | #include <iterator> |
41 | |
42 | namespace llvm { |
43 | |
44 | class APFloat; |
45 | class APInt; |
46 | class BasicBlock; |
47 | class BlockAddress; |
48 | class ConstantInt; |
49 | class DataLayout; |
50 | class StringRef; |
51 | class Type; |
52 | class Value; |
53 | |
54 | //===----------------------------------------------------------------------===// |
55 | // AllocaInst Class |
56 | //===----------------------------------------------------------------------===// |
57 | |
58 | /// An instruction to allocate memory on the stack.
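/// A minimal usage sketch (hypothetical names; assumes an existing
/// LLVMContext Ctx and an insertion point InsertPt):
/// \code
///   Type *I32 = Type::getInt32Ty(Ctx);
///   AllocaInst *Slot =
///       new AllocaInst(I32, /*AddrSpace=*/0, /*ArraySize=*/nullptr,
///                      Align(4), "slot", InsertPt);
/// \endcode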
59 | class AllocaInst : public UnaryInstruction { |
60 | Type *AllocatedType; |
61 | |
62 | using AlignmentField = AlignmentBitfieldElementT<0>; |
63 | using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; |
64 | using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; |
65 | static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, |
66 | SwiftErrorField>(), |
67 | "Bitfields must be contiguous"); |
68 | |
69 | protected: |
70 | // Note: Instruction needs to be a friend here to call cloneImpl. |
71 | friend class Instruction; |
72 | |
73 | AllocaInst *cloneImpl() const; |
74 | |
75 | public: |
76 | explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
77 | const Twine &Name, Instruction *InsertBefore); |
78 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
79 | const Twine &Name, BasicBlock *InsertAtEnd); |
80 | |
81 | AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, |
82 | Instruction *InsertBefore); |
83 | AllocaInst(Type *Ty, unsigned AddrSpace, |
84 | const Twine &Name, BasicBlock *InsertAtEnd); |
85 | |
86 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, |
87 | const Twine &Name = "", Instruction *InsertBefore = nullptr); |
88 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, |
89 | const Twine &Name, BasicBlock *InsertAtEnd); |
90 | |
91 | /// Return true if there is an allocation size parameter to the allocation |
92 | /// instruction that is not 1. |
93 | bool isArrayAllocation() const; |
94 | |
95 | /// Get the number of elements allocated. For a simple allocation of a single |
96 | /// element, this will return a constant 1 value. |
97 | const Value *getArraySize() const { return getOperand(0); } |
98 | Value *getArraySize() { return getOperand(0); } |
99 | |
100 | /// Overload to return most specific pointer type. |
101 | PointerType *getType() const { |
102 | return cast<PointerType>(Instruction::getType()); |
103 | } |
104 | |
105 | /// Return the address space for the allocation. |
106 | unsigned getAddressSpace() const { |
107 | return getType()->getAddressSpace(); |
108 | } |
109 | |
110 | /// Get allocation size in bits. Returns None if size can't be determined, |
111 | /// e.g. in case of a VLA. |
112 | Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const; |
113 | |
114 | /// Return the type that is being allocated by the instruction. |
115 | Type *getAllocatedType() const { return AllocatedType; } |
116 |   /// For use only in special circumstances that need to generically
117 |   /// transform a whole instruction (e.g. IR linking and vectorization).
118 | void setAllocatedType(Type *Ty) { AllocatedType = Ty; } |
119 | |
120 | /// Return the alignment of the memory that is being allocated by the |
121 | /// instruction. |
122 | Align getAlign() const { |
123 | return Align(1ULL << getSubclassData<AlignmentField>()); |
124 | } |
125 | |
126 | void setAlignment(Align Align) { |
127 | setSubclassData<AlignmentField>(Log2(Align)); |
128 | } |
129 | |
130 | /// Return true if this alloca is in the entry block of the function and is a |
131 | /// constant size. If so, the code generator will fold it into the |
132 | /// prolog/epilog code, so it is basically free. |
133 | bool isStaticAlloca() const; |
134 | |
135 | /// Return true if this alloca is used as an inalloca argument to a call. Such |
136 | /// allocas are never considered static even if they are in the entry block. |
137 | bool isUsedWithInAlloca() const { |
138 | return getSubclassData<UsedWithInAllocaField>(); |
139 | } |
140 | |
141 | /// Specify whether this alloca is used to represent the arguments to a call. |
142 | void setUsedWithInAlloca(bool V) { |
143 | setSubclassData<UsedWithInAllocaField>(V); |
144 | } |
145 | |
146 | /// Return true if this alloca is used as a swifterror argument to a call. |
147 | bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } |
148 | /// Specify whether this alloca is used to represent a swifterror. |
149 | void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } |
150 | |
151 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
152 | static bool classof(const Instruction *I) { |
153 | return (I->getOpcode() == Instruction::Alloca); |
154 | } |
155 | static bool classof(const Value *V) { |
156 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
157 | } |
158 | |
159 | private: |
160 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
161 | // method so that subclasses cannot accidentally use it. |
162 | template <typename Bitfield> |
163 | void setSubclassData(typename Bitfield::Type Value) { |
164 | Instruction::setSubclassData<Bitfield>(Value); |
165 | } |
166 | }; |
167 | |
168 | //===----------------------------------------------------------------------===// |
169 | // LoadInst Class |
170 | //===----------------------------------------------------------------------===// |
171 | |
172 | /// An instruction for reading from memory. This uses the SubclassData field in |
173 | /// Value to store whether or not the load is volatile. |
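/// A minimal usage sketch (hypothetical names; continues the AllocaInst
/// sketch above by loading the value back from Slot):
/// \code
///   LoadInst *LI = new LoadInst(I32, Slot, "val", /*isVolatile=*/false,
///                               Align(4), InsertPt);
/// \endcode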
174 | class LoadInst : public UnaryInstruction { |
175 | using VolatileField = BoolBitfieldElementT<0>; |
176 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; |
177 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; |
178 | static_assert( |
179 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), |
180 | "Bitfields must be contiguous"); |
181 | |
182 | void AssertOK(); |
183 | |
184 | protected: |
185 | // Note: Instruction needs to be a friend here to call cloneImpl. |
186 | friend class Instruction; |
187 | |
188 | LoadInst *cloneImpl() const; |
189 | |
190 | public: |
191 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, |
192 | Instruction *InsertBefore); |
193 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); |
194 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
195 | Instruction *InsertBefore); |
196 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
197 | BasicBlock *InsertAtEnd); |
198 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
199 | Align Align, Instruction *InsertBefore = nullptr); |
200 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
201 | Align Align, BasicBlock *InsertAtEnd); |
202 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
203 | Align Align, AtomicOrdering Order, |
204 | SyncScope::ID SSID = SyncScope::System, |
205 | Instruction *InsertBefore = nullptr); |
206 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
207 | Align Align, AtomicOrdering Order, SyncScope::ID SSID, |
208 | BasicBlock *InsertAtEnd); |
209 | |
210 | /// Return true if this is a load from a volatile memory location. |
211 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
212 | |
213 | /// Specify whether this is a volatile load or not. |
214 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
215 | |
216 | /// Return the alignment of the access that is being performed. |
217 | Align getAlign() const { |
218 | return Align(1ULL << (getSubclassData<AlignmentField>())); |
219 | } |
220 | |
221 | void setAlignment(Align Align) { |
222 | setSubclassData<AlignmentField>(Log2(Align)); |
223 | } |
224 | |
225 | /// Returns the ordering constraint of this load instruction. |
226 | AtomicOrdering getOrdering() const { |
227 | return getSubclassData<OrderingField>(); |
228 | } |
229 | /// Sets the ordering constraint of this load instruction. May not be Release |
230 | /// or AcquireRelease. |
231 | void setOrdering(AtomicOrdering Ordering) { |
232 | setSubclassData<OrderingField>(Ordering); |
233 | } |
234 | |
235 | /// Returns the synchronization scope ID of this load instruction. |
236 | SyncScope::ID getSyncScopeID() const { |
237 | return SSID; |
238 | } |
239 | |
240 | /// Sets the synchronization scope ID of this load instruction. |
241 | void setSyncScopeID(SyncScope::ID SSID) { |
242 | this->SSID = SSID; |
243 | } |
244 | |
245 | /// Sets the ordering constraint and the synchronization scope ID of this load |
246 | /// instruction. |
247 | void setAtomic(AtomicOrdering Ordering, |
248 | SyncScope::ID SSID = SyncScope::System) { |
249 | setOrdering(Ordering); |
250 | setSyncScopeID(SSID); |
251 | } |
252 | |
253 | bool isSimple() const { return !isAtomic() && !isVolatile(); } |
254 | |
255 | bool isUnordered() const { |
256 | return (getOrdering() == AtomicOrdering::NotAtomic || |
257 | getOrdering() == AtomicOrdering::Unordered) && |
258 | !isVolatile(); |
259 | } |
260 | |
261 | Value *getPointerOperand() { return getOperand(0); } |
262 | const Value *getPointerOperand() const { return getOperand(0); } |
263 | static unsigned getPointerOperandIndex() { return 0U; } |
264 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
265 | |
266 | /// Returns the address space of the pointer operand. |
267 | unsigned getPointerAddressSpace() const { |
268 | return getPointerOperandType()->getPointerAddressSpace(); |
269 | } |
270 | |
271 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
272 | static bool classof(const Instruction *I) { |
273 | return I->getOpcode() == Instruction::Load; |
274 | } |
275 | static bool classof(const Value *V) { |
276 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
277 | } |
278 | |
279 | private: |
280 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
281 | // method so that subclasses cannot accidentally use it. |
282 | template <typename Bitfield> |
283 | void setSubclassData(typename Bitfield::Type Value) { |
284 | Instruction::setSubclassData<Bitfield>(Value); |
285 | } |
286 | |
287 | /// The synchronization scope ID of this load instruction. Not quite enough |
288 | /// room in SubClassData for everything, so synchronization scope ID gets its |
289 | /// own field. |
290 | SyncScope::ID SSID; |
291 | }; |
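A minimal sketch of the LoadInst accessors in use, assuming an existing IRBuilder (Builder) and pointer value (Ptr); illustrative only, not part of this header:

    LoadInst *LI = Builder.CreateLoad(Builder.getInt32Ty(), Ptr, "val");
    LI->setAlignment(Align(4));               // stored internally as log2(4)
    LI->setAtomic(AtomicOrdering::Acquire);   // scope defaults to SyncScope::System
    bool Simple = LI->isSimple();             // false now: the load is atomic

Note that setAtomic only updates the ordering bitfield and scope field; it does not re-verify the ordering, which is why Release and AcquireRelease must be avoided for loads.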
292 | |
293 | //===----------------------------------------------------------------------===// |
294 | // StoreInst Class |
295 | //===----------------------------------------------------------------------===// |
296 | |
297 | /// An instruction for storing to memory. |
298 | class StoreInst : public Instruction { |
299 | using VolatileField = BoolBitfieldElementT<0>; |
300 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; |
301 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; |
302 | static_assert( |
303 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), |
304 | "Bitfields must be contiguous"); |
305 | |
306 | void AssertOK(); |
307 | |
308 | protected: |
309 | // Note: Instruction needs to be a friend here to call cloneImpl. |
310 | friend class Instruction; |
311 | |
312 | StoreInst *cloneImpl() const; |
313 | |
314 | public: |
315 | StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); |
316 | StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); |
317 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); |
318 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); |
319 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
320 | Instruction *InsertBefore = nullptr); |
321 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
322 | BasicBlock *InsertAtEnd); |
323 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
324 | AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, |
325 | Instruction *InsertBefore = nullptr); |
326 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
327 | AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); |
328 | |
329 | // allocate space for exactly two operands |
330 | void *operator new(size_t S) { return User::operator new(S, 2); } |
331 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
332 | |
333 | /// Return true if this is a store to a volatile memory location. |
334 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
335 | |
336 | /// Specify whether this is a volatile store or not. |
337 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
338 | |
339 | /// Transparently provide more efficient getOperand methods. |
340 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
341 | |
342 | Align getAlign() const { |
343 | return Align(1ULL << (getSubclassData<AlignmentField>())); |
344 | } |
345 | |
346 | void setAlignment(Align Align) { |
347 | setSubclassData<AlignmentField>(Log2(Align)); |
348 | } |
349 | |
350 | /// Returns the ordering constraint of this store instruction. |
351 | AtomicOrdering getOrdering() const { |
352 | return getSubclassData<OrderingField>(); |
353 | } |
354 | |
355 | /// Sets the ordering constraint of this store instruction. May not be |
356 | /// Acquire or AcquireRelease. |
357 | void setOrdering(AtomicOrdering Ordering) { |
358 | setSubclassData<OrderingField>(Ordering); |
359 | } |
360 | |
361 | /// Returns the synchronization scope ID of this store instruction. |
362 | SyncScope::ID getSyncScopeID() const { |
363 | return SSID; |
364 | } |
365 | |
366 | /// Sets the synchronization scope ID of this store instruction. |
367 | void setSyncScopeID(SyncScope::ID SSID) { |
368 | this->SSID = SSID; |
369 | } |
370 | |
371 | /// Sets the ordering constraint and the synchronization scope ID of this |
372 | /// store instruction. |
373 | void setAtomic(AtomicOrdering Ordering, |
374 | SyncScope::ID SSID = SyncScope::System) { |
375 | setOrdering(Ordering); |
376 | setSyncScopeID(SSID); |
377 | } |
378 | |
379 | bool isSimple() const { return !isAtomic() && !isVolatile(); } |
380 | |
381 | bool isUnordered() const { |
382 | return (getOrdering() == AtomicOrdering::NotAtomic || |
383 | getOrdering() == AtomicOrdering::Unordered) && |
384 | !isVolatile(); |
385 | } |
386 | |
387 | Value *getValueOperand() { return getOperand(0); } |
388 | const Value *getValueOperand() const { return getOperand(0); } |
389 | |
390 | Value *getPointerOperand() { return getOperand(1); } |
391 | const Value *getPointerOperand() const { return getOperand(1); } |
392 | static unsigned getPointerOperandIndex() { return 1U; } |
393 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
394 | |
395 | /// Returns the address space of the pointer operand. |
396 | unsigned getPointerAddressSpace() const { |
397 | return getPointerOperandType()->getPointerAddressSpace(); |
398 | } |
399 | |
400 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
401 | static bool classof(const Instruction *I) { |
402 | return I->getOpcode() == Instruction::Store; |
403 | } |
404 | static bool classof(const Value *V) { |
405 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
406 | } |
407 | |
408 | private: |
409 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
410 | // method so that subclasses cannot accidentally use it. |
411 | template <typename Bitfield> |
412 | void setSubclassData(typename Bitfield::Type Value) { |
413 | Instruction::setSubclassData<Bitfield>(Value); |
414 | } |
415 | |
416 | /// The synchronization scope ID of this store instruction. Not quite enough |
417 | /// room in SubClassData for everything, so synchronization scope ID gets its |
418 | /// own field. |
419 | SyncScope::ID SSID; |
420 | }; |
421 | |
422 | template <> |
423 | struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { |
424 | }; |
425 | |
426 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) |
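For symmetry with the load example earlier, a sketch of a release store through the same assumed Builder, with Val and Ptr standing in for the stored value and address:

    StoreInst *SI = Builder.CreateStore(Val, Ptr, /*isVolatile=*/false);
    SI->setAlignment(Align(8));
    SI->setAtomic(AtomicOrdering::Release);   // Acquire/AcquireRelease are invalid here
    Value *Stored = SI->getValueOperand();    // operand 0; the pointer is operand 1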
427 | |
428 | //===----------------------------------------------------------------------===// |
429 | // FenceInst Class |
430 | //===----------------------------------------------------------------------===// |
431 | |
432 | /// An instruction for ordering other memory operations. |
433 | class FenceInst : public Instruction { |
434 | using OrderingField = AtomicOrderingBitfieldElementT<0>; |
435 | |
436 | void Init(AtomicOrdering Ordering, SyncScope::ID SSID); |
437 | |
438 | protected: |
439 | // Note: Instruction needs to be a friend here to call cloneImpl. |
440 | friend class Instruction; |
441 | |
442 | FenceInst *cloneImpl() const; |
443 | |
444 | public: |
445 | // Ordering may only be Acquire, Release, AcquireRelease, or |
446 | // SequentiallyConsistent. |
447 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, |
448 | SyncScope::ID SSID = SyncScope::System, |
449 | Instruction *InsertBefore = nullptr); |
450 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, |
451 | BasicBlock *InsertAtEnd); |
452 | |
453 | // allocate space for exactly zero operands |
454 | void *operator new(size_t S) { return User::operator new(S, 0); } |
455 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
456 | |
457 | /// Returns the ordering constraint of this fence instruction. |
458 | AtomicOrdering getOrdering() const { |
459 | return getSubclassData<OrderingField>(); |
460 | } |
461 | |
462 | /// Sets the ordering constraint of this fence instruction. May only be |
463 | /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. |
464 | void setOrdering(AtomicOrdering Ordering) { |
465 | setSubclassData<OrderingField>(Ordering); |
466 | } |
467 | |
468 | /// Returns the synchronization scope ID of this fence instruction. |
469 | SyncScope::ID getSyncScopeID() const { |
470 | return SSID; |
471 | } |
472 | |
473 | /// Sets the synchronization scope ID of this fence instruction. |
474 | void setSyncScopeID(SyncScope::ID SSID) { |
475 | this->SSID = SSID; |
476 | } |
477 | |
478 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
479 | static bool classof(const Instruction *I) { |
480 | return I->getOpcode() == Instruction::Fence; |
481 | } |
482 | static bool classof(const Value *V) { |
483 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
484 | } |
485 | |
486 | private: |
487 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
488 | // method so that subclasses cannot accidentally use it. |
489 | template <typename Bitfield> |
490 | void setSubclassData(typename Bitfield::Type Value) { |
491 | Instruction::setSubclassData<Bitfield>(Value); |
492 | } |
493 | |
494 | /// The synchronization scope ID of this fence instruction. Not quite enough |
495 | /// room in SubClassData for everything, so synchronization scope ID gets its |
496 | /// own field. |
497 | SyncScope::ID SSID; |
498 | }; |
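A sketch of fence creation (Builder assumed); only the four orderings listed above are meaningful for a fence:

    FenceInst *FI = Builder.CreateFence(AtomicOrdering::SequentiallyConsistent);
    FI->setSyncScopeID(SyncScope::SingleThread);  // narrow from the default System scope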
499 | |
500 | //===----------------------------------------------------------------------===// |
501 | // AtomicCmpXchgInst Class |
502 | //===----------------------------------------------------------------------===// |
503 | |
504 | /// An instruction that atomically checks whether a |
505 | /// specified value is in a memory location, and, if it is, stores a new value |
506 | /// there. The value returned by this instruction is a pair containing the |
507 | /// original value as first element, and an i1 indicating success (true) or |
508 | /// failure (false) as second element. |
509 | /// |
510 | class AtomicCmpXchgInst : public Instruction { |
511 | void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, |
512 | AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, |
513 | SyncScope::ID SSID); |
514 | |
515 | template <unsigned Offset> |
516 | using AtomicOrderingBitfieldElement = |
517 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
518 | AtomicOrdering::LAST>; |
519 | |
520 | protected: |
521 | // Note: Instruction needs to be a friend here to call cloneImpl. |
522 | friend class Instruction; |
523 | |
524 | AtomicCmpXchgInst *cloneImpl() const; |
525 | |
526 | public: |
527 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, |
528 | AtomicOrdering SuccessOrdering, |
529 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, |
530 | Instruction *InsertBefore = nullptr); |
531 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, |
532 | AtomicOrdering SuccessOrdering, |
533 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, |
534 | BasicBlock *InsertAtEnd); |
535 | |
536 | // allocate space for exactly three operands |
537 | void *operator new(size_t S) { return User::operator new(S, 3); } |
538 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
539 | |
540 | using VolatileField = BoolBitfieldElementT<0>; |
541 | using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; |
542 | using SuccessOrderingField = |
543 | AtomicOrderingBitfieldElementT<WeakField::NextBit>; |
544 | using FailureOrderingField = |
545 | AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; |
546 | using AlignmentField = |
547 | AlignmentBitfieldElementT<FailureOrderingField::NextBit>; |
548 | static_assert( |
549 | Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, |
550 | FailureOrderingField, AlignmentField>(), |
551 | "Bitfields must be contiguous"); |
552 | |
553 |   /// Return the alignment of the memory location that this cmpxchg |
554 |   /// instruction operates on. |
555 | Align getAlign() const { |
556 | return Align(1ULL << getSubclassData<AlignmentField>()); |
557 | } |
558 | |
559 | void setAlignment(Align Align) { |
560 | setSubclassData<AlignmentField>(Log2(Align)); |
561 | } |
562 | |
563 | /// Return true if this is a cmpxchg from a volatile memory |
564 | /// location. |
565 | /// |
566 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
567 | |
568 | /// Specify whether this is a volatile cmpxchg. |
569 | /// |
570 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
571 | |
572 | /// Return true if this cmpxchg may spuriously fail. |
573 | bool isWeak() const { return getSubclassData<WeakField>(); } |
574 | |
575 | void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } |
576 | |
577 | /// Transparently provide more efficient getOperand methods. |
578 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
579 | |
580 | static bool isValidSuccessOrdering(AtomicOrdering Ordering) { |
581 | return Ordering != AtomicOrdering::NotAtomic && |
582 | Ordering != AtomicOrdering::Unordered; |
583 | } |
584 | |
585 | static bool isValidFailureOrdering(AtomicOrdering Ordering) { |
586 | return Ordering != AtomicOrdering::NotAtomic && |
587 | Ordering != AtomicOrdering::Unordered && |
588 | Ordering != AtomicOrdering::AcquireRelease && |
589 | Ordering != AtomicOrdering::Release; |
590 | } |
591 | |
592 | /// Returns the success ordering constraint of this cmpxchg instruction. |
593 | AtomicOrdering getSuccessOrdering() const { |
594 | return getSubclassData<SuccessOrderingField>(); |
595 | } |
596 | |
597 | /// Sets the success ordering constraint of this cmpxchg instruction. |
598 | void setSuccessOrdering(AtomicOrdering Ordering) { |
599 |     assert(isValidSuccessOrdering(Ordering) && |
600 |            "invalid CmpXchg success ordering"); |
601 | setSubclassData<SuccessOrderingField>(Ordering); |
602 | } |
603 | |
604 | /// Returns the failure ordering constraint of this cmpxchg instruction. |
605 | AtomicOrdering getFailureOrdering() const { |
606 | return getSubclassData<FailureOrderingField>(); |
607 | } |
608 | |
609 | /// Sets the failure ordering constraint of this cmpxchg instruction. |
610 | void setFailureOrdering(AtomicOrdering Ordering) { |
611 |     assert(isValidFailureOrdering(Ordering) && |
612 |            "invalid CmpXchg failure ordering"); |
613 | setSubclassData<FailureOrderingField>(Ordering); |
614 | } |
615 | |
616 | /// Returns a single ordering which is at least as strong as both the |
617 | /// success and failure orderings for this cmpxchg. |
618 | AtomicOrdering getMergedOrdering() const { |
619 | if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent) |
620 | return AtomicOrdering::SequentiallyConsistent; |
621 | if (getFailureOrdering() == AtomicOrdering::Acquire) { |
622 | if (getSuccessOrdering() == AtomicOrdering::Monotonic) |
623 | return AtomicOrdering::Acquire; |
624 | if (getSuccessOrdering() == AtomicOrdering::Release) |
625 | return AtomicOrdering::AcquireRelease; |
626 | } |
627 | return getSuccessOrdering(); |
628 | } |
629 | |
630 | /// Returns the synchronization scope ID of this cmpxchg instruction. |
631 | SyncScope::ID getSyncScopeID() const { |
632 | return SSID; |
633 | } |
634 | |
635 | /// Sets the synchronization scope ID of this cmpxchg instruction. |
636 | void setSyncScopeID(SyncScope::ID SSID) { |
637 | this->SSID = SSID; |
638 | } |
639 | |
640 | Value *getPointerOperand() { return getOperand(0); } |
641 | const Value *getPointerOperand() const { return getOperand(0); } |
642 | static unsigned getPointerOperandIndex() { return 0U; } |
643 | |
644 | Value *getCompareOperand() { return getOperand(1); } |
645 | const Value *getCompareOperand() const { return getOperand(1); } |
646 | |
647 | Value *getNewValOperand() { return getOperand(2); } |
648 | const Value *getNewValOperand() const { return getOperand(2); } |
649 | |
650 | /// Returns the address space of the pointer operand. |
651 | unsigned getPointerAddressSpace() const { |
652 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
653 | } |
654 | |
655 | /// Returns the strongest permitted ordering on failure, given the |
656 | /// desired ordering on success. |
657 | /// |
658 | /// If the comparison in a cmpxchg operation fails, there is no atomic store |
659 | /// so release semantics cannot be provided. So this function drops explicit |
660 | /// Release requests from the AtomicOrdering. A SequentiallyConsistent |
661 | /// operation would remain SequentiallyConsistent. |
662 | static AtomicOrdering |
663 | getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { |
664 | switch (SuccessOrdering) { |
665 | default: |
666 |       llvm_unreachable("invalid cmpxchg success ordering"); |
667 | case AtomicOrdering::Release: |
668 | case AtomicOrdering::Monotonic: |
669 | return AtomicOrdering::Monotonic; |
670 | case AtomicOrdering::AcquireRelease: |
671 | case AtomicOrdering::Acquire: |
672 | return AtomicOrdering::Acquire; |
673 | case AtomicOrdering::SequentiallyConsistent: |
674 | return AtomicOrdering::SequentiallyConsistent; |
675 | } |
676 | } |
677 | |
678 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
679 | static bool classof(const Instruction *I) { |
680 | return I->getOpcode() == Instruction::AtomicCmpXchg; |
681 | } |
682 | static bool classof(const Value *V) { |
683 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
684 | } |
685 | |
686 | private: |
687 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
688 | // method so that subclasses cannot accidentally use it. |
689 | template <typename Bitfield> |
690 | void setSubclassData(typename Bitfield::Type Value) { |
691 | Instruction::setSubclassData<Bitfield>(Value); |
692 | } |
693 | |
694 | /// The synchronization scope ID of this cmpxchg instruction. Not quite |
695 | /// enough room in SubClassData for everything, so synchronization scope ID |
696 | /// gets its own field. |
697 | SyncScope::ID SSID; |
698 | }; |
699 | |
700 | template <> |
701 | struct OperandTraits<AtomicCmpXchgInst> : |
702 | public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { |
703 | }; |
704 | |
705 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) |
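A sketch tying the cmpxchg ordering helpers together; Ptr, Expected, Desired and InsertPt are assumed to exist:

    AtomicOrdering Succ = AtomicOrdering::AcquireRelease;
    AtomicOrdering Fail = AtomicCmpXchgInst::getStrongestFailureOrdering(Succ);
    auto *CXI = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(4), Succ,
                                      Fail, SyncScope::System, InsertPt);
    CXI->setWeak(true);                               // may fail spuriously
    AtomicOrdering Merged = CXI->getMergedOrdering(); // >= both Succ and Fail

Here getStrongestFailureOrdering(AcquireRelease) yields Acquire, since a failed exchange performs no store and so cannot carry release semantics.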
706 | |
707 | //===----------------------------------------------------------------------===// |
708 | // AtomicRMWInst Class |
709 | //===----------------------------------------------------------------------===// |
710 | |
711 | /// An instruction that atomically reads a memory location, |
712 | /// combines it with another value, and then stores the result back. Returns |
713 | /// the old value. |
714 | /// |
715 | class AtomicRMWInst : public Instruction { |
716 | protected: |
717 | // Note: Instruction needs to be a friend here to call cloneImpl. |
718 | friend class Instruction; |
719 | |
720 | AtomicRMWInst *cloneImpl() const; |
721 | |
722 | public: |
723 | /// This enumeration lists the possible modifications atomicrmw can make. In |
724 | /// the descriptions, 'p' is the pointer to the instruction's memory location, |
725 | /// 'old' is the initial value of *p, and 'v' is the other value passed to the |
726 | /// instruction. These instructions always return 'old'. |
727 | enum BinOp : unsigned { |
728 | /// *p = v |
729 | Xchg, |
730 | /// *p = old + v |
731 | Add, |
732 | /// *p = old - v |
733 | Sub, |
734 | /// *p = old & v |
735 | And, |
736 | /// *p = ~(old & v) |
737 | Nand, |
738 | /// *p = old | v |
739 | Or, |
740 | /// *p = old ^ v |
741 | Xor, |
742 | /// *p = old >signed v ? old : v |
743 | Max, |
744 | /// *p = old <signed v ? old : v |
745 | Min, |
746 | /// *p = old >unsigned v ? old : v |
747 | UMax, |
748 | /// *p = old <unsigned v ? old : v |
749 | UMin, |
750 | |
751 | /// *p = old + v |
752 | FAdd, |
753 | |
754 | /// *p = old - v |
755 | FSub, |
756 | |
757 | /// *p = maxnum(old, v) |
758 | /// \p maxnum matches the behavior of \p llvm.maxnum.*. |
759 | FMax, |
760 | |
761 | /// *p = minnum(old, v) |
762 | /// \p minnum matches the behavior of \p llvm.minnum.*. |
763 | FMin, |
764 | |
765 | FIRST_BINOP = Xchg, |
766 | LAST_BINOP = FMin, |
767 | BAD_BINOP |
768 | }; |
769 | |
770 | private: |
771 | template <unsigned Offset> |
772 | using AtomicOrderingBitfieldElement = |
773 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
774 | AtomicOrdering::LAST>; |
775 | |
776 | template <unsigned Offset> |
777 | using BinOpBitfieldElement = |
778 | typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; |
779 | |
780 | public: |
781 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, |
782 | AtomicOrdering Ordering, SyncScope::ID SSID, |
783 | Instruction *InsertBefore = nullptr); |
784 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, |
785 | AtomicOrdering Ordering, SyncScope::ID SSID, |
786 | BasicBlock *InsertAtEnd); |
787 | |
788 | // allocate space for exactly two operands |
789 | void *operator new(size_t S) { return User::operator new(S, 2); } |
790 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
791 | |
792 | using VolatileField = BoolBitfieldElementT<0>; |
793 | using AtomicOrderingField = |
794 | AtomicOrderingBitfieldElementT<VolatileField::NextBit>; |
795 | using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; |
796 | using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; |
797 | static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, |
798 | OperationField, AlignmentField>(), |
799 | "Bitfields must be contiguous"); |
800 | |
801 | BinOp getOperation() const { return getSubclassData<OperationField>(); } |
802 | |
803 | static StringRef getOperationName(BinOp Op); |
804 | |
805 | static bool isFPOperation(BinOp Op) { |
806 | switch (Op) { |
807 | case AtomicRMWInst::FAdd: |
808 | case AtomicRMWInst::FSub: |
809 | case AtomicRMWInst::FMax: |
810 | case AtomicRMWInst::FMin: |
811 | return true; |
812 | default: |
813 | return false; |
814 | } |
815 | } |
816 | |
817 | void setOperation(BinOp Operation) { |
818 | setSubclassData<OperationField>(Operation); |
819 | } |
820 | |
821 |   /// Return the alignment of the memory location that this rmw |
822 |   /// instruction operates on. |
823 | Align getAlign() const { |
824 | return Align(1ULL << getSubclassData<AlignmentField>()); |
825 | } |
826 | |
827 | void setAlignment(Align Align) { |
828 | setSubclassData<AlignmentField>(Log2(Align)); |
829 | } |
830 | |
831 | /// Return true if this is a RMW on a volatile memory location. |
832 | /// |
833 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
834 | |
835 | /// Specify whether this is a volatile RMW or not. |
836 | /// |
837 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
838 | |
839 | /// Transparently provide more efficient getOperand methods. |
840 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
841 | |
842 | /// Returns the ordering constraint of this rmw instruction. |
843 | AtomicOrdering getOrdering() const { |
844 | return getSubclassData<AtomicOrderingField>(); |
845 | } |
846 | |
847 | /// Sets the ordering constraint of this rmw instruction. |
848 | void setOrdering(AtomicOrdering Ordering) { |
849 |     assert(Ordering != AtomicOrdering::NotAtomic && |
850 |            "atomicrmw instructions can only be atomic."); |
851 |     assert(Ordering != AtomicOrdering::Unordered && |
852 |            "atomicrmw instructions cannot be unordered."); |
853 | setSubclassData<AtomicOrderingField>(Ordering); |
854 | } |
855 | |
856 | /// Returns the synchronization scope ID of this rmw instruction. |
857 | SyncScope::ID getSyncScopeID() const { |
858 | return SSID; |
859 | } |
860 | |
861 | /// Sets the synchronization scope ID of this rmw instruction. |
862 | void setSyncScopeID(SyncScope::ID SSID) { |
863 | this->SSID = SSID; |
864 | } |
865 | |
866 | Value *getPointerOperand() { return getOperand(0); } |
867 | const Value *getPointerOperand() const { return getOperand(0); } |
868 | static unsigned getPointerOperandIndex() { return 0U; } |
869 | |
870 | Value *getValOperand() { return getOperand(1); } |
871 | const Value *getValOperand() const { return getOperand(1); } |
872 | |
873 | /// Returns the address space of the pointer operand. |
874 | unsigned getPointerAddressSpace() const { |
875 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
876 | } |
877 | |
878 | bool isFloatingPointOperation() const { |
879 | return isFPOperation(getOperation()); |
880 | } |
881 | |
882 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
883 | static bool classof(const Instruction *I) { |
884 | return I->getOpcode() == Instruction::AtomicRMW; |
885 | } |
886 | static bool classof(const Value *V) { |
887 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
888 | } |
889 | |
890 | private: |
891 | void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, |
892 | AtomicOrdering Ordering, SyncScope::ID SSID); |
893 | |
894 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
895 | // method so that subclasses cannot accidentally use it. |
896 | template <typename Bitfield> |
897 | void setSubclassData(typename Bitfield::Type Value) { |
898 | Instruction::setSubclassData<Bitfield>(Value); |
899 | } |
900 | |
901 | /// The synchronization scope ID of this rmw instruction. Not quite enough |
902 | /// room in SubClassData for everything, so synchronization scope ID gets its |
903 | /// own field. |
904 | SyncScope::ID SSID; |
905 | }; |
906 | |
907 | template <> |
908 | struct OperandTraits<AtomicRMWInst> |
909 | : public FixedNumOperandTraits<AtomicRMWInst,2> { |
910 | }; |
911 | |
912 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) |
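A sketch of an atomic increment using the BinOp enum above; Ptr, One (an i32 constant 1) and InsertPt are assumed:

    auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, One, Align(4),
                                  AtomicOrdering::Monotonic, SyncScope::System,
                                  InsertPt);
    Value *Old = RMW;   // the instruction's result is the *old* value at Ptr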
913 | |
914 | //===----------------------------------------------------------------------===// |
915 | // GetElementPtrInst Class |
916 | //===----------------------------------------------------------------------===// |
917 | |
918 | // checkGEPType - Simple wrapper function to give a better assertion failure |
919 | // message on bad indexes for a gep instruction. |
920 | // |
921 | inline Type *checkGEPType(Type *Ty) { |
922 |   assert(Ty && "Invalid GetElementPtrInst indices for type!"); |
923 | return Ty; |
924 | } |
925 | |
926 | /// An instruction for type-safe pointer arithmetic to |
927 | /// access elements of arrays and structs. |
928 | /// |
929 | class GetElementPtrInst : public Instruction { |
930 | Type *SourceElementType; |
931 | Type *ResultElementType; |
932 | |
933 | GetElementPtrInst(const GetElementPtrInst &GEPI); |
934 | |
935 |   /// Constructors - Create a getelementptr instruction with a base pointer and a |
936 | /// list of indices. The first ctor can optionally insert before an existing |
937 | /// instruction, the second appends the new instruction to the specified |
938 | /// BasicBlock. |
939 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
940 | ArrayRef<Value *> IdxList, unsigned Values, |
941 | const Twine &NameStr, Instruction *InsertBefore); |
942 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
943 | ArrayRef<Value *> IdxList, unsigned Values, |
944 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
945 | |
946 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); |
947 | |
948 | protected: |
949 | // Note: Instruction needs to be a friend here to call cloneImpl. |
950 | friend class Instruction; |
951 | |
952 | GetElementPtrInst *cloneImpl() const; |
953 | |
954 | public: |
955 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
956 | ArrayRef<Value *> IdxList, |
957 | const Twine &NameStr = "", |
958 | Instruction *InsertBefore = nullptr) { |
959 | unsigned Values = 1 + unsigned(IdxList.size()); |
960 |     assert(PointeeType && "Must specify element type"); |
961 |     assert(cast<PointerType>(Ptr->getType()->getScalarType()) |
962 |                ->isOpaqueOrPointeeTypeMatches(PointeeType)); |
963 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
964 | NameStr, InsertBefore); |
965 | } |
966 | |
967 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
968 | ArrayRef<Value *> IdxList, |
969 | const Twine &NameStr, |
970 | BasicBlock *InsertAtEnd) { |
971 | unsigned Values = 1 + unsigned(IdxList.size()); |
972 |     assert(PointeeType && "Must specify element type"); |
973 |     assert(cast<PointerType>(Ptr->getType()->getScalarType()) |
974 |                ->isOpaqueOrPointeeTypeMatches(PointeeType)); |
975 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
976 | NameStr, InsertAtEnd); |
977 | } |
978 | |
979 | /// Create an "inbounds" getelementptr. See the documentation for the |
980 | /// "inbounds" flag in LangRef.html for details. |
981 | static GetElementPtrInst * |
982 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, |
983 | const Twine &NameStr = "", |
984 | Instruction *InsertBefore = nullptr) { |
985 | GetElementPtrInst *GEP = |
986 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); |
987 | GEP->setIsInBounds(true); |
988 | return GEP; |
989 | } |
990 | |
991 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, |
992 | ArrayRef<Value *> IdxList, |
993 | const Twine &NameStr, |
994 | BasicBlock *InsertAtEnd) { |
995 | GetElementPtrInst *GEP = |
996 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); |
997 | GEP->setIsInBounds(true); |
998 | return GEP; |
999 | } |
1000 | |
1001 | /// Transparently provide more efficient getOperand methods. |
1002 |   DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
1003 | |
1004 | Type *getSourceElementType() const { return SourceElementType; } |
1005 | |
1006 | void setSourceElementType(Type *Ty) { SourceElementType = Ty; } |
1007 | void setResultElementType(Type *Ty) { ResultElementType = Ty; } |
1008 | |
1009 | Type *getResultElementType() const { |
1010 |     assert(cast<PointerType>(getType()->getScalarType()) |
1011 |                ->isOpaqueOrPointeeTypeMatches(ResultElementType)); |
1012 | return ResultElementType; |
1013 | } |
1014 | |
1015 | /// Returns the address space of this instruction's pointer type. |
1016 | unsigned getAddressSpace() const { |
1017 | // Note that this is always the same as the pointer operand's address space |
1018 | // and that is cheaper to compute, so cheat here. |
1019 | return getPointerAddressSpace(); |
1020 | } |
1021 | |
1022 | /// Returns the result type of a getelementptr with the given source |
1023 | /// element type and indexes. |
1024 | /// |
1025 | /// Null is returned if the indices are invalid for the specified |
1026 | /// source element type. |
1027 | static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); |
1028 | static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); |
1029 | static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); |
1030 | |
1031 | /// Return the type of the element at the given index of an indexable |
1032 | /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". |
1033 | /// |
1034 | /// Returns null if the type can't be indexed, or the given index is not |
1035 | /// legal for the given type. |
1036 | static Type *getTypeAtIndex(Type *Ty, Value *Idx); |
1037 | static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); |
1038 | |
1039 | inline op_iterator idx_begin() { return op_begin()+1; } |
1040 | inline const_op_iterator idx_begin() const { return op_begin()+1; } |
1041 | inline op_iterator idx_end() { return op_end(); } |
1042 | inline const_op_iterator idx_end() const { return op_end(); } |
1043 | |
1044 | inline iterator_range<op_iterator> indices() { |
1045 | return make_range(idx_begin(), idx_end()); |
1046 | } |
1047 | |
1048 | inline iterator_range<const_op_iterator> indices() const { |
1049 | return make_range(idx_begin(), idx_end()); |
1050 | } |
1051 | |
1052 | Value *getPointerOperand() { |
1053 | return getOperand(0); |
1054 | } |
1055 | const Value *getPointerOperand() const { |
1056 | return getOperand(0); |
1057 | } |
1058 | static unsigned getPointerOperandIndex() { |
1059 | return 0U; // get index for modifying correct operand. |
1060 | } |
1061 | |
1062 | /// Method to return the pointer operand as a |
1063 | /// PointerType. |
1064 | Type *getPointerOperandType() const { |
1065 | return getPointerOperand()->getType(); |
1066 | } |
1067 | |
1068 | /// Returns the address space of the pointer operand. |
1069 | unsigned getPointerAddressSpace() const { |
1070 | return getPointerOperandType()->getPointerAddressSpace(); |
1071 | } |
1072 | |
1073 | /// Returns the pointer type returned by the GEP |
1074 | /// instruction, which may be a vector of pointers. |
1075 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, |
1076 | ArrayRef<Value *> IdxList) { |
1077 | PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); |
1078 | unsigned AddrSpace = OrigPtrTy->getAddressSpace(); |
1079 | Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList)); |
1080 | Type *PtrTy = OrigPtrTy->isOpaque() |
1081 | ? PointerType::get(OrigPtrTy->getContext(), AddrSpace) |
1082 | : PointerType::get(ResultElemTy, AddrSpace); |
1083 | // Vector GEP |
1084 | if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { |
1085 | ElementCount EltCount = PtrVTy->getElementCount(); |
1086 | return VectorType::get(PtrTy, EltCount); |
1087 | } |
1088 | for (Value *Index : IdxList) |
1089 | if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { |
1090 | ElementCount EltCount = IndexVTy->getElementCount(); |
1091 | return VectorType::get(PtrTy, EltCount); |
1092 | } |
1093 | // Scalar GEP |
1094 | return PtrTy; |
1095 | } |
1096 | |
1097 | unsigned getNumIndices() const { // Note: always non-negative |
1098 | return getNumOperands() - 1; |
1099 | } |
1100 | |
1101 | bool hasIndices() const { |
1102 | return getNumOperands() > 1; |
1103 | } |
1104 | |
1105 | /// Return true if all of the indices of this GEP are |
1106 | /// zeros. If so, the result pointer and the first operand have the same |
1107 | /// value, just potentially different types. |
1108 | bool hasAllZeroIndices() const; |
1109 | |
1110 | /// Return true if all of the indices of this GEP are |
1111 | /// constant integers. If so, the result pointer and the first operand have |
1112 | /// a constant offset between them. |
1113 | bool hasAllConstantIndices() const; |
1114 | |
1115 | /// Set or clear the inbounds flag on this GEP instruction. |
1116 | /// See LangRef.html for the meaning of inbounds on a getelementptr. |
1117 | void setIsInBounds(bool b = true); |
1118 | |
1119 | /// Determine whether the GEP has the inbounds flag. |
1120 | bool isInBounds() const; |
1121 | |
1122 | /// Accumulate the constant address offset of this GEP if possible. |
1123 | /// |
1124 | /// This routine accepts an APInt into which it will accumulate the constant |
1125 | /// offset of this GEP if the GEP is in fact constant. If the GEP is not |
1126 | /// all-constant, it returns false and the value of the offset APInt is |
1127 | /// undefined (it is *not* preserved!). The APInt passed into this routine |
1128 | /// must be at least as wide as the IntPtr type for the address space of |
1129 | /// the base GEP pointer. |
1130 | bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; |
1131 | bool collectOffset(const DataLayout &DL, unsigned BitWidth, |
1132 | MapVector<Value *, APInt> &VariableOffsets, |
1133 | APInt &ConstantOffset) const; |
1134 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1135 | static bool classof(const Instruction *I) { |
1136 | return (I->getOpcode() == Instruction::GetElementPtr); |
1137 | } |
1138 | static bool classof(const Value *V) { |
1139 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1140 | } |
1141 | }; |
1142 | |
1143 | template <> |
1144 | struct OperandTraits<GetElementPtrInst> : |
1145 | public VariadicOperandTraits<GetElementPtrInst, 1> { |
1146 | }; |
1147 | |
1148 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1149 | ArrayRef<Value *> IdxList, unsigned Values, |
1150 | const Twine &NameStr, |
1151 | Instruction *InsertBefore) |
1152 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1153 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1154 | Values, InsertBefore), |
1155 | SourceElementType(PointeeType), |
1156 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1157 |   assert(cast<PointerType>(getType()->getScalarType()) |
1158 |              ->isOpaqueOrPointeeTypeMatches(ResultElementType)); |
1159 | init(Ptr, IdxList, NameStr); |
1160 | } |
1161 | |
1162 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1163 | ArrayRef<Value *> IdxList, unsigned Values, |
1164 | const Twine &NameStr, |
1165 | BasicBlock *InsertAtEnd) |
1166 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1167 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1168 | Values, InsertAtEnd), |
1169 | SourceElementType(PointeeType), |
1170 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1171 |   assert(cast<PointerType>(getType()->getScalarType()) |
1172 |              ->isOpaqueOrPointeeTypeMatches(ResultElementType)); |
1173 | init(Ptr, IdxList, NameStr); |
1174 | } |
1175 | |
1176 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) |
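A sketch combining CreateInBounds with constant-offset accumulation; Ctx, I32Ty, Ptr, DL and InsertPt are assumed:

    Value *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 3);
    auto *GEP = GetElementPtrInst::CreateInBounds(I32Ty, Ptr, {Idx}, "elt",
                                                  InsertPt);
    unsigned IdxBits = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
    APInt Offset(IdxBits, 0);                 // must be at least IntPtr-wide
    bool IsConst = GEP->accumulateConstantOffset(DL, Offset);
    // IsConst is true here and Offset == 12, i.e. 3 * sizeof(i32).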
1177 | |
1178 | //===----------------------------------------------------------------------===// |
1179 | // ICmpInst Class |
1180 | //===----------------------------------------------------------------------===// |
1181 | |
1182 | /// This instruction compares its operands according to the predicate given |
1183 | /// to the constructor. It only operates on integers or pointers. The operands |
1184 | /// must be of identical types. |
1185 | /// It represents an integer comparison operator. |
1186 | class ICmpInst: public CmpInst { |
1187 | void AssertOK() { |
1188 |     assert(isIntPredicate() && |
1189 |            "Invalid ICmp predicate value"); |
1190 |     assert(getOperand(0)->getType() == getOperand(1)->getType() && |
1191 |            "Both operands to ICmp instruction are not of the same type!"); |
1192 | // Check that the operands are the right type |
1193 | assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1194 | getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1195 | "Invalid operand types for ICmp instruction");
1196 | } |
1197 | |
1198 | protected: |
1199 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1200 | friend class Instruction; |
1201 | |
1202 | /// Clone an identical ICmpInst |
1203 | ICmpInst *cloneImpl() const; |
1204 | |
1205 | public: |
1206 | /// Constructor with insert-before-instruction semantics. |
1207 | ICmpInst( |
1208 | Instruction *InsertBefore, ///< Where to insert |
1209 | Predicate pred, ///< The predicate to use for the comparison |
1210 | Value *LHS, ///< The left-hand-side of the expression |
1211 | Value *RHS, ///< The right-hand-side of the expression |
1212 | const Twine &NameStr = "" ///< Name of the instruction |
1213 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1214 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1215 | InsertBefore) { |
1216 | #ifndef NDEBUG |
1217 | AssertOK(); |
1218 | #endif |
1219 | } |
1220 | |
1221 | /// Constructor with insert-at-end semantics. |
1222 | ICmpInst( |
1223 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1224 | Predicate pred, ///< The predicate to use for the comparison |
1225 | Value *LHS, ///< The left-hand-side of the expression |
1226 | Value *RHS, ///< The right-hand-side of the expression |
1227 | const Twine &NameStr = "" ///< Name of the instruction |
1228 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1229 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1230 | &InsertAtEnd) { |
1231 | #ifndef NDEBUG |
1232 | AssertOK(); |
1233 | #endif |
1234 | } |
1235 | |
1236 | /// Constructor with no-insertion semantics |
1237 | ICmpInst( |
1238 | Predicate pred, ///< The predicate to use for the comparison |
1239 | Value *LHS, ///< The left-hand-side of the expression |
1240 | Value *RHS, ///< The right-hand-side of the expression |
1241 | const Twine &NameStr = "" ///< Name of the instruction |
1242 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1243 | Instruction::ICmp, pred, LHS, RHS, NameStr) { |
1244 | #ifndef NDEBUG |
1245 | AssertOK(); |
1246 | #endif |
1247 | } |
1248 | |
1249 | /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. |
1250 | /// @returns the predicate that would be the result if the operands were
1251 | /// regarded as signed.
1252 | /// Return the signed version of the predicate.
1253 | Predicate getSignedPredicate() const { |
1254 | return getSignedPredicate(getPredicate()); |
1255 | } |
1256 | |
1257 | /// This is a static version that you can use without an instruction. |
1258 | /// Return the signed version of the predicate. |
1259 | static Predicate getSignedPredicate(Predicate pred); |
1260 | |
1261 | /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. |
1262 | /// @returns the predicate that would be the result if the operands were
1263 | /// regarded as unsigned.
1264 | /// Return the unsigned version of the predicate.
1265 | Predicate getUnsignedPredicate() const { |
1266 | return getUnsignedPredicate(getPredicate()); |
1267 | } |
1268 | |
1269 | /// This is a static version that you can use without an instruction. |
1270 | /// Return the unsigned version of the predicate. |
1271 | static Predicate getUnsignedPredicate(Predicate pred); |
1272 | |
1273 | /// Return true if this predicate is either EQ or NE. This also |
1274 | /// tests for commutativity. |
1275 | static bool isEquality(Predicate P) { |
1276 | return P == ICMP_EQ || P == ICMP_NE; |
1277 | } |
1278 | |
1279 | /// Return true if this predicate is either EQ or NE. This also |
1280 | /// tests for commutativity. |
1281 | bool isEquality() const { |
1282 | return isEquality(getPredicate()); |
1283 | } |
1284 | |
1285 | /// @returns true if the predicate of this ICmpInst is commutative |
1286 | /// Determine if this relation is commutative. |
1287 | bool isCommutative() const { return isEquality(); } |
1288 | |
1289 | /// Return true if the predicate is relational (not EQ or NE). |
1290 | /// |
1291 | bool isRelational() const { |
1292 | return !isEquality(); |
1293 | } |
1294 | |
1295 | /// Return true if the predicate is relational (not EQ or NE). |
1296 | /// |
1297 | static bool isRelational(Predicate P) { |
1298 | return !isEquality(P); |
1299 | } |
1300 | |
1301 | /// Return true if the predicate is SGT or UGT. |
1302 | /// |
1303 | static bool isGT(Predicate P) { |
1304 | return P == ICMP_SGT || P == ICMP_UGT; |
1305 | } |
1306 | |
1307 | /// Return true if the predicate is SLT or ULT. |
1308 | /// |
1309 | static bool isLT(Predicate P) { |
1310 | return P == ICMP_SLT || P == ICMP_ULT; |
1311 | } |
1312 | |
1313 | /// Return true if the predicate is SGE or UGE. |
1314 | /// |
1315 | static bool isGE(Predicate P) { |
1316 | return P == ICMP_SGE || P == ICMP_UGE; |
1317 | } |
1318 | |
1319 | /// Return true if the predicate is SLE or ULE. |
1320 | /// |
1321 | static bool isLE(Predicate P) { |
1322 | return P == ICMP_SLE || P == ICMP_ULE; |
1323 | } |
1324 | |
1325 | /// Returns the sequence of all ICmp predicates. |
1326 | /// |
1327 | static auto predicates() { return ICmpPredicates(); } |
1328 | |
1329 | /// Exchange the two operands to this instruction in such a way that it does |
1330 | /// not modify the semantics of the instruction. The predicate value may be |
1331 | /// changed to retain the same result if the predicate is order dependent |
1332 | /// (e.g. ult). |
1333 | /// Swap operands and adjust predicate. |
1334 | void swapOperands() { |
1335 | setPredicate(getSwappedPredicate()); |
1336 | Op<0>().swap(Op<1>()); |
1337 | } |
1338 | |
1339 | /// Return result of `LHS Pred RHS` comparison. |
1340 | static bool compare(const APInt &LHS, const APInt &RHS, |
1341 | ICmpInst::Predicate Pred); |
1342 | |
1343 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1344 | static bool classof(const Instruction *I) { |
1345 | return I->getOpcode() == Instruction::ICmp; |
1346 | } |
1347 | static bool classof(const Value *V) { |
1348 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1349 | } |
1350 | }; |
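// --- Illustrative sketch (editor's addition, not part of the original
// header). Builds a free-standing integer compare and exercises the
// predicate helpers above. `A` and `B` are assumed to be two existing i32
// Values; the names are hypothetical.
//
//   ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_ULT, A, B, "cmp");
//   assert(Cmp->isRelational());                        // ULT is not EQ/NE
//   ICmpInst::Predicate SP = Cmp->getSignedPredicate(); // ICMP_SLT
//   Cmp->swapOperands();                   // predicate becomes ICMP_UGT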
1351 | |
1352 | //===----------------------------------------------------------------------===// |
1353 | // FCmpInst Class |
1354 | //===----------------------------------------------------------------------===// |
1355 | |
1356 | /// This instruction compares its operands according to the predicate given |
1357 | /// to the constructor. It only operates on floating point values or packed |
1358 | /// vectors of floating point values. The operands must be of identical types.
1359 | /// Represents a floating point comparison operator. |
1360 | class FCmpInst: public CmpInst { |
1361 | void AssertOK() { |
1362 | assert(isFPPredicate() && "Invalid FCmp predicate value");
1363 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1364 | "Both operands to FCmp instruction are not of the same type!");
1365 | // Check that the operands are the right type |
1366 | assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1367 | "Invalid operand types for FCmp instruction");
1368 | } |
1369 | |
1370 | protected: |
1371 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1372 | friend class Instruction; |
1373 | |
1374 | /// Clone an identical FCmpInst |
1375 | FCmpInst *cloneImpl() const; |
1376 | |
1377 | public: |
1378 | /// Constructor with insert-before-instruction semantics. |
1379 | FCmpInst( |
1380 | Instruction *InsertBefore, ///< Where to insert |
1381 | Predicate pred, ///< The predicate to use for the comparison |
1382 | Value *LHS, ///< The left-hand-side of the expression |
1383 | Value *RHS, ///< The right-hand-side of the expression |
1384 | const Twine &NameStr = "" ///< Name of the instruction |
1385 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1386 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1387 | InsertBefore) { |
1388 | AssertOK(); |
1389 | } |
1390 | |
1391 | /// Constructor with insert-at-end semantics. |
1392 | FCmpInst( |
1393 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1394 | Predicate pred, ///< The predicate to use for the comparison |
1395 | Value *LHS, ///< The left-hand-side of the expression |
1396 | Value *RHS, ///< The right-hand-side of the expression |
1397 | const Twine &NameStr = "" ///< Name of the instruction |
1398 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1399 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1400 | &InsertAtEnd) { |
1401 | AssertOK(); |
1402 | } |
1403 | |
1404 | /// Constructor with no-insertion semantics |
1405 | FCmpInst( |
1406 | Predicate Pred, ///< The predicate to use for the comparison |
1407 | Value *LHS, ///< The left-hand-side of the expression |
1408 | Value *RHS, ///< The right-hand-side of the expression |
1409 | const Twine &NameStr = "", ///< Name of the instruction |
1410 | Instruction *FlagsSource = nullptr |
1411 | ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, |
1412 | RHS, NameStr, nullptr, FlagsSource) { |
1413 | AssertOK(); |
1414 | } |
1415 | |
1416 | /// @returns true if the predicate of this instruction is EQ or NE. |
1417 | /// Determine if this is an equality predicate. |
1418 | static bool isEquality(Predicate Pred) { |
1419 | return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || |
1420 | Pred == FCMP_UNE; |
1421 | } |
1422 | |
1423 | /// @returns true if the predicate of this instruction is EQ or NE. |
1424 | /// Determine if this is an equality predicate. |
1425 | bool isEquality() const { return isEquality(getPredicate()); } |
1426 | |
1427 | /// @returns true if the predicate of this instruction is commutative. |
1428 | /// Determine if this is a commutative predicate. |
1429 | bool isCommutative() const { |
1430 | return isEquality() || |
1431 | getPredicate() == FCMP_FALSE || |
1432 | getPredicate() == FCMP_TRUE || |
1433 | getPredicate() == FCMP_ORD || |
1434 | getPredicate() == FCMP_UNO; |
1435 | } |
1436 | |
1437 | /// @returns true if the predicate is relational (not EQ or NE). |
1438 | /// Determine if this a relational predicate. |
1439 | bool isRelational() const { return !isEquality(); } |
1440 | |
1441 | /// Exchange the two operands to this instruction in such a way that it does |
1442 | /// not modify the semantics of the instruction. The predicate value may be |
1443 | /// changed to retain the same result if the predicate is order dependent |
1444 | /// (e.g. ult). |
1445 | /// Swap operands and adjust predicate. |
1446 | void swapOperands() { |
1447 | setPredicate(getSwappedPredicate()); |
1448 | Op<0>().swap(Op<1>()); |
1449 | } |
1450 | |
1451 | /// Returns the sequence of all FCmp predicates. |
1452 | /// |
1453 | static auto predicates() { return FCmpPredicates(); } |
1454 | |
1455 | /// Return result of `LHS Pred RHS` comparison. |
1456 | static bool compare(const APFloat &LHS, const APFloat &RHS, |
1457 | FCmpInst::Predicate Pred); |
1458 | |
1459 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1460 | static bool classof(const Instruction *I) { |
1461 | return I->getOpcode() == Instruction::FCmp; |
1462 | } |
1463 | static bool classof(const Value *V) { |
1464 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1465 | } |
1466 | }; |
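// --- Illustrative sketch (editor's addition, not part of the original
// header). FCmpInst::compare evaluates a predicate on constant APFloats
// without materializing an instruction:
//
//   APFloat X(1.0), Y(2.0);
//   bool LT = FCmpInst::compare(X, Y, FCmpInst::FCMP_OLT);  // true
//   bool UNO = FCmpInst::compare(X, Y, FCmpInst::FCMP_UNO); // false: no NaNs
//
// FCMP_ORD and FCMP_UNO only test for NaNs, which is why isCommutative()
// above treats them as commutative.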
1467 | |
1468 | //===----------------------------------------------------------------------===// |
1469 | /// This class represents a function call, abstracting a target |
1470 | /// machine's calling convention. This class uses the low bit of the SubClassData
1471 | /// field to indicate whether or not this is a tail call. The rest of the bits |
1472 | /// hold the calling convention of the call. |
1473 | /// |
1474 | class CallInst : public CallBase { |
1475 | CallInst(const CallInst &CI); |
1476 | |
1477 | /// Construct a CallInst given a range of arguments. |
1478 | /// Construct a CallInst from a range of arguments |
1479 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1480 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1481 | Instruction *InsertBefore); |
1482 | |
1483 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1484 | const Twine &NameStr, Instruction *InsertBefore) |
1485 | : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} |
1486 | |
1487 | /// Construct a CallInst given a range of arguments. |
1488 | /// Construct a CallInst from a range of arguments |
1489 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1490 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1491 | BasicBlock *InsertAtEnd); |
1492 | |
1493 | explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, |
1494 | Instruction *InsertBefore); |
1495 | |
1496 | CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1497 | BasicBlock *InsertAtEnd); |
1498 | |
1499 | void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, |
1500 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
1501 | void init(FunctionType *FTy, Value *Func, const Twine &NameStr); |
1502 | |
1503 | /// Compute the number of operands to allocate. |
1504 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
1505 | // We need one operand for the called function, plus the input operand |
1506 | // counts provided. |
1507 | return 1 + NumArgs + NumBundleInputs; |
1508 | } |
1509 | |
1510 | protected: |
1511 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1512 | friend class Instruction; |
1513 | |
1514 | CallInst *cloneImpl() const; |
1515 | |
1516 | public: |
1517 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", |
1518 | Instruction *InsertBefore = nullptr) { |
1519 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); |
1520 | } |
1521 | |
1522 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1523 | const Twine &NameStr, |
1524 | Instruction *InsertBefore = nullptr) { |
1525 | return new (ComputeNumOperands(Args.size())) |
1526 | CallInst(Ty, Func, Args, None, NameStr, InsertBefore); |
1527 | } |
1528 | |
1529 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1530 | ArrayRef<OperandBundleDef> Bundles = None, |
1531 | const Twine &NameStr = "", |
1532 | Instruction *InsertBefore = nullptr) { |
1533 | const int NumOperands = |
1534 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1535 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1536 | |
1537 | return new (NumOperands, DescriptorBytes) |
1538 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); |
1539 | } |
1540 | |
1541 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, |
1542 | BasicBlock *InsertAtEnd) { |
1543 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); |
1544 | } |
1545 | |
1546 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1547 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1548 | return new (ComputeNumOperands(Args.size())) |
1549 | CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); |
1550 | } |
1551 | |
1552 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1553 | ArrayRef<OperandBundleDef> Bundles, |
1554 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1555 | const int NumOperands = |
1556 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1557 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1558 | |
1559 | return new (NumOperands, DescriptorBytes) |
1560 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); |
1561 | } |
1562 | |
1563 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", |
1564 | Instruction *InsertBefore = nullptr) { |
1565 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1566 | InsertBefore); |
1567 | } |
1568 | |
1569 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1570 | ArrayRef<OperandBundleDef> Bundles = None, |
1571 | const Twine &NameStr = "", |
1572 | Instruction *InsertBefore = nullptr) { |
1573 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1574 | NameStr, InsertBefore); |
1575 | } |
1576 | |
1577 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1578 | const Twine &NameStr, |
1579 | Instruction *InsertBefore = nullptr) { |
1580 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1581 | InsertBefore); |
1582 | } |
1583 | |
1584 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr, |
1585 | BasicBlock *InsertAtEnd) { |
1586 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1587 | InsertAtEnd); |
1588 | } |
1589 | |
1590 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1591 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1592 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1593 | InsertAtEnd); |
1594 | } |
1595 | |
1596 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1597 | ArrayRef<OperandBundleDef> Bundles, |
1598 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1599 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1600 | NameStr, InsertAtEnd); |
1601 | } |
1602 | |
1603 | /// Create a clone of \p CI with a different set of operand bundles and |
1604 | /// insert it before \p InsertPt. |
1605 | /// |
1606 | /// The returned call instruction is identical to \p CI in every way except that
1607 | /// the operand bundles for the new instruction are set to the operand bundles |
1608 | /// in \p Bundles. |
1609 | static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, |
1610 | Instruction *InsertPt = nullptr); |
1611 | |
1612 | /// Generate the IR for a call to malloc: |
1613 | /// 1. Compute the malloc call's argument as the specified type's size, |
1614 | /// possibly multiplied by the array size if the array size is not |
1615 | /// constant 1. |
1616 | /// 2. Call malloc with that argument. |
1617 | /// 3. Bitcast the result of the malloc call to the specified type. |
1618 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1619 | Type *AllocTy, Value *AllocSize, |
1620 | Value *ArraySize = nullptr, |
1621 | Function *MallocF = nullptr, |
1622 | const Twine &Name = ""); |
1623 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1624 | Type *AllocTy, Value *AllocSize, |
1625 | Value *ArraySize = nullptr, |
1626 | Function *MallocF = nullptr, |
1627 | const Twine &Name = ""); |
1628 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1629 | Type *AllocTy, Value *AllocSize, |
1630 | Value *ArraySize = nullptr, |
1631 | ArrayRef<OperandBundleDef> Bundles = None, |
1632 | Function *MallocF = nullptr, |
1633 | const Twine &Name = ""); |
1634 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1635 | Type *AllocTy, Value *AllocSize, |
1636 | Value *ArraySize = nullptr, |
1637 | ArrayRef<OperandBundleDef> Bundles = None, |
1638 | Function *MallocF = nullptr, |
1639 | const Twine &Name = ""); |
1640 | /// Generate the IR for a call to the builtin free function. |
1641 | static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); |
1642 | static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); |
1643 | static Instruction *CreateFree(Value *Source, |
1644 | ArrayRef<OperandBundleDef> Bundles, |
1645 | Instruction *InsertBefore); |
1646 | static Instruction *CreateFree(Value *Source, |
1647 | ArrayRef<OperandBundleDef> Bundles, |
1648 | BasicBlock *InsertAtEnd); |
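// --- Illustrative sketch (editor's addition, not part of the original
// header). Emits malloc(sizeof(i64) * 16) bitcast to i64*, then frees it.
// `Ctx`, `DL` (a DataLayout), and `InsertPt` are assumed to exist; the
// names are hypothetical.
//
//   Type *I64 = Type::getInt64Ty(Ctx);
//   Value *Sz = ConstantInt::get(DL.getIntPtrType(Ctx),
//                                DL.getTypeAllocSize(I64));
//   Instruction *Buf = CallInst::CreateMalloc(
//       InsertPt, DL.getIntPtrType(Ctx), I64, Sz,
//       ConstantInt::get(DL.getIntPtrType(Ctx), 16), nullptr, "buf");
//   CallInst::CreateFree(Buf, InsertPt);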
1649 | |
1650 | // Note that 'musttail' implies 'tail'. |
1651 | enum TailCallKind : unsigned { |
1652 | TCK_None = 0, |
1653 | TCK_Tail = 1, |
1654 | TCK_MustTail = 2, |
1655 | TCK_NoTail = 3, |
1656 | TCK_LAST = TCK_NoTail |
1657 | }; |
1658 | |
1659 | using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; |
1660 | static_assert( |
1661 | Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), |
1662 | "Bitfields must be contiguous"); |
1663 | |
1664 | TailCallKind getTailCallKind() const { |
1665 | return getSubclassData<TailCallKindField>(); |
1666 | } |
1667 | |
1668 | bool isTailCall() const { |
1669 | TailCallKind Kind = getTailCallKind(); |
1670 | return Kind == TCK_Tail || Kind == TCK_MustTail; |
1671 | } |
1672 | |
1673 | bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } |
1674 | |
1675 | bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } |
1676 | |
1677 | void setTailCallKind(TailCallKind TCK) { |
1678 | setSubclassData<TailCallKindField>(TCK); |
1679 | } |
1680 | |
1681 | void setTailCall(bool IsTc = true) { |
1682 | setTailCallKind(IsTc ? TCK_Tail : TCK_None); |
1683 | } |
1684 | |
1685 | /// Return true if the call can return twice |
1686 | bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } |
1687 | void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); } |
1688 | |
1689 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1690 | static bool classof(const Instruction *I) { |
1691 | return I->getOpcode() == Instruction::Call; |
1692 | } |
1693 | static bool classof(const Value *V) { |
1694 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1695 | } |
1696 | |
1697 | /// Updates profile metadata by scaling it by \p S / \p T. |
1698 | void updateProfWeight(uint64_t S, uint64_t T); |
1699 | |
1700 | private: |
1701 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
1702 | // method so that subclasses cannot accidentally use it. |
1703 | template <typename Bitfield> |
1704 | void setSubclassData(typename Bitfield::Type Value) { |
1705 | Instruction::setSubclassData<Bitfield>(Value); |
1706 | } |
1707 | }; |
1708 | |
1709 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1710 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1711 | BasicBlock *InsertAtEnd) |
1712 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1713 | OperandTraits<CallBase>::op_end(this) - |
1714 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1715 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1716 | InsertAtEnd) { |
1717 | init(Ty, Func, Args, Bundles, NameStr); |
1718 | } |
1719 | |
1720 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1721 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1722 | Instruction *InsertBefore) |
1723 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1724 | OperandTraits<CallBase>::op_end(this) - |
1725 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1726 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1727 | InsertBefore) { |
1728 | init(Ty, Func, Args, Bundles, NameStr); |
1729 | } |
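// --- Illustrative sketch (editor's addition, not part of the original
// header). The usual creation path goes through a FunctionCallee so the
// callee's FunctionType travels with the callee Value. `M` (a Module),
// `Ctx`, `Arg` (an i32 Value), and `InsertPt` are assumed to exist; the
// names are hypothetical.
//
//   FunctionCallee Callee = M.getOrInsertFunction(
//       "foo", Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx));
//   CallInst *CI = CallInst::Create(Callee, {Arg}, "call", InsertPt);
//   CI->setTailCallKind(CallInst::TCK_Tail); // TCK_MustTail implies 'tail'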
1730 | |
1731 | //===----------------------------------------------------------------------===// |
1732 | // SelectInst Class |
1733 | //===----------------------------------------------------------------------===// |
1734 | |
1735 | /// This class represents the LLVM 'select' instruction. |
1736 | /// |
1737 | class SelectInst : public Instruction { |
1738 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1739 | Instruction *InsertBefore) |
1740 | : Instruction(S1->getType(), Instruction::Select, |
1741 | &Op<0>(), 3, InsertBefore) { |
1742 | init(C, S1, S2); |
1743 | setName(NameStr); |
1744 | } |
1745 | |
1746 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1747 | BasicBlock *InsertAtEnd) |
1748 | : Instruction(S1->getType(), Instruction::Select, |
1749 | &Op<0>(), 3, InsertAtEnd) { |
1750 | init(C, S1, S2); |
1751 | setName(NameStr); |
1752 | } |
1753 | |
1754 | void init(Value *C, Value *S1, Value *S2) { |
1755 | assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1756 | Op<0>() = C; |
1757 | Op<1>() = S1; |
1758 | Op<2>() = S2; |
1759 | } |
1760 | |
1761 | protected: |
1762 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1763 | friend class Instruction; |
1764 | |
1765 | SelectInst *cloneImpl() const; |
1766 | |
1767 | public: |
1768 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1769 | const Twine &NameStr = "", |
1770 | Instruction *InsertBefore = nullptr, |
1771 | Instruction *MDFrom = nullptr) { |
1772 | SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); |
1773 | if (MDFrom) |
1774 | Sel->copyMetadata(*MDFrom); |
1775 | return Sel; |
1776 | } |
1777 | |
1778 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1779 | const Twine &NameStr, |
1780 | BasicBlock *InsertAtEnd) { |
1781 | return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); |
1782 | } |
1783 | |
1784 | const Value *getCondition() const { return Op<0>(); } |
1785 | const Value *getTrueValue() const { return Op<1>(); } |
1786 | const Value *getFalseValue() const { return Op<2>(); } |
1787 | Value *getCondition() { return Op<0>(); } |
1788 | Value *getTrueValue() { return Op<1>(); } |
1789 | Value *getFalseValue() { return Op<2>(); } |
1790 | |
1791 | void setCondition(Value *V) { Op<0>() = V; } |
1792 | void setTrueValue(Value *V) { Op<1>() = V; } |
1793 | void setFalseValue(Value *V) { Op<2>() = V; } |
1794 | |
1795 | /// Swap the true and false values of the select instruction. |
1796 | /// This doesn't swap prof metadata. |
1797 | void swapValues() { Op<1>().swap(Op<2>()); } |
1798 | |
1799 | /// Return a string if the specified operands are invalid |
1800 | /// for a select operation, otherwise return null. |
1801 | static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); |
1802 | |
1803 | /// Transparently provide more efficient getOperand methods. |
1804 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1805 | |
1806 | OtherOps getOpcode() const { |
1807 | return static_cast<OtherOps>(Instruction::getOpcode()); |
1808 | } |
1809 | |
1810 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1811 | static bool classof(const Instruction *I) { |
1812 | return I->getOpcode() == Instruction::Select; |
1813 | } |
1814 | static bool classof(const Value *V) { |
1815 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1816 | } |
1817 | }; |
1818 | |
1819 | template <> |
1820 | struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { |
1821 | }; |
1822 | |
1823 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
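// --- Illustrative sketch (editor's addition, not part of the original
// header). `Cond` (an i1 Value), `TVal`, `FVal`, and `InsertPt` are assumed
// to exist; the names are hypothetical.
//
//   assert(!SelectInst::areInvalidOperands(Cond, TVal, FVal));
//   SelectInst *Sel = SelectInst::Create(Cond, TVal, FVal, "sel", InsertPt);
//   Sel->swapValues();  // now yields FVal on true; prof metadata untouched
//   Value *NotC = BinaryOperator::CreateNot(Cond, "not", Sel);
//   Sel->setCondition(NotC); // with swapValues, the result is unchanged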
1824 | |
1825 | //===----------------------------------------------------------------------===// |
1826 | // VAArgInst Class |
1827 | //===----------------------------------------------------------------------===// |
1828 | |
1829 | /// This class represents the LLVM 'va_arg' instruction, which returns
1830 | /// an argument of the specified type given a va_list and increments that list.
1831 | /// |
1832 | class VAArgInst : public UnaryInstruction { |
1833 | protected: |
1834 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1835 | friend class Instruction; |
1836 | |
1837 | VAArgInst *cloneImpl() const; |
1838 | |
1839 | public: |
1840 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", |
1841 | Instruction *InsertBefore = nullptr) |
1842 | : UnaryInstruction(Ty, VAArg, List, InsertBefore) { |
1843 | setName(NameStr); |
1844 | } |
1845 | |
1846 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr, |
1847 | BasicBlock *InsertAtEnd) |
1848 | : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { |
1849 | setName(NameStr); |
1850 | } |
1851 | |
1852 | Value *getPointerOperand() { return getOperand(0); } |
1853 | const Value *getPointerOperand() const { return getOperand(0); } |
1854 | static unsigned getPointerOperandIndex() { return 0U; } |
1855 | |
1856 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1857 | static bool classof(const Instruction *I) { |
1858 | return I->getOpcode() == VAArg; |
1859 | } |
1860 | static bool classof(const Value *V) { |
1861 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1862 | } |
1863 | }; |
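// --- Illustrative sketch (editor's addition, not part of the original
// header). Reads one i32 off a va_list pointer `List` and advances it; the
// in-memory layout of the list is target-specific. `Ctx` and `InsertPt`
// are assumed to exist; the names are hypothetical.
//
//   VAArgInst *VA =
//       new VAArgInst(List, Type::getInt32Ty(Ctx), "ap.cur", InsertPt);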
1864 | |
1865 | //===----------------------------------------------------------------------===// |
1866 | // ExtractElementInst Class |
1867 | //===----------------------------------------------------------------------===// |
1868 | |
1869 | /// This instruction extracts a single (scalar) |
1870 | /// element from a VectorType value.
1871 | /// |
1872 | class ExtractElementInst : public Instruction { |
1873 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", |
1874 | Instruction *InsertBefore = nullptr); |
1875 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, |
1876 | BasicBlock *InsertAtEnd); |
1877 | |
1878 | protected: |
1879 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1880 | friend class Instruction; |
1881 | |
1882 | ExtractElementInst *cloneImpl() const; |
1883 | |
1884 | public: |
1885 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1886 | const Twine &NameStr = "", |
1887 | Instruction *InsertBefore = nullptr) { |
1888 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); |
1889 | } |
1890 | |
1891 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1892 | const Twine &NameStr, |
1893 | BasicBlock *InsertAtEnd) { |
1894 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); |
1895 | } |
1896 | |
1897 | /// Return true if an extractelement instruction can be |
1898 | /// formed with the specified operands. |
1899 | static bool isValidOperands(const Value *Vec, const Value *Idx); |
1900 | |
1901 | Value *getVectorOperand() { return Op<0>(); } |
1902 | Value *getIndexOperand() { return Op<1>(); } |
1903 | const Value *getVectorOperand() const { return Op<0>(); } |
1904 | const Value *getIndexOperand() const { return Op<1>(); } |
1905 | |
1906 | VectorType *getVectorOperandType() const { |
1907 | return cast<VectorType>(getVectorOperand()->getType()); |
1908 | } |
1909 | |
1910 | /// Transparently provide more efficient getOperand methods. |
1911 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1912 | |
1913 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1914 | static bool classof(const Instruction *I) { |
1915 | return I->getOpcode() == Instruction::ExtractElement; |
1916 | } |
1917 | static bool classof(const Value *V) { |
1918 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1919 | } |
1920 | }; |
1921 | |
1922 | template <> |
1923 | struct OperandTraits<ExtractElementInst> : |
1924 | public FixedNumOperandTraits<ExtractElementInst, 2> { |
1925 | }; |
1926 | |
1927 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
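// --- Illustrative sketch (editor's addition, not part of the original
// header). Extracts lane 1 of a <4 x i32> value `Vec`; `Ctx` and
// `InsertPt` are assumed to exist, and the names are hypothetical.
//
//   // %e = extractelement <4 x i32> %vec, i64 1
//   Value *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 1);
//   assert(ExtractElementInst::isValidOperands(Vec, Idx));
//   ExtractElementInst *E =
//       ExtractElementInst::Create(Vec, Idx, "e", InsertPt);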
1928 | |
1929 | //===----------------------------------------------------------------------===// |
1930 | // InsertElementInst Class |
1931 | //===----------------------------------------------------------------------===// |
1932 | |
1933 | /// This instruction inserts a single (scalar) |
1934 | /// element into a VectorType value.
1935 | /// |
1936 | class InsertElementInst : public Instruction { |
1937 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, |
1938 | const Twine &NameStr = "", |
1939 | Instruction *InsertBefore = nullptr); |
1940 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, |
1941 | BasicBlock *InsertAtEnd); |
1942 | |
1943 | protected: |
1944 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1945 | friend class Instruction; |
1946 | |
1947 | InsertElementInst *cloneImpl() const; |
1948 | |
1949 | public: |
1950 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1951 | const Twine &NameStr = "", |
1952 | Instruction *InsertBefore = nullptr) { |
1953 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); |
1954 | } |
1955 | |
1956 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1957 | const Twine &NameStr, |
1958 | BasicBlock *InsertAtEnd) { |
1959 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); |
1960 | } |
1961 | |
1962 | /// Return true if an insertelement instruction can be |
1963 | /// formed with the specified operands. |
1964 | static bool isValidOperands(const Value *Vec, const Value *NewElt, |
1965 | const Value *Idx); |
1966 | |
1967 | /// Overload to return most specific vector type. |
1968 | /// |
1969 | VectorType *getType() const { |
1970 | return cast<VectorType>(Instruction::getType()); |
1971 | } |
1972 | |
1973 | /// Transparently provide more efficient getOperand methods. |
1974 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
1975 | |
1976 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1977 | static bool classof(const Instruction *I) { |
1978 | return I->getOpcode() == Instruction::InsertElement; |
1979 | } |
1980 | static bool classof(const Value *V) { |
1981 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1982 | } |
1983 | }; |
1984 | |
1985 | template <> |
1986 | struct OperandTraits<InsertElementInst> : |
1987 | public FixedNumOperandTraits<InsertElementInst, 3> { |
1988 | }; |
1989 | |
1990 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
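// --- Illustrative sketch (editor's addition, not part of the original
// header). Writes scalar `X` into lane 0 of `Vec`; `Ctx` and `InsertPt`
// are assumed to exist, and the names are hypothetical.
//
//   // %v = insertelement <4 x i32> %vec, i32 %x, i64 0
//   Value *Zero = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
//   InsertElementInst *IE =
//       InsertElementInst::Create(Vec, X, Zero, "v", InsertPt);
//   assert(isa<VectorType>(IE->getType())); // getType() is the vector type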
1991 | |
1992 | //===----------------------------------------------------------------------===// |
1993 | // ShuffleVectorInst Class |
1994 | //===----------------------------------------------------------------------===// |
1995 | |
1996 | constexpr int UndefMaskElem = -1; |
1997 | |
1998 | /// This instruction constructs a fixed permutation of two |
1999 | /// input vectors. |
2000 | /// |
2001 | /// For each element of the result vector, the shuffle mask selects an element |
2002 | /// from one of the input vectors to copy to the result. Non-negative elements |
2003 | /// in the mask represent an index into the concatenated pair of input vectors. |
2004 | /// UndefMaskElem (-1) specifies that the result element is undefined. |
2005 | /// |
2006 | /// For scalable vectors, all the elements of the mask must be 0 or -1. This |
2007 | /// requirement may be relaxed in the future. |
2008 | class ShuffleVectorInst : public Instruction { |
2009 | SmallVector<int, 4> ShuffleMask; |
2010 | Constant *ShuffleMaskForBitcode; |
2011 | |
2012 | protected: |
2013 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2014 | friend class Instruction; |
2015 | |
2016 | ShuffleVectorInst *cloneImpl() const; |
2017 | |
2018 | public: |
2019 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", |
2020 | Instruction *InsertBefore = nullptr); |
2021 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, |
2022 | BasicBlock *InsertAtEnd); |
2023 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", |
2024 | Instruction *InsertBefore = nullptr); |
2025 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, |
2026 | BasicBlock *InsertAtEnd); |
2027 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
2028 | const Twine &NameStr = "", |
2029 | Instruction *InsertBefore = nullptr);
2030 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
2031 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2032 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, |
2033 | const Twine &NameStr = "", |
2034 | Instruction *InsertBefore = nullptr);
2035 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, |
2036 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2037 | |
2038 | void *operator new(size_t S) { return User::operator new(S, 2); } |
2039 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } |
2040 | |
2041 | /// Swap the operands and adjust the mask to preserve the semantics |
2042 | /// of the instruction. |
2043 | void commute(); |
2044 | |
2045 | /// Return true if a shufflevector instruction can be |
2046 | /// formed with the specified operands. |
2047 | static bool isValidOperands(const Value *V1, const Value *V2, |
2048 | const Value *Mask); |
2049 | static bool isValidOperands(const Value *V1, const Value *V2, |
2050 | ArrayRef<int> Mask); |
2051 | |
2052 | /// Overload to return most specific vector type. |
2053 | /// |
2054 | VectorType *getType() const { |
2055 | return cast<VectorType>(Instruction::getType()); |
2056 | } |
2057 | |
2058 | /// Transparently provide more efficient getOperand methods. |
2059 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2060 | |
2061 | /// Return the shuffle mask value of this instruction for the given element |
2062 | /// index. Return UndefMaskElem if the element is undef. |
2063 | int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } |
2064 | |
2065 | /// Convert the input shuffle mask operand to a vector of integers. Undefined |
2066 | /// elements of the mask are returned as UndefMaskElem. |
2067 | static void getShuffleMask(const Constant *Mask, |
2068 | SmallVectorImpl<int> &Result); |
2069 | |
2070 | /// Return the mask for this instruction as a vector of integers. Undefined |
2071 | /// elements of the mask are returned as UndefMaskElem. |
2072 | void getShuffleMask(SmallVectorImpl<int> &Result) const { |
2073 | Result.assign(ShuffleMask.begin(), ShuffleMask.end()); |
2074 | } |
2075 | |
2076 | /// Return the mask for this instruction, for use in bitcode. |
2077 | /// |
2078 | /// TODO: This is temporary until we decide a new bitcode encoding for |
2079 | /// shufflevector. |
2080 | Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } |
2081 | |
2082 | static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, |
2083 | Type *ResultTy); |
2084 | |
2085 | void setShuffleMask(ArrayRef<int> Mask); |
2086 | |
2087 | ArrayRef<int> getShuffleMask() const { return ShuffleMask; } |
2088 | |
2089 | /// Return true if this shuffle returns a vector with a different number of |
2090 | /// elements than its source vectors. |
2091 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> |
2092 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> |
2093 | bool changesLength() const { |
2094 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) |
2095 | ->getElementCount() |
2096 | .getKnownMinValue(); |
2097 | unsigned NumMaskElts = ShuffleMask.size(); |
2098 | return NumSourceElts != NumMaskElts; |
2099 | } |
2100 | |
2101 | /// Return true if this shuffle returns a vector with a greater number of |
2102 | /// elements than its source vectors. |
2103 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> |
2104 | bool increasesLength() const { |
2105 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) |
2106 | ->getElementCount() |
2107 | .getKnownMinValue(); |
2108 | unsigned NumMaskElts = ShuffleMask.size(); |
2109 | return NumSourceElts < NumMaskElts; |
2110 | } |
2111 | |
2112 | /// Return true if this shuffle mask chooses elements from exactly one source |
2113 | /// vector. |
2114 | /// Example: <7,5,undef,7> |
2115 | /// This assumes that vector operands are the same length as the mask. |
2116 | static bool isSingleSourceMask(ArrayRef<int> Mask); |
2117 | static bool isSingleSourceMask(const Constant *Mask) { |
2118 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2119 | SmallVector<int, 16> MaskAsInts; |
2120 | getShuffleMask(Mask, MaskAsInts); |
2121 | return isSingleSourceMask(MaskAsInts); |
2122 | } |
2123 | |
2124 | /// Return true if this shuffle chooses elements from exactly one source |
2125 | /// vector without changing the length of that vector. |
2126 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> |
2127 | /// TODO: Optionally allow length-changing shuffles. |
2128 | bool isSingleSource() const { |
2129 | return !changesLength() && isSingleSourceMask(ShuffleMask); |
2130 | } |
2131 | |
2132 | /// Return true if this shuffle mask chooses elements from exactly one source |
2133 | /// vector without lane crossings. A shuffle using this mask is not |
2134 | /// necessarily a no-op because it may change the number of elements from its |
2135 | /// input vectors or it may provide demanded bits knowledge via undef lanes. |
2136 | /// Example: <undef,undef,2,3> |
2137 | static bool isIdentityMask(ArrayRef<int> Mask); |
2138 | static bool isIdentityMask(const Constant *Mask) { |
2139 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2140 | |
2141 | // Not possible to express a shuffle mask for a scalable vector for this |
2142 | // case. |
2143 | if (isa<ScalableVectorType>(Mask->getType())) |
2144 | return false; |
2145 | |
2146 | SmallVector<int, 16> MaskAsInts; |
2147 | getShuffleMask(Mask, MaskAsInts); |
2148 | return isIdentityMask(MaskAsInts); |
2149 | } |
2150 | |
2151 | /// Return true if this shuffle chooses elements from exactly one source |
2152 | /// vector without lane crossings and does not change the number of elements |
2153 | /// from its input vectors. |
2154 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> |
2155 | bool isIdentity() const { |
2156 | // Not possible to express a shuffle mask for a scalable vector for this |
2157 | // case. |
2158 | if (isa<ScalableVectorType>(getType())) |
2159 | return false; |
2160 | |
2161 | return !changesLength() && isIdentityMask(ShuffleMask); |
2162 | } |
2163 | |
2164 | /// Return true if this shuffle lengthens exactly one source vector with |
2165 | /// undefs in the high elements. |
2166 | bool isIdentityWithPadding() const; |
2167 | |
2168 | /// Return true if this shuffle extracts the first N elements of exactly one |
2169 | /// source vector. |
2170 | bool isIdentityWithExtract() const; |
2171 | |
2172 | /// Return true if this shuffle concatenates its 2 source vectors. This |
2173 | /// returns false if either input is undefined. In that case, the shuffle
2174 | /// is better classified as an identity with padding operation.
2175 | bool isConcat() const; |
2176 | |
2177 | /// Return true if this shuffle mask chooses elements from its source vectors |
2178 | /// without lane crossings. A shuffle using this mask would be |
2179 | /// equivalent to a vector select with a constant condition operand. |
2180 | /// Example: <4,1,6,undef> |
2181 | /// This returns false if the mask does not choose from both input vectors. |
2182 | /// In that case, the shuffle is better classified as an identity shuffle. |
2183 | /// This assumes that vector operands are the same length as the mask |
2184 | /// (a length-changing shuffle can never be equivalent to a vector select). |
2185 | static bool isSelectMask(ArrayRef<int> Mask); |
2186 | static bool isSelectMask(const Constant *Mask) { |
2187 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2188 | SmallVector<int, 16> MaskAsInts; |
2189 | getShuffleMask(Mask, MaskAsInts); |
2190 | return isSelectMask(MaskAsInts); |
2191 | } |
2192 | |
2193 | /// Return true if this shuffle chooses elements from its source vectors |
2194 | /// without lane crossings and all operands have the same number of elements. |
2195 | /// In other words, this shuffle is equivalent to a vector select with a |
2196 | /// constant condition operand. |
2197 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> |
2198 | /// This returns false if the mask does not choose from both input vectors. |
2199 | /// In that case, the shuffle is better classified as an identity shuffle. |
2200 | /// TODO: Optionally allow length-changing shuffles. |
2201 | bool isSelect() const { |
2202 | return !changesLength() && isSelectMask(ShuffleMask); |
2203 | } |
2204 | |
2205 | /// Return true if this shuffle mask swaps the order of elements from exactly |
2206 | /// one source vector. |
2207 | /// Example: <7,6,undef,4> |
2208 | /// This assumes that vector operands are the same length as the mask. |
2209 | static bool isReverseMask(ArrayRef<int> Mask); |
2210 | static bool isReverseMask(const Constant *Mask) { |
2211 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2212 | SmallVector<int, 16> MaskAsInts; |
2213 | getShuffleMask(Mask, MaskAsInts); |
2214 | return isReverseMask(MaskAsInts); |
2215 | } |
2216 | |
2217 | /// Return true if this shuffle swaps the order of elements from exactly |
2218 | /// one source vector. |
2219 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> |
2220 | /// TODO: Optionally allow length-changing shuffles. |
2221 | bool isReverse() const { |
2222 | return !changesLength() && isReverseMask(ShuffleMask); |
2223 | } |
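// --- Illustrative sketch (editor's addition, not part of the original
// header). One mask can satisfy several of these classifiers at once; for
// a shuffle of two <4 x i32> operands:
//
//   SmallVector<int, 4> M = {3, 2, 1, 0};
//   ShuffleVectorInst::isSingleSourceMask(M); // true: all indices in [0,4)
//   ShuffleVectorInst::isReverseMask(M);      // true: <3,2,1,0>
//   ShuffleVectorInst::isIdentityMask(M);     // false: lanes cross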
2224 | |
2225 | /// Return true if this shuffle mask chooses all elements with the same value |
2226 | /// as the first element of exactly one source vector. |
2227 | /// Example: <4,undef,undef,4> |
2228 | /// This assumes that vector operands are the same length as the mask. |
2229 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); |
2230 | static bool isZeroEltSplatMask(const Constant *Mask) { |
2231 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2232 | SmallVector<int, 16> MaskAsInts; |
2233 | getShuffleMask(Mask, MaskAsInts); |
2234 | return isZeroEltSplatMask(MaskAsInts); |
2235 | } |
2236 | |
2237 | /// Return true if all elements of this shuffle are the same value as the |
2238 | /// first element of exactly one source vector without changing the length |
2239 | /// of that vector. |
2240 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> |
2241 | /// TODO: Optionally allow length-changing shuffles. |
2242 | /// TODO: Optionally allow splats from other elements. |
2243 | bool isZeroEltSplat() const { |
2244 | return !changesLength() && isZeroEltSplatMask(ShuffleMask); |
2245 | } |
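// [Editor's sketch, not in the original header] Only splats of element 0 of
// exactly one source qualify; <1,1,1,1> is a splat, but not of element 0.
#if 0
static void zeroEltSplatSketch() {
  assert(llvm::ShuffleVectorInst::isZeroEltSplatMask({0, 0, 0, 0}));   // A[0]
  assert(llvm::ShuffleVectorInst::isZeroEltSplatMask({4, -1, -1, 4})); // B[0]
  assert(!llvm::ShuffleVectorInst::isZeroEltSplatMask({1, 1, 1, 1}));
}
#endif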
2246 | |
2247 | /// Return true if this shuffle mask is a transpose mask. |
2248 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding |
2249 | /// even- or odd-numbered vector elements from two n-dimensional source |
2250 | /// vectors and write each result into consecutive elements of an |
2251 | /// n-dimensional destination vector. Two shuffles are necessary to complete |
2252 | /// the transpose, one for the even elements and another for the odd elements. |
2253 | /// This description closely follows how the TRN1 and TRN2 AArch64 |
2254 | /// instructions operate. |
2255 | /// |
2256 | /// For example, a simple 2x2 matrix can be transposed with: |
2257 | /// |
2258 | /// ; Original matrix |
2259 | /// m0 = < a, b > |
2260 | /// m1 = < c, d > |
2261 | /// |
2262 | /// ; Transposed matrix |
2263 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > |
2264 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > |
2265 | /// |
2266 | /// For matrices having greater than n columns, the resulting nx2 transposed |
2267 | /// matrix is stored in two result vectors such that one vector contains |
2268 | /// interleaved elements from all the even-numbered rows and the other vector |
2269 | /// contains interleaved elements from all the odd-numbered rows. For example, |
2270 | /// a 2x4 matrix can be transposed with: |
2271 | /// |
2272 | /// ; Original matrix |
2273 | /// m0 = < a, b, c, d > |
2274 | /// m1 = < e, f, g, h > |
2275 | /// |
2276 | /// ; Transposed matrix |
2277 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > |
2278 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > |
2279 | static bool isTransposeMask(ArrayRef<int> Mask); |
2280 | static bool isTransposeMask(const Constant *Mask) { |
2281 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2282 | SmallVector<int, 16> MaskAsInts; |
2283 | getShuffleMask(Mask, MaskAsInts); |
2284 | return isTransposeMask(MaskAsInts); |
2285 | } |
2286 | |
2287 | /// Return true if this shuffle transposes the elements of its inputs without |
2288 | /// changing the length of the vectors. This operation may also be known as a |
2289 | /// merge or interleave. See the description for isTransposeMask() for the |
2290 | /// exact specification. |
2291 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> |
2292 | bool isTranspose() const { |
2293 | return !changesLength() && isTransposeMask(ShuffleMask); |
2294 | } |
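// [Editor's sketch, not in the original header] The two masks from the 2x4
// example above both classify as transpose masks.
#if 0
static void transposeMaskSketch() {
  assert(llvm::ShuffleVectorInst::isTransposeMask({0, 4, 2, 6})); // t0
  assert(llvm::ShuffleVectorInst::isTransposeMask({1, 5, 3, 7})); // t1
}
#endif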
2295 | |
2296 | /// Return true if this shuffle mask is a splice mask, concatenating the two |
2297 | /// inputs together and then extracting an original width vector starting from |
2298 | /// the splice index. |
2299 | /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> |
2300 | static bool isSpliceMask(ArrayRef<int> Mask, int &Index); |
2301 | static bool isSpliceMask(const Constant *Mask, int &Index) { |
2302 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2303 | SmallVector<int, 16> MaskAsInts; |
2304 | getShuffleMask(Mask, MaskAsInts); |
2305 | return isSpliceMask(MaskAsInts, Index); |
2306 | } |
2307 | |
2308 | /// Return true if this shuffle splices two inputs without changing the length |
2309 | /// of the vectors. This operation concatenates the two inputs together and |
2310 | /// then extracts an original width vector starting from the splice index. |
2311 | /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> |
2312 | bool isSplice(int &Index) const { |
2313 | return !changesLength() && isSpliceMask(ShuffleMask, Index); |
2314 | } |
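// [Editor's sketch, not in the original header] For the doc example above the
// reported splice index is 1: <1,2,3,4> starts one lane into concat(A, B).
#if 0
static void spliceMaskSketch() {
  int Index = 0;
  assert(llvm::ShuffleVectorInst::isSpliceMask({1, 2, 3, 4}, Index));
  assert(Index == 1);
}
#endif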
2315 | |
2316 | /// Return true if this shuffle mask is an extract subvector mask. |
2317 | /// A valid extract subvector mask returns a smaller vector from a single |
2318 | /// source operand. The base extraction index is returned as well. |
2319 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
2320 | int &Index); |
2321 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, |
2322 | int &Index) { |
2323 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2324 | // Not possible to express a shuffle mask for a scalable vector for this |
2325 | // case. |
2326 | if (isa<ScalableVectorType>(Mask->getType())) |
2327 | return false; |
2328 | SmallVector<int, 16> MaskAsInts; |
2329 | getShuffleMask(Mask, MaskAsInts); |
2330 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); |
2331 | } |
2332 | |
2333 | /// Return true if this shuffle mask is an extract subvector mask. |
2334 | bool isExtractSubvectorMask(int &Index) const { |
2335 | // Not possible to express a shuffle mask for a scalable vector for this |
2336 | // case. |
2337 | if (isa<ScalableVectorType>(getType())) |
2338 | return false; |
2339 | |
2340 | int NumSrcElts = |
2341 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); |
2342 | return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); |
2343 | } |
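// [Editor's sketch, not in the original header] <2,3> over a 4-element source
// extracts a 2-element subvector starting at lane 2.
#if 0
static void extractSubvectorSketch() {
  int Index = 0;
  assert(llvm::ShuffleVectorInst::isExtractSubvectorMask({2, 3},
                                                         /*NumSrcElts=*/4,
                                                         Index));
  assert(Index == 2);
}
#endif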
2344 | |
2345 | /// Return true if this shuffle mask is an insert subvector mask. |
2346 | /// A valid insert subvector mask inserts the lowest elements of a second |
2347 | /// source operand into an in-place first source operand. |
2348 | /// Both the subvector width and the insertion index are returned. |
2349 | static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
2350 | int &NumSubElts, int &Index); |
2351 | static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, |
2352 | int &NumSubElts, int &Index) { |
2353 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2354 | // Not possible to express a shuffle mask for a scalable vector for this |
2355 | // case. |
2356 | if (isa<ScalableVectorType>(Mask->getType())) |
2357 | return false; |
2358 | SmallVector<int, 16> MaskAsInts; |
2359 | getShuffleMask(Mask, MaskAsInts); |
2360 | return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); |
2361 | } |
2362 | |
2363 | /// Return true if this shuffle mask is an insert subvector mask. |
2364 | bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { |
2365 | // Not possible to express a shuffle mask for a scalable vector for this |
2366 | // case. |
2367 | if (isa<ScalableVectorType>(getType())) |
2368 | return false; |
2369 | |
2370 | int NumSrcElts = |
2371 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); |
2372 | return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); |
2373 | } |
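// [Editor's sketch, not in the original header] <0,4,5,3> keeps the first
// source in place and inserts the low two lanes of the second at offset 1.
#if 0
static void insertSubvectorSketch() {
  int NumSubElts = 0, Index = 0;
  assert(llvm::ShuffleVectorInst::isInsertSubvectorMask(
      {0, 4, 5, 3}, /*NumSrcElts=*/4, NumSubElts, Index));
  assert(NumSubElts == 2 && Index == 1);
}
#endif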
2374 | |
2375 | /// Return true if this shuffle mask replicates each of the \p VF elements |
2376 | /// in a vector \p ReplicationFactor times. |
2377 | /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: |
2378 | /// <0,0,0,1,1,1,2,2,2,3,3,3> |
2379 | static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, |
2380 | int &VF); |
2381 | static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, |
2382 | int &VF) { |
2383 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2384 | // Not possible to express a shuffle mask for a scalable vector for this |
2385 | // case. |
2386 | if (isa<ScalableVectorType>(Mask->getType())) |
2387 | return false; |
2388 | SmallVector<int, 16> MaskAsInts; |
2389 | getShuffleMask(Mask, MaskAsInts); |
2390 | return isReplicationMask(MaskAsInts, ReplicationFactor, VF); |
2391 | } |
2392 | |
2393 | /// Return true if this shuffle mask is a replication mask. |
2394 | bool isReplicationMask(int &ReplicationFactor, int &VF) const; |
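// [Editor's sketch, not in the original header] The documented example mask
// decomposes into ReplicationFactor=3 and VF=4.
#if 0
static void replicationMaskSketch() {
  int RF = 0, VF = 0;
  assert(llvm::ShuffleVectorInst::isReplicationMask(
      {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}, RF, VF));
  assert(RF == 3 && VF == 4);
}
#endif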
2395 | |
2396 | /// Change values in a shuffle permute mask assuming the two vector operands |
2397 | /// of length InVecNumElts have swapped position. |
2398 | static void commuteShuffleMask(MutableArrayRef<int> Mask, |
2399 | unsigned InVecNumElts) { |
2400 | for (int &Idx : Mask) { |
2401 | if (Idx == -1) |
2402 | continue; |
2403 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; |
2404 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && |
2405 | "shufflevector mask index out of range"); |
2406 | } |
2407 | } |
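// [Editor's sketch, not in the original header] Commuting moves every defined
// index to the other operand; undef (-1) lanes are left alone.
#if 0
static void commuteMaskSketch() {
  llvm::SmallVector<int, 4> Mask = {0, 5, 2, 7};
  llvm::ShuffleVectorInst::commuteShuffleMask(Mask, /*InVecNumElts=*/4);
  // Mask is now {4, 1, 6, 3}.
}
#endif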
2408 | |
2409 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2410 | static bool classof(const Instruction *I) { |
2411 | return I->getOpcode() == Instruction::ShuffleVector; |
2412 | } |
2413 | static bool classof(const Value *V) { |
2414 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2415 | } |
2416 | }; |
2417 | |
2418 | template <> |
2419 | struct OperandTraits<ShuffleVectorInst> |
2420 | : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; |
2421 | |
2422 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) |
2423 | |
2424 | //===----------------------------------------------------------------------===// |
2425 | // ExtractValueInst Class |
2426 | //===----------------------------------------------------------------------===// |
2427 | |
2428 | /// This instruction extracts a struct member or array |
2429 | /// element value from an aggregate value. |
2430 | /// |
2431 | class ExtractValueInst : public UnaryInstruction { |
2432 | SmallVector<unsigned, 4> Indices; |
2433 | |
2434 | ExtractValueInst(const ExtractValueInst &EVI); |
2435 | |
2436 | /// Constructors - Create an extractvalue instruction with a base aggregate |
2437 | /// value and a list of indices. The first ctor can optionally insert before |
2438 | /// an existing instruction, the second appends the new instruction to the |
2439 | /// specified BasicBlock. |
2440 | inline ExtractValueInst(Value *Agg, |
2441 | ArrayRef<unsigned> Idxs, |
2442 | const Twine &NameStr, |
2443 | Instruction *InsertBefore); |
2444 | inline ExtractValueInst(Value *Agg, |
2445 | ArrayRef<unsigned> Idxs, |
2446 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2447 | |
2448 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); |
2449 | |
2450 | protected: |
2451 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2452 | friend class Instruction; |
2453 | |
2454 | ExtractValueInst *cloneImpl() const; |
2455 | |
2456 | public: |
2457 | static ExtractValueInst *Create(Value *Agg, |
2458 | ArrayRef<unsigned> Idxs, |
2459 | const Twine &NameStr = "", |
2460 | Instruction *InsertBefore = nullptr) { |
2461 | return new |
2462 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); |
2463 | } |
2464 | |
2465 | static ExtractValueInst *Create(Value *Agg, |
2466 | ArrayRef<unsigned> Idxs, |
2467 | const Twine &NameStr, |
2468 | BasicBlock *InsertAtEnd) { |
2469 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); |
2470 | } |
2471 | |
2472 | /// Returns the type of the element that would be extracted |
2473 | /// with an extractvalue instruction with the specified parameters. |
2474 | /// |
2475 | /// Null is returned if the indices are invalid for the specified type. |
2476 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); |
2477 | |
2478 | using idx_iterator = const unsigned*; |
2479 | |
2480 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2481 | inline idx_iterator idx_end() const { return Indices.end(); } |
2482 | inline iterator_range<idx_iterator> indices() const { |
2483 | return make_range(idx_begin(), idx_end()); |
2484 | } |
2485 | |
2486 | Value *getAggregateOperand() { |
2487 | return getOperand(0); |
2488 | } |
2489 | const Value *getAggregateOperand() const { |
2490 | return getOperand(0); |
2491 | } |
2492 | static unsigned getAggregateOperandIndex() { |
2493 | return 0U; // get index for modifying correct operand |
2494 | } |
2495 | |
2496 | ArrayRef<unsigned> getIndices() const { |
2497 | return Indices; |
2498 | } |
2499 | |
2500 | unsigned getNumIndices() const { |
2501 | return (unsigned)Indices.size(); |
2502 | } |
2503 | |
2504 | bool hasIndices() const { |
2505 | return true; |
2506 | } |
2507 | |
2508 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2509 | static bool classof(const Instruction *I) { |
2510 | return I->getOpcode() == Instruction::ExtractValue; |
2511 | } |
2512 | static bool classof(const Value *V) { |
2513 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2514 | } |
2515 | }; |
2516 | |
2517 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2518 | ArrayRef<unsigned> Idxs, |
2519 | const Twine &NameStr, |
2520 | Instruction *InsertBefore) |
2521 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2522 | ExtractValue, Agg, InsertBefore) { |
2523 | init(Idxs, NameStr); |
2524 | } |
2525 | |
2526 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2527 | ArrayRef<unsigned> Idxs, |
2528 | const Twine &NameStr, |
2529 | BasicBlock *InsertAtEnd) |
2530 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2531 | ExtractValue, Agg, InsertAtEnd) { |
2532 | init(Idxs, NameStr); |
2533 | } |
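// [Editor's sketch, not in the original header] Typical client code creates
// extractvalue through IRBuilder rather than the raw constructors; the name
// and aggregate shape are hypothetical, and llvm/IR/IRBuilder.h is assumed.
#if 0
static llvm::Value *secondField(llvm::IRBuilder<> &B, llvm::Value *Agg) {
  // Extracts member #1 of the aggregate value.
  return B.CreateExtractValue(Agg, {1}, "f1");
}
#endif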
2534 | |
2535 | //===----------------------------------------------------------------------===// |
2536 | // InsertValueInst Class |
2537 | //===----------------------------------------------------------------------===// |
2538 | |
2539 | /// This instruction inserts a struct field or array element |
2540 | /// value into an aggregate value. |
2541 | /// |
2542 | class InsertValueInst : public Instruction { |
2543 | SmallVector<unsigned, 4> Indices; |
2544 | |
2545 | InsertValueInst(const InsertValueInst &IVI); |
2546 | |
2547 | /// Constructors - Create an insertvalue instruction with a base aggregate |
2548 | /// value, a value to insert, and a list of indices. The first ctor can |
2549 | /// optionally insert before an existing instruction, the second appends |
2550 | /// the new instruction to the specified BasicBlock. |
2551 | inline InsertValueInst(Value *Agg, Value *Val, |
2552 | ArrayRef<unsigned> Idxs, |
2553 | const Twine &NameStr, |
2554 | Instruction *InsertBefore); |
2555 | inline InsertValueInst(Value *Agg, Value *Val, |
2556 | ArrayRef<unsigned> Idxs, |
2557 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2558 | |
2559 | /// Constructors - These two constructors are convenience methods because one |
2560 | /// and two index insertvalue instructions are so common. |
2561 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, |
2562 | const Twine &NameStr = "", |
2563 | Instruction *InsertBefore = nullptr); |
2564 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, |
2565 | BasicBlock *InsertAtEnd); |
2566 | |
2567 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, |
2568 | const Twine &NameStr); |
2569 | |
2570 | protected: |
2571 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2572 | friend class Instruction; |
2573 | |
2574 | InsertValueInst *cloneImpl() const; |
2575 | |
2576 | public: |
2577 | // allocate space for exactly two operands |
2578 | void *operator new(size_t S) { return User::operator new(S, 2); } |
2579 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
2580 | |
2581 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2582 | ArrayRef<unsigned> Idxs, |
2583 | const Twine &NameStr = "", |
2584 | Instruction *InsertBefore = nullptr) { |
2585 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); |
2586 | } |
2587 | |
2588 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2589 | ArrayRef<unsigned> Idxs, |
2590 | const Twine &NameStr, |
2591 | BasicBlock *InsertAtEnd) { |
2592 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); |
2593 | } |
2594 | |
2595 | /// Transparently provide more efficient getOperand methods. |
2596 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
2597 | |
2598 | using idx_iterator = const unsigned*; |
2599 | |
2600 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2601 | inline idx_iterator idx_end() const { return Indices.end(); } |
2602 | inline iterator_range<idx_iterator> indices() const { |
2603 | return make_range(idx_begin(), idx_end()); |
2604 | } |
2605 | |
2606 | Value *getAggregateOperand() { |
2607 | return getOperand(0); |
2608 | } |
2609 | const Value *getAggregateOperand() const { |
2610 | return getOperand(0); |
2611 | } |
2612 | static unsigned getAggregateOperandIndex() { |
2613 | return 0U; // get index for modifying correct operand |
2614 | } |
2615 | |
2616 | Value *getInsertedValueOperand() { |
2617 | return getOperand(1); |
2618 | } |
2619 | const Value *getInsertedValueOperand() const { |
2620 | return getOperand(1); |
2621 | } |
2622 | static unsigned getInsertedValueOperandIndex() { |
2623 | return 1U; // get index for modifying correct operand |
2624 | } |
2625 | |
2626 | ArrayRef<unsigned> getIndices() const { |
2627 | return Indices; |
2628 | } |
2629 | |
2630 | unsigned getNumIndices() const { |
2631 | return (unsigned)Indices.size(); |
2632 | } |
2633 | |
2634 | bool hasIndices() const { |
2635 | return true; |
2636 | } |
2637 | |
2638 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2639 | static bool classof(const Instruction *I) { |
2640 | return I->getOpcode() == Instruction::InsertValue; |
2641 | } |
2642 | static bool classof(const Value *V) { |
2643 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2644 | } |
2645 | }; |
2646 | |
2647 | template <> |
2648 | struct OperandTraits<InsertValueInst> : |
2649 | public FixedNumOperandTraits<InsertValueInst, 2> { |
2650 | }; |
2651 | |
2652 | InsertValueInst::InsertValueInst(Value *Agg, |
2653 | Value *Val, |
2654 | ArrayRef<unsigned> Idxs, |
2655 | const Twine &NameStr, |
2656 | Instruction *InsertBefore) |
2657 | : Instruction(Agg->getType(), InsertValue, |
2658 | OperandTraits<InsertValueInst>::op_begin(this), |
2659 | 2, InsertBefore) { |
2660 | init(Agg, Val, Idxs, NameStr); |
2661 | } |
2662 | |
2663 | InsertValueInst::InsertValueInst(Value *Agg, |
2664 | Value *Val, |
2665 | ArrayRef<unsigned> Idxs, |
2666 | const Twine &NameStr, |
2667 | BasicBlock *InsertAtEnd) |
2668 | : Instruction(Agg->getType(), InsertValue, |
2669 | OperandTraits<InsertValueInst>::op_begin(this), |
2670 | 2, InsertAtEnd) { |
2671 | init(Agg, Val, Idxs, NameStr); |
2672 | } |
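// [Editor's sketch, not in the original header] The insertvalue counterpart:
// it yields a copy of the aggregate with one member replaced. Same
// assumptions as the extractvalue sketch above.
#if 0
static llvm::Value *setSecondField(llvm::IRBuilder<> &B, llvm::Value *Agg,
                                   llvm::Value *V) {
  return B.CreateInsertValue(Agg, V, {1}, "agg.upd");
}
#endif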
2673 | |
2674 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) |
2675 | |
2676 | //===----------------------------------------------------------------------===// |
2677 | // PHINode Class |
2678 | //===----------------------------------------------------------------------===// |
2679 | |
2680 | // PHINode - The PHINode class is used to represent the magical mystical PHI |
2681 | // node, that can not exist in nature, but can be synthesized in a computer |
2682 | // scientist's overactive imagination. |
2683 | // |
2684 | class PHINode : public Instruction { |
2685 | /// The number of operands actually allocated. NumOperands is |
2686 | /// the number actually in use. |
2687 | unsigned ReservedSpace; |
2688 | |
2689 | PHINode(const PHINode &PN); |
2690 | |
2691 | explicit PHINode(Type *Ty, unsigned NumReservedValues, |
2692 | const Twine &NameStr = "", |
2693 | Instruction *InsertBefore = nullptr) |
2694 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), |
2695 | ReservedSpace(NumReservedValues) { |
2696 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); |
2697 | setName(NameStr); |
2698 | allocHungoffUses(ReservedSpace); |
2699 | } |
2700 | |
2701 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, |
2702 | BasicBlock *InsertAtEnd) |
2703 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), |
2704 | ReservedSpace(NumReservedValues) { |
2705 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!"); |
2706 | setName(NameStr); |
2707 | allocHungoffUses(ReservedSpace); |
2708 | } |
2709 | |
2710 | protected: |
2711 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2712 | friend class Instruction; |
2713 | |
2714 | PHINode *cloneImpl() const; |
2715 | |
2716 | // allocHungoffUses - this is more complicated than the generic |
2717 | // User::allocHungoffUses, because we have to allocate Uses for the incoming |
2718 | // values and pointers to the incoming blocks, all in one allocation. |
2719 | void allocHungoffUses(unsigned N) { |
2720 | User::allocHungoffUses(N, /* IsPhi */ true); |
2721 | } |
2722 | |
2723 | public: |
2724 | /// Constructors - NumReservedValues is a hint for the number of incoming |
2725 | /// edges that this phi node will have (use 0 if you really have no idea). |
2726 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2727 | const Twine &NameStr = "", |
2728 | Instruction *InsertBefore = nullptr) { |
2729 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); |
2730 | } |
2731 | |
2732 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2733 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
2734 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); |
2735 | } |
2736 | |
2737 | /// Provide fast operand accessors |
2738 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
2739 | |
2740 | // Block iterator interface. This provides access to the list of incoming |
2741 | // basic blocks, which parallels the list of incoming values. |
2742 | |
2743 | using block_iterator = BasicBlock **; |
2744 | using const_block_iterator = BasicBlock * const *; |
2745 | |
2746 | block_iterator block_begin() { |
2747 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); |
2748 | } |
2749 | |
2750 | const_block_iterator block_begin() const { |
2751 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); |
2752 | } |
2753 | |
2754 | block_iterator block_end() { |
2755 | return block_begin() + getNumOperands(); |
2756 | } |
2757 | |
2758 | const_block_iterator block_end() const { |
2759 | return block_begin() + getNumOperands(); |
2760 | } |
2761 | |
2762 | iterator_range<block_iterator> blocks() { |
2763 | return make_range(block_begin(), block_end()); |
2764 | } |
2765 | |
2766 | iterator_range<const_block_iterator> blocks() const { |
2767 | return make_range(block_begin(), block_end()); |
2768 | } |
2769 | |
2770 | op_range incoming_values() { return operands(); } |
2771 | |
2772 | const_op_range incoming_values() const { return operands(); } |
2773 | |
2774 | /// Return the number of incoming edges |
2775 | /// |
2776 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
2777 | |
2778 | /// Return incoming value number x |
2779 | /// |
2780 | Value *getIncomingValue(unsigned i) const { |
2781 | return getOperand(i); |
2782 | } |
2783 | void setIncomingValue(unsigned i, Value *V) { |
2784 | assert(V && "PHI node got a null value!"); |
2785 | assert(getType() == V->getType() && |
2786 | "All operands to PHI node must be the same type as the PHI node!"); |
2787 | setOperand(i, V); |
2788 | } |
2789 | |
2790 | static unsigned getOperandNumForIncomingValue(unsigned i) { |
2791 | return i; |
2792 | } |
2793 | |
2794 | static unsigned getIncomingValueNumForOperand(unsigned i) { |
2795 | return i; |
2796 | } |
2797 | |
2798 | /// Return incoming basic block number @p i. |
2799 | /// |
2800 | BasicBlock *getIncomingBlock(unsigned i) const { |
2801 | return block_begin()[i]; |
2802 | } |
2803 | |
2804 | /// Return incoming basic block corresponding |
2805 | /// to an operand of the PHI. |
2806 | /// |
2807 | BasicBlock *getIncomingBlock(const Use &U) const { |
2808 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?"); |
2809 | return getIncomingBlock(unsigned(&U - op_begin())); |
2810 | } |
2811 | |
2812 | /// Return incoming basic block corresponding |
2813 | /// to value use iterator. |
2814 | /// |
2815 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { |
2816 | return getIncomingBlock(I.getUse()); |
2817 | } |
2818 | |
2819 | void setIncomingBlock(unsigned i, BasicBlock *BB) { |
2820 | assert(BB && "PHI node got a null basic block!"); |
2821 | block_begin()[i] = BB; |
2822 | } |
2823 | |
2824 | /// Replace every incoming basic block \p Old with basic block \p New. |
2825 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { |
2826 | assert(New && Old && "PHI node got a null basic block!"); |
2827 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2828 | if (getIncomingBlock(Op) == Old) |
2829 | setIncomingBlock(Op, New); |
2830 | } |
2831 | |
2832 | /// Add an incoming value to the end of the PHI list |
2833 | /// |
2834 | void addIncoming(Value *V, BasicBlock *BB) { |
2835 | if (getNumOperands() == ReservedSpace) |
2836 | growOperands(); // Get more space! |
2837 | // Initialize some new operands. |
2838 | setNumHungOffUseOperands(getNumOperands() + 1); |
2839 | setIncomingValue(getNumOperands() - 1, V); |
2840 | setIncomingBlock(getNumOperands() - 1, BB); |
2841 | } |
2842 | |
2843 | /// Remove an incoming value. This is useful if a |
2844 | /// predecessor basic block is deleted. The value removed is returned. |
2845 | /// |
2846 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty |
2847 | /// is true), the PHI node is destroyed and any uses of it are replaced with |
2848 | /// dummy values. The only time there should be zero incoming values to a PHI |
2849 | /// node is when the block is dead, so this strategy is sound. |
2850 | /// |
2851 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); |
2852 | |
2853 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { |
2854 | int Idx = getBasicBlockIndex(BB); |
2855 | assert(Idx >= 0 && "Invalid basic block argument to remove!"); |
2856 | return removeIncomingValue(Idx, DeletePHIIfEmpty); |
2857 | } |
2858 | |
2859 | /// Return the first index of the specified basic |
2860 | /// block in the value list for this PHI. Returns -1 if no instance. |
2861 | /// |
2862 | int getBasicBlockIndex(const BasicBlock *BB) const { |
2863 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
2864 | if (block_begin()[i] == BB) |
2865 | return i; |
2866 | return -1; |
2867 | } |
2868 | |
2869 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { |
2870 | int Idx = getBasicBlockIndex(BB); |
2871 | assert(Idx >= 0 && "Invalid basic block argument!"); |
2872 | return getIncomingValue(Idx); |
2873 | } |
2874 | |
2875 | /// Set every incoming value for block \p BB to \p V. |
2876 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { |
2877 | assert(BB && "PHI node got a null basic block!"); |
2878 | bool Found = false; |
2879 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2880 | if (getIncomingBlock(Op) == BB) { |
2881 | Found = true; |
2882 | setIncomingValue(Op, V); |
2883 | } |
2884 | (void)Found; |
2885 | assert(Found && "Invalid basic block argument to set!"); |
2886 | } |
2887 | |
2888 | /// If the specified PHI node always merges together the |
2889 | /// same value, return the value, otherwise return null. |
2890 | Value *hasConstantValue() const; |
2891 | |
2892 | /// Whether the specified PHI node always merges |
2893 | /// together the same value, assuming undefs are equal to a unique |
2894 | /// non-undef value. |
2895 | bool hasConstantOrUndefValue() const; |
2896 | |
2897 | /// Return true if the PHI node is complete, i.e. all of its parent's |
2898 | /// predecessors have an incoming value in this PHI; otherwise return false. |
2899 | bool isComplete() const { |
2900 | return llvm::all_of(predecessors(getParent()), |
2901 | [this](const BasicBlock *Pred) { |
2902 | return getBasicBlockIndex(Pred) >= 0; |
2903 | }); |
2904 | } |
2905 | |
2906 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
2907 | static bool classof(const Instruction *I) { |
2908 | return I->getOpcode() == Instruction::PHI; |
2909 | } |
2910 | static bool classof(const Value *V) { |
2911 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2912 | } |
2913 | |
2914 | private: |
2915 | void growOperands(); |
2916 | }; |
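// [Editor's sketch, not in the original header] The usual if/then/else merge:
// reserve two slots, then add one (value, block) pair per incoming edge.
// Names are hypothetical; llvm/IR/IRBuilder.h is assumed.
#if 0
static llvm::PHINode *mergeTwo(llvm::IRBuilder<> &B, llvm::Value *VThen,
                               llvm::BasicBlock *ThenBB, llvm::Value *VElse,
                               llvm::BasicBlock *ElseBB) {
  llvm::PHINode *PN = B.CreatePHI(VThen->getType(), /*NumReservedValues=*/2);
  PN->addIncoming(VThen, ThenBB);
  PN->addIncoming(VElse, ElseBB);
  return PN;
}
#endif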
2917 | |
2918 | template <> |
2919 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { |
2920 | }; |
2921 | |
2922 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value) |
2923 | |
2924 | //===----------------------------------------------------------------------===// |
2925 | // LandingPadInst Class |
2926 | //===----------------------------------------------------------------------===// |
2927 | |
2928 | //===--------------------------------------------------------------------------- |
2929 | /// The landingpad instruction holds all of the information |
2930 | /// necessary to generate correct exception handling. The landingpad instruction |
2931 | /// cannot be moved from the top of a landing pad block, which itself is |
2932 | /// accessible only from the 'unwind' edge of an invoke. This uses the |
2933 | /// SubclassData field in Value to store whether or not the landingpad is a |
2934 | /// cleanup. |
2935 | /// |
2936 | class LandingPadInst : public Instruction { |
2937 | using CleanupField = BoolBitfieldElementT<0>; |
2938 | |
2939 | /// The number of operands actually allocated. NumOperands is |
2940 | /// the number actually in use. |
2941 | unsigned ReservedSpace; |
2942 | |
2943 | LandingPadInst(const LandingPadInst &LP); |
2944 | |
2945 | public: |
2946 | enum ClauseType { Catch, Filter }; |
2947 | |
2948 | private: |
2949 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2950 | const Twine &NameStr, Instruction *InsertBefore); |
2951 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2952 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2953 | |
2954 | // Allocate space for exactly zero operands. |
2955 | void *operator new(size_t S) { return User::operator new(S); } |
2956 | |
2957 | void growOperands(unsigned Size); |
2958 | void init(unsigned NumReservedValues, const Twine &NameStr); |
2959 | |
2960 | protected: |
2961 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2962 | friend class Instruction; |
2963 | |
2964 | LandingPadInst *cloneImpl() const; |
2965 | |
2966 | public: |
2967 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
2968 | |
2969 | /// Constructors - NumReservedClauses is a hint for the number of clauses |
2970 | /// that this landingpad will have (use 0 if you really have no idea). |
2971 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2972 | const Twine &NameStr = "", |
2973 | Instruction *InsertBefore = nullptr); |
2974 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2975 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2976 | |
2977 | /// Provide fast operand accessors |
2978 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
2979 | |
2980 | /// Return 'true' if this landingpad instruction is a |
2981 | /// cleanup. I.e., it should be run when unwinding even if its landing pad |
2982 | /// doesn't catch the exception. |
2983 | bool isCleanup() const { return getSubclassData<CleanupField>(); } |
2984 | |
2985 | /// Indicate that this landingpad instruction is a cleanup. |
2986 | void setCleanup(bool V) { setSubclassData<CleanupField>(V); } |
2987 | |
2988 | /// Add a catch or filter clause to the landing pad. |
2989 | void addClause(Constant *ClauseVal); |
2990 | |
2991 | /// Get the value of the clause at index Idx. Use isCatch/isFilter to |
2992 | /// determine what type of clause this is. |
2993 | Constant *getClause(unsigned Idx) const { |
2994 | return cast<Constant>(getOperandList()[Idx]); |
2995 | } |
2996 | |
2997 | /// Return 'true' if the clause at index Idx is a catch clause. |
2998 | bool isCatch(unsigned Idx) const { |
2999 | return !isa<ArrayType>(getOperandList()[Idx]->getType()); |
3000 | } |
3001 | |
3002 | /// Return 'true' if the clause at index Idx is a filter clause. |
3003 | bool isFilter(unsigned Idx) const { |
3004 | return isa<ArrayType>(getOperandList()[Idx]->getType()); |
3005 | } |
3006 | |
3007 | /// Get the number of clauses for this landing pad. |
3008 | unsigned getNumClauses() const { return getNumOperands(); } |
3009 | |
3010 | /// Grow the size of the operand list to accommodate the new |
3011 | /// number of clauses. |
3012 | void reserveClauses(unsigned Size) { growOperands(Size); } |
3013 | |
3014 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3015 | static bool classof(const Instruction *I) { |
3016 | return I->getOpcode() == Instruction::LandingPad; |
3017 | } |
3018 | static bool classof(const Value *V) { |
3019 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3020 | } |
3021 | }; |
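// [Editor's sketch, not in the original header] A cleanup landingpad with the
// common 'catch i8* null' catch-all clause. Assumes the enclosing function
// has a personality set; ExnTy and the includes (llvm/IR/IRBuilder.h,
// llvm/IR/Constants.h) are assumptions.
#if 0
static void catchAllCleanup(llvm::IRBuilder<> &B, llvm::Type *ExnTy) {
  llvm::LandingPadInst *LP = B.CreateLandingPad(ExnTy, /*NumClauses=*/1);
  LP->setCleanup(true); // run on unwind even when nothing is caught
  LP->addClause(llvm::Constant::getNullValue(B.getInt8PtrTy()));
}
#endif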
3022 | |
3023 | template <> |
3024 | struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { |
3025 | }; |
3026 | |
3027 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value) |
3028 | |
3029 | //===----------------------------------------------------------------------===// |
3030 | // ReturnInst Class |
3031 | //===----------------------------------------------------------------------===// |
3032 | |
3033 | //===--------------------------------------------------------------------------- |
3034 | /// Return a value (possibly void) from a function. Execution |
3035 | /// does not continue in this function any longer. |
3036 | /// |
3037 | class ReturnInst : public Instruction { |
3038 | ReturnInst(const ReturnInst &RI); |
3039 | |
3040 | private: |
3041 | // ReturnInst constructors: |
3042 | // ReturnInst() - 'ret void' instruction |
3043 | // ReturnInst( null) - 'ret void' instruction |
3044 | // ReturnInst(Value* X) - 'ret X' instruction |
3045 | // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I |
3046 | // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I |
3047 | // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B |
3048 | // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B |
3049 | // |
3050 | // NOTE: If the Value* passed is of type void then the constructor behaves as |
3051 | // if it was passed NULL. |
3052 | explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, |
3053 | Instruction *InsertBefore = nullptr); |
3054 | ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); |
3055 | explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
3056 | |
3057 | protected: |
3058 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3059 | friend class Instruction; |
3060 | |
3061 | ReturnInst *cloneImpl() const; |
3062 | |
3063 | public: |
3064 | static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, |
3065 | Instruction *InsertBefore = nullptr) { |
3066 | return new(!!retVal) ReturnInst(C, retVal, InsertBefore); |
3067 | } |
3068 | |
3069 | static ReturnInst* Create(LLVMContext &C, Value *retVal, |
3070 | BasicBlock *InsertAtEnd) { |
3071 | return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); |
3072 | } |
3073 | |
3074 | static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { |
3075 | return new(0) ReturnInst(C, InsertAtEnd); |
3076 | } |
3077 | |
3078 | /// Provide fast operand accessors |
3079 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
3080 | |
3081 | /// Convenience accessor. Returns null if there is no return value. |
3082 | Value *getReturnValue() const { |
3083 | return getNumOperands() != 0 ? getOperand(0) : nullptr; |
3084 | } |
3085 | |
3086 | unsigned getNumSuccessors() const { return 0; } |
3087 | |
3088 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3089 | static bool classof(const Instruction *I) { |
3090 | return (I->getOpcode() == Instruction::Ret); |
3091 | } |
3092 | static bool classof(const Value *V) { |
3093 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3094 | } |
3095 | |
3096 | private: |
3097 | BasicBlock *getSuccessor(unsigned idx) const { |
3098 | llvm_unreachable("ReturnInst has no successors!"); |
3099 | } |
3100 | |
3101 | void setSuccessor(unsigned idx, BasicBlock *B) { |
3102 | llvm_unreachable("ReturnInst has no successors!"); |
3103 | } |
3104 | }; |
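// [Editor's sketch, not in the original header] The two common Create()
// shapes: 'ret void' and 'ret %v', each appended to a block. Names are
// hypothetical.
#if 0
static void emitRets(llvm::LLVMContext &Ctx, llvm::Value *V,
                     llvm::BasicBlock *VoidBB, llvm::BasicBlock *ValBB) {
  llvm::ReturnInst::Create(Ctx, VoidBB);   // ret void
  llvm::ReturnInst::Create(Ctx, V, ValBB); // ret V
}
#endif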
3105 | |
3106 | template <> |
3107 | struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { |
3108 | }; |
3109 | |
3110 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value) |
3111 | |
3112 | //===----------------------------------------------------------------------===// |
3113 | // BranchInst Class |
3114 | //===----------------------------------------------------------------------===// |
3115 | |
3116 | //===--------------------------------------------------------------------------- |
3117 | /// Conditional or Unconditional Branch instruction. |
3118 | /// |
3119 | class BranchInst : public Instruction { |
3120 | /// Ops list - Branches are strange. The operands are ordered: |
3121 | /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because |
3122 | /// they don't have to check for cond/uncond branchness. These are mostly |
3123 | /// accessed relative to op_end(). |
3124 | BranchInst(const BranchInst &BI); |
3125 | // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): |
3126 | // BranchInst(BB *B) - 'br B' |
3127 | // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' |
3128 | // BranchInst(BB* B, Inst *I) - 'br B' insert before I |
3129 | // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I |
3130 | // BranchInst(BB* B, BB *I) - 'br B' insert at end |
3131 | // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end |
3132 | explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); |
3133 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3134 | Instruction *InsertBefore = nullptr); |
3135 | BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); |
3136 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3137 | BasicBlock *InsertAtEnd); |
3138 | |
3139 | void AssertOK(); |
3140 | |
3141 | protected: |
3142 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3143 | friend class Instruction; |
3144 | |
3145 | BranchInst *cloneImpl() const; |
3146 | |
3147 | public: |
3148 | /// Iterator type that casts an operand to a basic block. |
3149 | /// |
3150 | /// This only makes sense because the successors are stored as adjacent |
3151 | /// operands for branch instructions. |
3152 | struct succ_op_iterator |
3153 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3154 | std::random_access_iterator_tag, BasicBlock *, |
3155 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3156 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3157 | |
3158 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3159 | BasicBlock *operator->() const { return operator*(); } |
3160 | }; |
3161 | |
3162 | /// The const version of `succ_op_iterator`. |
3163 | struct const_succ_op_iterator |
3164 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3165 | std::random_access_iterator_tag, |
3166 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3167 | const BasicBlock *> { |
3168 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3169 | : iterator_adaptor_base(I) {} |
3170 | |
3171 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3172 | const BasicBlock *operator->() const { return operator*(); } |
3173 | }; |
3174 | |
3175 | static BranchInst *Create(BasicBlock *IfTrue, |
3176 | Instruction *InsertBefore = nullptr) { |
3177 | return new(1) BranchInst(IfTrue, InsertBefore); |
3178 | } |
3179 | |
3180 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3181 | Value *Cond, Instruction *InsertBefore = nullptr) { |
3182 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); |
3183 | } |
3184 | |
3185 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { |
3186 | return new(1) BranchInst(IfTrue, InsertAtEnd); |
3187 | } |
3188 | |
3189 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3190 | Value *Cond, BasicBlock *InsertAtEnd) { |
3191 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); |
3192 | } |
3193 | |
3194 | /// Transparently provide more efficient getOperand methods. |
3195 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
3196 | |
3197 | bool isUnconditional() const { return getNumOperands() == 1; } |
3198 | bool isConditional() const { return getNumOperands() == 3; } |
3199 | |
3200 | Value *getCondition() const { |
3201 | assert(isConditional() && "Cannot get condition of an uncond branch!");
3202 | return Op<-3>(); |
3203 | } |
3204 | |
3205 | void setCondition(Value *V) { |
3206 | assert(isConditional() && "Cannot set condition of unconditional branch!");
3207 | Op<-3>() = V; |
3208 | } |
3209 | |
3210 | unsigned getNumSuccessors() const { return 1+isConditional(); } |
3211 | |
3212 | BasicBlock *getSuccessor(unsigned i) const { |
3213 | assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3214 | return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); |
3215 | } |
3216 | |
3217 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3218 | assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3219 | *(&Op<-1>() - idx) = NewSucc; |
3220 | } |
3221 | |
3222 | /// Swap the successors of this branch instruction. |
3223 | /// |
3224 | /// Swaps the successors of the branch instruction. This also swaps any |
3225 | /// branch weight metadata associated with the instruction so that it |
3226 | /// continues to map correctly to each operand. |
3227 | void swapSuccessors(); |
3228 | |
3229 | iterator_range<succ_op_iterator> successors() { |
3230 | return make_range( |
3231 | succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3232 | succ_op_iterator(value_op_end())); |
3233 | } |
3234 | |
3235 | iterator_range<const_succ_op_iterator> successors() const { |
3236 | return make_range(const_succ_op_iterator( |
3237 | std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3238 | const_succ_op_iterator(value_op_end())); |
3239 | } |
3240 | |
3241 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3242 | static bool classof(const Instruction *I) { |
3243 | return (I->getOpcode() == Instruction::Br); |
3244 | } |
3245 | static bool classof(const Value *V) { |
3246 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3247 | } |
3248 | }; |
3249 | |
3250 | template <> |
3251 | struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { |
3252 | }; |
3253 | |
3254 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
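// ---------------------------------------------------------------------------
// Editorial note (not part of Instructions.h): a minimal sketch of how the
// BranchInst API above is typically used. wireUpBranch and its parameter
// names are placeholders, not LLVM symbols.
// ---------------------------------------------------------------------------
// #include "llvm/IR/Instructions.h"
// #include <cassert>
// using namespace llvm;
//
// void wireUpBranch(BasicBlock *BB, BasicBlock *Then, BasicBlock *Else,
//                   Value *Cond) {
//   // 'br i1 Cond, label %Then, label %Else', appended to the end of BB.
//   // Operands are stored as [Cond, IfFalse, IfTrue], which is why
//   // getCondition() reads Op<-3>() and getSuccessor(0) reads Op<-1>().
//   BranchInst *Br = BranchInst::Create(Then, Else, Cond, BB);
//   assert(Br->isConditional() && Br->getNumSuccessors() == 2);
//   assert(Br->getSuccessor(0) == Then && Br->getSuccessor(1) == Else);
//
//   // swapSuccessors() also swaps any !prof branch-weight metadata so the
//   // weights keep tracking the right edges.
//   Br->swapSuccessors();
//   for (BasicBlock *Succ : Br->successors())
//     (void)Succ; // successors() skips the condition operand automatically
// }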
3255 | |
3256 | //===----------------------------------------------------------------------===// |
3257 | // SwitchInst Class |
3258 | //===----------------------------------------------------------------------===// |
3259 | |
3260 | //===--------------------------------------------------------------------------- |
3261 | /// Multiway switch |
3262 | /// |
3263 | class SwitchInst : public Instruction { |
3264 | unsigned ReservedSpace; |
3265 | |
3266 | // Operand[0] = Value to switch on |
3267 | // Operand[1] = Default basic block destination |
3268 | // Operand[2n ] = Value to match |
3269 | // Operand[2n+1] = BasicBlock to go to on match |
3270 | SwitchInst(const SwitchInst &SI); |
3271 | |
3272 | /// Create a new switch instruction, specifying a value to switch on and a |
3273 | /// default destination. The number of additional cases can be specified here |
3274 | /// to make memory allocation more efficient. This constructor can also |
3275 | /// auto-insert before another instruction. |
3276 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3277 | Instruction *InsertBefore); |
3278 | |
3279 | /// Create a new switch instruction, specifying a value to switch on and a |
3280 | /// default destination. The number of additional cases can be specified here |
3281 | /// to make memory allocation more efficient. This constructor also |
3282 | /// auto-inserts at the end of the specified BasicBlock. |
3283 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3284 | BasicBlock *InsertAtEnd); |
3285 | |
3286 | // allocate space for exactly zero operands |
3287 | void *operator new(size_t S) { return User::operator new(S); } |
3288 | |
3289 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved); |
3290 | void growOperands(); |
3291 | |
3292 | protected: |
3293 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3294 | friend class Instruction; |
3295 | |
3296 | SwitchInst *cloneImpl() const; |
3297 | |
3298 | public: |
3299 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
3300 | |
3301 | // -2: a pseudo-index reserved for the default case.
3302 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); |
3303 | |
3304 | template <typename CaseHandleT> class CaseIteratorImpl; |
3305 | |
3306 | /// A handle to a particular switch case. It exposes a convenient interface |
3307 | /// to both the case value and the successor block. |
3308 | /// |
3309 | /// We define this as a template and instantiate it to form both a const and |
3310 | /// non-const handle. |
3311 | template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> |
3312 | class CaseHandleImpl { |
3313 | // Directly befriend both const and non-const iterators. |
3314 | friend class SwitchInst::CaseIteratorImpl< |
3315 | CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; |
3316 | |
3317 | protected: |
3318 | // Expose the switch type we're parameterized with to the iterator. |
3319 | using SwitchInstType = SwitchInstT; |
3320 | |
3321 | SwitchInstT *SI; |
3322 | ptrdiff_t Index; |
3323 | |
3324 | CaseHandleImpl() = default; |
3325 | CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} |
3326 | |
3327 | public: |
3328 | /// Resolves the case value for the current case.
3329 | ConstantIntT *getCaseValue() const { |
3330 | assert((unsigned)Index < SI->getNumCases() &&
3331 |        "Index out the number of cases.");
3332 | return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); |
3333 | } |
3334 | |
3335 | /// Resolves the successor for the current case.
3336 | BasicBlockT *getCaseSuccessor() const { |
3337 | assert(((unsigned)Index < SI->getNumCases() ||
3338 |         (unsigned)Index == DefaultPseudoIndex) &&
3339 |        "Index out the number of cases.");
3340 | return SI->getSuccessor(getSuccessorIndex()); |
3341 | } |
3342 | |
3343 | /// Returns the index of the current case.
3344 | unsigned getCaseIndex() const { return Index; } |
3345 | |
3346 | /// Returns the successor index for the current case.
3347 | unsigned getSuccessorIndex() const { |
3348 | assert(((unsigned)Index == DefaultPseudoIndex ||
3349 |         (unsigned)Index < SI->getNumCases()) &&
3350 |        "Index out the number of cases.");
3351 | return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; |
3352 | } |
3353 | |
3354 | bool operator==(const CaseHandleImpl &RHS) const { |
3355 | assert(SI == RHS.SI && "Incompatible operators.");
3356 | return Index == RHS.Index; |
3357 | } |
3358 | }; |
3359 | |
3360 | using ConstCaseHandle = |
3361 | CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; |
3362 | |
3363 | class CaseHandle |
3364 | : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { |
3365 | friend class SwitchInst::CaseIteratorImpl<CaseHandle>; |
3366 | |
3367 | public: |
3368 | CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} |
3369 | |
3370 | /// Sets the new value for the current case.
3371 | void setValue(ConstantInt *V) const { |
3372 | assert((unsigned)Index < SI->getNumCases() &&
3373 |        "Index out the number of cases.");
3374 | SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); |
3375 | } |
3376 | |
3377 | /// Sets the new successor for the current case.
3378 | void setSuccessor(BasicBlock *S) const { |
3379 | SI->setSuccessor(getSuccessorIndex(), S); |
3380 | } |
3381 | }; |
3382 | |
3383 | template <typename CaseHandleT> |
3384 | class CaseIteratorImpl |
3385 | : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, |
3386 | std::random_access_iterator_tag, |
3387 | const CaseHandleT> { |
3388 | using SwitchInstT = typename CaseHandleT::SwitchInstType; |
3389 | |
3390 | CaseHandleT Case; |
3391 | |
3392 | public: |
3393 | /// A default-constructed iterator is in an invalid state until assigned
3394 | /// to a case of a particular switch.
3395 | CaseIteratorImpl() = default; |
3396 | |
3397 | /// Initializes a case iterator for the given SwitchInst and the given
3398 | /// case number.
3399 | CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} |
3400 | |
3401 | /// Initializes a case iterator for the given SwitchInst and the given
3402 | /// successor index.
3403 | static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, |
3404 | unsigned SuccessorIndex) { |
3405 | assert(SuccessorIndex < SI->getNumSuccessors() &&
3406 |        "Successor index # out of range!");
3407 | return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) |
3408 | : CaseIteratorImpl(SI, DefaultPseudoIndex); |
3409 | } |
3410 | |
3411 | /// Support converting to the const variant. This is a no-op for the
3412 | /// const variant.
3413 | operator CaseIteratorImpl<ConstCaseHandle>() const { |
3414 | return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); |
3415 | } |
3416 | |
3417 | CaseIteratorImpl &operator+=(ptrdiff_t N) { |
3418 | // Check index correctness after addition. |
3419 | // Note: Index == getNumCases() means end(). |
3420 | assert(Case.Index + N >= 0 &&
3421 |        (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3422 |        "Case.Index out the number of cases.");
3423 | Case.Index += N; |
3424 | return *this; |
3425 | } |
3426 | CaseIteratorImpl &operator-=(ptrdiff_t N) { |
3427 | // Check index correctness after subtraction. |
3428 | // Note: Case.Index == getNumCases() means end(). |
3429 | assert(Case.Index - N >= 0 &&
3430 |        (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3431 |        "Case.Index out the number of cases.");
3432 | Case.Index -= N; |
3433 | return *this; |
3434 | } |
3435 | ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { |
3436 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3437 | return Case.Index - RHS.Case.Index; |
3438 | } |
3439 | bool operator==(const CaseIteratorImpl &RHS) const { |
3440 | return Case == RHS.Case; |
3441 | } |
3442 | bool operator<(const CaseIteratorImpl &RHS) const { |
3443 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3444 | return Case.Index < RHS.Case.Index; |
3445 | } |
3446 | const CaseHandleT &operator*() const { return Case; } |
3447 | }; |
3448 | |
3449 | using CaseIt = CaseIteratorImpl<CaseHandle>; |
3450 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; |
3451 | |
3452 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3453 | unsigned NumCases, |
3454 | Instruction *InsertBefore = nullptr) { |
3455 | return new SwitchInst(Value, Default, NumCases, InsertBefore); |
3456 | } |
3457 | |
3458 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3459 | unsigned NumCases, BasicBlock *InsertAtEnd) { |
3460 | return new SwitchInst(Value, Default, NumCases, InsertAtEnd); |
3461 | } |
3462 | |
3463 | /// Provide fast operand accessors |
3464 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3465 | |
3466 | // Accessor Methods for Switch stmt |
3467 | Value *getCondition() const { return getOperand(0); } |
3468 | void setCondition(Value *V) { setOperand(0, V); } |
3469 | |
3470 | BasicBlock *getDefaultDest() const { |
3471 | return cast<BasicBlock>(getOperand(1)); |
3472 | } |
3473 | |
3474 | void setDefaultDest(BasicBlock *DefaultCase) { |
3475 | setOperand(1, reinterpret_cast<Value*>(DefaultCase)); |
3476 | } |
3477 | |
3478 | /// Return the number of 'cases' in this switch instruction, excluding the |
3479 | /// default case. |
3480 | unsigned getNumCases() const { |
3481 | return getNumOperands()/2 - 1; |
3482 | } |
3483 | |
3484 | /// Returns a read/write iterator that points to the first case in the |
3485 | /// SwitchInst. |
3486 | CaseIt case_begin() { |
3487 | return CaseIt(this, 0); |
3488 | } |
3489 | |
3490 | /// Returns a read-only iterator that points to the first case in the |
3491 | /// SwitchInst. |
3492 | ConstCaseIt case_begin() const { |
3493 | return ConstCaseIt(this, 0); |
3494 | } |
3495 | |
3496 | /// Returns a read/write iterator that points one past the last in the |
3497 | /// SwitchInst. |
3498 | CaseIt case_end() { |
3499 | return CaseIt(this, getNumCases()); |
3500 | } |
3501 | |
3502 | /// Returns a read-only iterator that points one past the last in the |
3503 | /// SwitchInst. |
3504 | ConstCaseIt case_end() const { |
3505 | return ConstCaseIt(this, getNumCases()); |
3506 | } |
3507 | |
3508 | /// Iteration adapter for range-for loops. |
3509 | iterator_range<CaseIt> cases() { |
3510 | return make_range(case_begin(), case_end()); |
3511 | } |
3512 | |
3513 | /// Constant iteration adapter for range-for loops. |
3514 | iterator_range<ConstCaseIt> cases() const { |
3515 | return make_range(case_begin(), case_end()); |
3516 | } |
3517 | |
3518 | /// Returns an iterator that points to the default case. |
3519 | /// Note: this iterator can only resolve the successor; attempting to
3520 | /// resolve the case value causes an assertion.
3521 | /// Also note that incrementing or decrementing this iterator causes an
3522 | /// assertion and makes it invalid.
3523 | CaseIt case_default() { |
3524 | return CaseIt(this, DefaultPseudoIndex); |
3525 | } |
3526 | ConstCaseIt case_default() const { |
3527 | return ConstCaseIt(this, DefaultPseudoIndex); |
3528 | } |
3529 | |
3530 | /// Search all of the case values for the specified constant. If it is
3531 | /// explicitly handled, return its case iterator; otherwise return the
3532 | /// default case iterator to indicate that the value is handled by the
3533 | /// default destination.
3534 | CaseIt findCaseValue(const ConstantInt *C) { |
3535 | return CaseIt( |
3536 | this, |
3537 | const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); |
3538 | } |
3539 | ConstCaseIt findCaseValue(const ConstantInt *C) const { |
3540 | ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { |
3541 | return Case.getCaseValue() == C; |
3542 | }); |
3543 | if (I != case_end()) |
3544 | return I; |
3545 | |
3546 | return case_default(); |
3547 | } |
3548 | |
3549 | /// Finds the unique case value for a given successor. Returns null if the |
3550 | /// successor is not found, not unique, or is the default case. |
3551 | ConstantInt *findCaseDest(BasicBlock *BB) { |
3552 | if (BB == getDefaultDest()) |
3553 | return nullptr; |
3554 | |
3555 | ConstantInt *CI = nullptr; |
3556 | for (auto Case : cases()) { |
3557 | if (Case.getCaseSuccessor() != BB) |
3558 | continue; |
3559 | |
3560 | if (CI) |
3561 | return nullptr; // Multiple cases lead to BB. |
3562 | |
3563 | CI = Case.getCaseValue(); |
3564 | } |
3565 | |
3566 | return CI; |
3567 | } |
3568 | |
3569 | /// Add an entry to the switch instruction. |
3570 | /// Note: |
3571 | /// This action invalidates case_end(); the old case_end() iterator
3572 | /// will point to the newly added case.
3573 | void addCase(ConstantInt *OnVal, BasicBlock *Dest); |
3574 | |
3575 | /// This method removes the specified case and its successor from the switch |
3576 | /// instruction. Note that this operation may reorder the remaining cases at |
3577 | /// index idx and above. |
3578 | /// Note: |
3579 | /// This action invalidates iterators for all cases following the one removed, |
3580 | /// including the case_end() iterator. It returns an iterator for the next |
3581 | /// case. |
3582 | CaseIt removeCase(CaseIt I); |
3583 | |
3584 | unsigned getNumSuccessors() const { return getNumOperands()/2; } |
3585 | BasicBlock *getSuccessor(unsigned idx) const { |
3586 | assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
3587 | return cast<BasicBlock>(getOperand(idx*2+1)); |
3588 | } |
3589 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3590 | assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3591 | setOperand(idx * 2 + 1, NewSucc); |
3592 | } |
3593 | |
3594 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3595 | static bool classof(const Instruction *I) { |
3596 | return I->getOpcode() == Instruction::Switch; |
3597 | } |
3598 | static bool classof(const Value *V) { |
3599 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3600 | } |
3601 | }; |
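// ---------------------------------------------------------------------------
// Editorial note (not part of Instructions.h): a minimal sketch of building
// and querying a switch with the API above. buildSwitch and its parameter
// names are placeholders; everything else is the documented SwitchInst API.
// ---------------------------------------------------------------------------
// #include "llvm/IR/Constants.h"
// #include "llvm/IR/Instructions.h"
// using namespace llvm;
//
// void buildSwitch(Value *Sel, BasicBlock *DefaultBB, BasicBlock *OnZero,
//                  BasicBlock *OnOne, BasicBlock *Parent, LLVMContext &Ctx) {
//   // NumCases only pre-reserves hung-off operand space; addCase() grows
//   // the operand list on demand.
//   SwitchInst *SI = SwitchInst::Create(Sel, DefaultBB, /*NumCases=*/2,
//                                       Parent);
//   IntegerType *I32 = Type::getInt32Ty(Ctx);
//   SI->addCase(ConstantInt::get(I32, 0), OnZero);
//   SI->addCase(ConstantInt::get(I32, 1), OnOne);
//
//   // Successor index 0 is the default block; case i maps to successor
//   // i + 1 (see CaseHandleImpl::getSuccessorIndex above).
//   for (const auto &Case : SI->cases()) {
//     ConstantInt *CV = Case.getCaseValue();
//     BasicBlock *Dest = Case.getCaseSuccessor();
//     (void)CV;
//     (void)Dest;
//   }
//
//   // findCaseValue() falls back to case_default() for unhandled constants.
//   if (SI->findCaseValue(ConstantInt::get(I32, 7)) == SI->case_default()) {
//     // 7 is not an explicit case, so it is routed to DefaultBB.
//   }
// }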
3602 | |
3603 | /// A wrapper class to simplify modification of SwitchInst cases along with |
3604 | /// their prof branch_weights metadata. |
3605 | class SwitchInstProfUpdateWrapper { |
3606 | SwitchInst &SI; |
3607 | Optional<SmallVector<uint32_t, 8> > Weights = None; |
3608 | bool Changed = false; |
3609 | |
3610 | protected: |
3611 | static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); |
3612 | |
3613 | MDNode *buildProfBranchWeightsMD(); |
3614 | |
3615 | void init(); |
3616 | |
3617 | public: |
3618 | using CaseWeightOpt = Optional<uint32_t>; |
3619 | SwitchInst *operator->() { return &SI; } |
3620 | SwitchInst &operator*() { return SI; } |
3621 | operator SwitchInst *() { return &SI; } |
3622 | |
3623 | SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } |
3624 | |
3625 | ~SwitchInstProfUpdateWrapper() { |
3626 | if (Changed) |
3627 | SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); |
3628 | } |
3629 | |
3630 | /// Delegate the call to the underlying SwitchInst::removeCase() and
3631 | /// remove the corresponding branch weight.
3632 | SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); |
3633 | |
3634 | /// Delegate the call to the underlying SwitchInst::addCase() and set the |
3635 | /// specified branch weight for the added case. |
3636 | void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); |
3637 | |
3638 | /// Delegate the call to the underlying SwitchInst::eraseFromParent() and
3639 | /// mark this object so its destructor does not touch the SwitchInst.
3640 | SymbolTableList<Instruction>::iterator eraseFromParent(); |
3641 | |
3642 | void setSuccessorWeight(unsigned idx, CaseWeightOpt W); |
3643 | CaseWeightOpt getSuccessorWeight(unsigned idx); |
3644 | |
3645 | static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); |
3646 | }; |
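// ---------------------------------------------------------------------------
// Editorial note (not part of Instructions.h): a sketch of the wrapper's
// intended use. dropFirstCase is a placeholder name.
// ---------------------------------------------------------------------------
// void dropFirstCase(SwitchInst &SI) {
//   SwitchInstProfUpdateWrapper Wrapper(SI);
//   if (Wrapper->getNumCases() > 0)
//     Wrapper.removeCase(Wrapper->case_begin()); // also drops that weight
//   // On destruction the wrapper re-emits the !prof branch_weights
//   // metadata, but only if something actually changed.
// }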
3647 | |
3648 | template <> |
3649 | struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { |
3650 | }; |
3651 | |
3652 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3653 | |
3654 | //===----------------------------------------------------------------------===// |
3655 | // IndirectBrInst Class |
3656 | //===----------------------------------------------------------------------===// |
3657 | |
3658 | //===--------------------------------------------------------------------------- |
3659 | /// Indirect Branch Instruction. |
3660 | /// |
3661 | class IndirectBrInst : public Instruction { |
3662 | unsigned ReservedSpace; |
3663 | |
3664 | // Operand[0] = Address to jump to |
3665 | // Operand[n+1] = n-th destination |
3666 | IndirectBrInst(const IndirectBrInst &IBI); |
3667 | |
3668 | /// Create a new indirectbr instruction, specifying an |
3669 | /// Address to jump to. The number of expected destinations can be specified |
3670 | /// here to make memory allocation more efficient. This constructor can also |
3671 | /// autoinsert before another instruction. |
3672 | IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); |
3673 | |
3674 | /// Create a new indirectbr instruction, specifying an |
3675 | /// Address to jump to. The number of expected destinations can be specified |
3676 | /// here to make memory allocation more efficient. This constructor also |
3677 | /// autoinserts at the end of the specified BasicBlock. |
3678 | IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); |
3679 | |
3680 | // allocate space for exactly zero operands |
3681 | void *operator new(size_t S) { return User::operator new(S); } |
3682 | |
3683 | void init(Value *Address, unsigned NumDests); |
3684 | void growOperands(); |
3685 | |
3686 | protected: |
3687 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3688 | friend class Instruction; |
3689 | |
3690 | IndirectBrInst *cloneImpl() const; |
3691 | |
3692 | public: |
3693 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
3694 | |
3695 | /// Iterator type that casts an operand to a basic block. |
3696 | /// |
3697 | /// This only makes sense because the successors are stored as adjacent |
3698 | /// operands for indirectbr instructions. |
3699 | struct succ_op_iterator |
3700 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3701 | std::random_access_iterator_tag, BasicBlock *, |
3702 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3703 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3704 | |
3705 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3706 | BasicBlock *operator->() const { return operator*(); } |
3707 | }; |
3708 | |
3709 | /// The const version of `succ_op_iterator`. |
3710 | struct const_succ_op_iterator |
3711 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3712 | std::random_access_iterator_tag, |
3713 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3714 | const BasicBlock *> { |
3715 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3716 | : iterator_adaptor_base(I) {} |
3717 | |
3718 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3719 | const BasicBlock *operator->() const { return operator*(); } |
3720 | }; |
3721 | |
3722 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3723 | Instruction *InsertBefore = nullptr) { |
3724 | return new IndirectBrInst(Address, NumDests, InsertBefore); |
3725 | } |
3726 | |
3727 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3728 | BasicBlock *InsertAtEnd) { |
3729 | return new IndirectBrInst(Address, NumDests, InsertAtEnd); |
3730 | } |
3731 | |
3732 | /// Provide fast operand accessors. |
3733 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3734 | |
3735 | // Accessor methods for the indirectbr instruction.
3736 | Value *getAddress() { return getOperand(0); } |
3737 | const Value *getAddress() const { return getOperand(0); } |
3738 | void setAddress(Value *V) { setOperand(0, V); } |
3739 | |
3740 | /// Return the number of possible destinations in this
3741 | /// indirectbr instruction.
3742 | unsigned getNumDestinations() const { return getNumOperands()-1; } |
3743 | |
3744 | /// Return the specified destination. |
3745 | BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } |
3746 | const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } |
3747 | |
3748 | /// Add a destination. |
3749 | /// |
3750 | void addDestination(BasicBlock *Dest); |
3751 | |
3752 | /// This method removes the specified successor from the |
3753 | /// indirectbr instruction. |
3754 | void removeDestination(unsigned i); |
3755 | |
3756 | unsigned getNumSuccessors() const { return getNumOperands()-1; } |
3757 | BasicBlock *getSuccessor(unsigned i) const { |
3758 | return cast<BasicBlock>(getOperand(i+1)); |
3759 | } |
3760 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3761 | setOperand(i + 1, NewSucc); |
3762 | } |
3763 | |
3764 | iterator_range<succ_op_iterator> successors() { |
3765 | return make_range(succ_op_iterator(std::next(value_op_begin())), |
3766 | succ_op_iterator(value_op_end())); |
3767 | } |
3768 | |
3769 | iterator_range<const_succ_op_iterator> successors() const { |
3770 | return make_range(const_succ_op_iterator(std::next(value_op_begin())), |
3771 | const_succ_op_iterator(value_op_end())); |
3772 | } |
3773 | |
3774 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3775 | static bool classof(const Instruction *I) { |
3776 | return I->getOpcode() == Instruction::IndirectBr; |
3777 | } |
3778 | static bool classof(const Value *V) { |
3779 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3780 | } |
3781 | }; |
3782 | |
3783 | template <> |
3784 | struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { |
3785 | }; |
3786 | |
3787 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
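// ---------------------------------------------------------------------------
// Editorial note (not part of Instructions.h): a sketch of creating an
// indirectbr. emitIndirectBr and its parameter names are placeholders; D0
// and D1 are assumed to be blocks of F whose addresses may be taken.
// ---------------------------------------------------------------------------
// #include "llvm/IR/Constants.h" // BlockAddress
//
// void emitIndirectBr(Function *F, BasicBlock *Src, BasicBlock *D0,
//                     BasicBlock *D1) {
//   // Operand 0 is the computed address; every block the address might
//   // name must also be listed as a destination so the CFG stays correct.
//   Value *Addr = BlockAddress::get(F, D0);
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2, Src);
//   IBI->addDestination(D0);
//   IBI->addDestination(D1);
//   // getNumDestinations() == getNumSuccessors() == getNumOperands() - 1.
// }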
3788 | |
3789 | //===----------------------------------------------------------------------===// |
3790 | // InvokeInst Class |
3791 | //===----------------------------------------------------------------------===// |
3792 | |
3793 | /// Invoke instruction. The SubclassData field is used to hold the |
3794 | /// calling convention of the call. |
3795 | /// |
3796 | class InvokeInst : public CallBase { |
3797 | /// The number of operands for this call beyond the called function, |
3798 | /// arguments, and operand bundles. |
3799 | static constexpr int NumExtraOperands = 2; |
3800 | |
3801 | /// The index from the end of the operand array to the normal destination. |
3802 | static constexpr int NormalDestOpEndIdx = -3; |
3803 | |
3804 | /// The index from the end of the operand array to the unwind destination. |
3805 | static constexpr int UnwindDestOpEndIdx = -2; |
3806 | |
3807 | InvokeInst(const InvokeInst &BI); |
3808 | |
3809 | /// Construct an InvokeInst given a range of arguments. |
3810 | /// |
3811 | /// Construct an InvokeInst from a range of arguments.
3812 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3813 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3814 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3815 | const Twine &NameStr, Instruction *InsertBefore); |
3816 | |
3817 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3818 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3819 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3820 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3821 | |
3822 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3823 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3824 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3825 | |
3826 | /// Compute the number of operands to allocate. |
3827 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
3828 | // We need one operand for the called function, plus our extra operands and |
3829 | // the input operand counts provided. |
3830 | return 1 + NumExtraOperands + NumArgs + NumBundleInputs; |
3831 | } |
3832 | |
3833 | protected: |
3834 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3835 | friend class Instruction; |
3836 | |
3837 | InvokeInst *cloneImpl() const; |
3838 | |
3839 | public: |
3840 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3841 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3842 | const Twine &NameStr, |
3843 | Instruction *InsertBefore = nullptr) { |
3844 | int NumOperands = ComputeNumOperands(Args.size()); |
3845 | return new (NumOperands) |
3846 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3847 | NameStr, InsertBefore); |
3848 | } |
3849 | |
3850 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3851 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3852 | ArrayRef<OperandBundleDef> Bundles = None, |
3853 | const Twine &NameStr = "", |
3854 | Instruction *InsertBefore = nullptr) { |
3855 | int NumOperands = |
3856 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3857 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3858 | |
3859 | return new (NumOperands, DescriptorBytes) |
3860 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3861 | NameStr, InsertBefore); |
3862 | } |
3863 | |
3864 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3865 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3866 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3867 | int NumOperands = ComputeNumOperands(Args.size()); |
3868 | return new (NumOperands) |
3869 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3870 | NameStr, InsertAtEnd); |
3871 | } |
3872 | |
3873 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3874 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3875 | ArrayRef<OperandBundleDef> Bundles, |
3876 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3877 | int NumOperands = |
3878 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3879 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3880 | |
3881 | return new (NumOperands, DescriptorBytes) |
3882 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3883 | NameStr, InsertAtEnd); |
3884 | } |
3885 | |
3886 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3887 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3888 | const Twine &NameStr, |
3889 | Instruction *InsertBefore = nullptr) { |
3890 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3891 | IfException, Args, None, NameStr, InsertBefore); |
3892 | } |
3893 | |
3894 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3895 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3896 | ArrayRef<OperandBundleDef> Bundles = None, |
3897 | const Twine &NameStr = "", |
3898 | Instruction *InsertBefore = nullptr) { |
3899 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3900 | IfException, Args, Bundles, NameStr, InsertBefore); |
3901 | } |
3902 | |
3903 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3904 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3905 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3906 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3907 | IfException, Args, NameStr, InsertAtEnd); |
3908 | } |
3909 | |
3910 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3911 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3912 | ArrayRef<OperandBundleDef> Bundles, |
3913 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3914 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3915 | IfException, Args, Bundles, NameStr, InsertAtEnd); |
3916 | } |
3917 | |
3918 | /// Create a clone of \p II with a different set of operand bundles and |
3919 | /// insert it before \p InsertPt. |
3920 | /// |
3921 | /// The returned invoke instruction is identical to \p II in every way except |
3922 | /// that the operand bundles for the new instruction are set to the operand |
3923 | /// bundles in \p Bundles. |
3924 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, |
3925 | Instruction *InsertPt = nullptr); |
3926 | |
3927 | // get*Dest - Return the destination basic blocks... |
3928 | BasicBlock *getNormalDest() const { |
3929 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); |
3930 | } |
3931 | BasicBlock *getUnwindDest() const { |
3932 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); |
3933 | } |
3934 | void setNormalDest(BasicBlock *B) { |
3935 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3936 | } |
3937 | void setUnwindDest(BasicBlock *B) { |
3938 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3939 | } |
3940 | |
3941 | /// Get the landingpad instruction from the landing pad |
3942 | /// block (the unwind destination). |
3943 | LandingPadInst *getLandingPadInst() const; |
3944 | |
3945 | BasicBlock *getSuccessor(unsigned i) const { |
3946 | assert(i < 2 && "Successor # out of range for invoke!");
3947 | return i == 0 ? getNormalDest() : getUnwindDest(); |
3948 | } |
3949 | |
3950 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3951 | assert(i < 2 && "Successor # out of range for invoke!");
3952 | if (i == 0) |
3953 | setNormalDest(NewSucc); |
3954 | else |
3955 | setUnwindDest(NewSucc); |
3956 | } |
3957 | |
3958 | unsigned getNumSuccessors() const { return 2; } |
3959 | |
3960 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3961 | static bool classof(const Instruction *I) { |
3962 | return (I->getOpcode() == Instruction::Invoke); |
3963 | } |
3964 | static bool classof(const Value *V) { |
3965 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3966 | } |
3967 | |
3968 | private: |
3969 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
3970 | // method so that subclasses cannot accidentally use it. |
3971 | template <typename Bitfield> |
3972 | void setSubclassData(typename Bitfield::Type Value) { |
3973 | Instruction::setSubclassData<Bitfield>(Value); |
3974 | } |
3975 | }; |
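// ---------------------------------------------------------------------------
// Editorial note (not part of Instructions.h): a sketch of creating an
// invoke. emitInvoke and its parameter names are placeholders; UnwindBB is
// assumed to start with a landingpad in a function that has an EH
// personality, as the verifier requires.
// ---------------------------------------------------------------------------
// #include <cassert>
//
// void emitInvoke(FunctionCallee Callee, Value *Arg, BasicBlock *Src,
//                 BasicBlock *NormalBB, BasicBlock *UnwindBB) {
//   InvokeInst *II =
//       InvokeInst::Create(Callee, NormalBB, UnwindBB, {Arg}, "", Src);
//   // Both destinations sit at fixed offsets from the end of the operand
//   // array (NormalDestOpEndIdx / UnwindDestOpEndIdx above); the called
//   // operand itself is the last operand.
//   assert(II->getNormalDest() == NormalBB &&
//          II->getUnwindDest() == UnwindBB);
// }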
3976 | |
3977 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3978 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3979 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3980 | const Twine &NameStr, Instruction *InsertBefore) |
3981 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3982 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3983 | InsertBefore) { |
3984 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3985 | } |
3986 | |
3987 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3988 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3989 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3990 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
3991 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3992 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3993 | InsertAtEnd) { |
3994 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3995 | } |
3996 | |
3997 | //===----------------------------------------------------------------------===// |
3998 | // CallBrInst Class |
3999 | //===----------------------------------------------------------------------===// |
4000 | |
4001 | /// CallBr instruction, tracking function calls that may not return control but |
4002 | /// instead transfer it to a third location. The SubclassData field is used to |
4003 | /// hold the calling convention of the call. |
4004 | /// |
4005 | class CallBrInst : public CallBase { |
4006 | |
4007 | unsigned NumIndirectDests; |
4008 | |
4009 | CallBrInst(const CallBrInst &BI); |
4010 | |
4011 | /// Construct a CallBrInst given a range of arguments. |
4012 | /// |
4013 | /// Construct a CallBrInst from a range of arguments.
4014 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4015 | ArrayRef<BasicBlock *> IndirectDests, |
4016 | ArrayRef<Value *> Args, |
4017 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4018 | const Twine &NameStr, Instruction *InsertBefore); |
4019 | |
4020 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4021 | ArrayRef<BasicBlock *> IndirectDests, |
4022 | ArrayRef<Value *> Args, |
4023 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4024 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
4025 | |
4026 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, |
4027 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, |
4028 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
4029 | |
4030 | /// Compute the number of operands to allocate. |
4031 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, |
4032 | int NumBundleInputs = 0) { |
4033 | // We need one operand for the called function, plus our extra operands and |
4034 | // the input operand counts provided. |
4035 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; |
4036 | } |
4037 | |
4038 | protected: |
4039 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4040 | friend class Instruction; |
4041 | |
4042 | CallBrInst *cloneImpl() const; |
4043 | |
4044 | public: |
4045 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
4046 | BasicBlock *DefaultDest, |
4047 | ArrayRef<BasicBlock *> IndirectDests, |
4048 | ArrayRef<Value *> Args, const Twine &NameStr, |
4049 | Instruction *InsertBefore = nullptr) { |
4050 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
4051 | return new (NumOperands) |
4052 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
4053 | NumOperands, NameStr, InsertBefore); |
4054 | } |
4055 | |
4056 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
4057 | BasicBlock *DefaultDest, |
4058 | ArrayRef<BasicBlock *> IndirectDests, |
4059 | ArrayRef<Value *> Args, |
4060 | ArrayRef<OperandBundleDef> Bundles = None, |
4061 | const Twine &NameStr = "", |
4062 | Instruction *InsertBefore = nullptr) { |
4063 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
4064 | CountBundleInputs(Bundles)); |
4065 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
4066 | |
4067 | return new (NumOperands, DescriptorBytes) |
4068 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
4069 | NumOperands, NameStr, InsertBefore); |
4070 | } |
4071 | |
4072 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
4073 | BasicBlock *DefaultDest, |
4074 | ArrayRef<BasicBlock *> IndirectDests, |
4075 | ArrayRef<Value *> Args, const Twine &NameStr, |
4076 | BasicBlock *InsertAtEnd) { |
4077 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
4078 | return new (NumOperands) |
4079 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
4080 | NumOperands, NameStr, InsertAtEnd); |
4081 | } |
4082 | |
4083 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
4084 | BasicBlock *DefaultDest, |
4085 | ArrayRef<BasicBlock *> IndirectDests, |
4086 | ArrayRef<Value *> Args, |
4087 | ArrayRef<OperandBundleDef> Bundles, |
4088 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4089 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
4090 | CountBundleInputs(Bundles)); |
4091 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
4092 | |
4093 | return new (NumOperands, DescriptorBytes) |
4094 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
4095 | NumOperands, NameStr, InsertAtEnd); |
4096 | } |
4097 | |
4098 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4099 | ArrayRef<BasicBlock *> IndirectDests, |
4100 | ArrayRef<Value *> Args, const Twine &NameStr, |
4101 | Instruction *InsertBefore = nullptr) { |
4102 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4103 | IndirectDests, Args, NameStr, InsertBefore); |
4104 | } |
4105 | |
4106 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4107 | ArrayRef<BasicBlock *> IndirectDests, |
4108 | ArrayRef<Value *> Args, |
4109 | ArrayRef<OperandBundleDef> Bundles = None, |
4110 | const Twine &NameStr = "", |
4111 | Instruction *InsertBefore = nullptr) { |
4112 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4113 | IndirectDests, Args, Bundles, NameStr, InsertBefore); |
4114 | } |
4115 | |
4116 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4117 | ArrayRef<BasicBlock *> IndirectDests, |
4118 | ArrayRef<Value *> Args, const Twine &NameStr, |
4119 | BasicBlock *InsertAtEnd) { |
4120 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4121 | IndirectDests, Args, NameStr, InsertAtEnd); |
4122 | } |
4123 | |
4124 | static CallBrInst *Create(FunctionCallee Func, |
4125 | BasicBlock *DefaultDest, |
4126 | ArrayRef<BasicBlock *> IndirectDests, |
4127 | ArrayRef<Value *> Args, |
4128 | ArrayRef<OperandBundleDef> Bundles, |
4129 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4130 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4131 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); |
4132 | } |
4133 | |
4134 | /// Create a clone of \p CBI with a different set of operand bundles and |
4135 | /// insert it before \p InsertPt. |
4136 | /// |
4137 | /// The returned callbr instruction is identical to \p CBI in every way |
4138 | /// except that the operand bundles for the new instruction are set to the |
4139 | /// operand bundles in \p Bundles. |
4140 | static CallBrInst *Create(CallBrInst *CBI, |
4141 | ArrayRef<OperandBundleDef> Bundles, |
4142 | Instruction *InsertPt = nullptr); |
4143 | |
4144 | /// Return the number of callbr indirect dest labels. |
4145 | /// |
4146 | unsigned getNumIndirectDests() const { return NumIndirectDests; } |
4147 | |
4148 | /// getIndirectDestLabel - Return the i-th indirect dest label. |
4149 | /// |
4150 | Value *getIndirectDestLabel(unsigned i) const { |
4151 | assert(i < getNumIndirectDests() && "Out of bounds!");
4152 | return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); |
4153 | } |
4154 | |
4155 | Value *getIndirectDestLabelUse(unsigned i) const { |
4156 | assert(i < getNumIndirectDests() && "Out of bounds!");
4157 | return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); |
4158 | } |
4159 | |
4160 | // Return the destination basic blocks... |
4161 | BasicBlock *getDefaultDest() const { |
4162 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); |
4163 | } |
4164 | BasicBlock *getIndirectDest(unsigned i) const { |
4165 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); |
4166 | } |
4167 | SmallVector<BasicBlock *, 16> getIndirectDests() const { |
4168 | SmallVector<BasicBlock *, 16> IndirectDests; |
4169 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) |
4170 | IndirectDests.push_back(getIndirectDest(i)); |
4171 | return IndirectDests; |
4172 | } |
4173 | void setDefaultDest(BasicBlock *B) { |
4174 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); |
4175 | } |
4176 | void setIndirectDest(unsigned i, BasicBlock *B) { |
4177 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); |
4178 | } |
4179 | |
4180 | BasicBlock *getSuccessor(unsigned i) const { |
4181 | assert(i < getNumSuccessors() &&
4182 |        "Successor # out of range for callbr!");
4183 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); |
4184 | } |
4185 | |
4186 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
4187 | assert(i < getNumIndirectDests() + 1 &&
4188 |        "Successor # out of range for callbr!");
4189 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); |
4190 | } |
4191 | |
4192 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } |
4193 | |
4194 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4195 | static bool classof(const Instruction *I) { |
4196 | return (I->getOpcode() == Instruction::CallBr); |
4197 | } |
4198 | static bool classof(const Value *V) { |
4199 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4200 | } |
4201 | |
4202 | private: |
4203 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4204 | // method so that subclasses cannot accidentally use it. |
4205 | template <typename Bitfield> |
4206 | void setSubclassData(typename Bitfield::Type Value) { |
4207 | Instruction::setSubclassData<Bitfield>(Value); |
4208 | } |
4209 | }; |
4210 | |
4211 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4212 | ArrayRef<BasicBlock *> IndirectDests, |
4213 | ArrayRef<Value *> Args, |
4214 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4215 | const Twine &NameStr, Instruction *InsertBefore) |
4216 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4217 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4218 | InsertBefore) { |
4219 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4220 | } |
4221 | |
4222 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4223 | ArrayRef<BasicBlock *> IndirectDests, |
4224 | ArrayRef<Value *> Args, |
4225 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4226 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
4227 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4228 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4229 | InsertAtEnd) { |
4230 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4231 | } |
4232 | |
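// [Editor's note] A minimal sketch, not part of the original header, showing
// how the successor numbering above works: successor 0 is the fallthrough
// (default) destination and successors 1..N are the indirect destinations.
// The function name is illustrative only.
static void visitCallBrDests(const CallBrInst *CBI) {
  BasicBlock *Fallthrough = CBI->getDefaultDest(); // same as getSuccessor(0)
  (void)Fallthrough;
  for (unsigned I = 0, E = CBI->getNumIndirectDests(); I != E; ++I) {
    BasicBlock *Indirect = CBI->getIndirectDest(I); // same as getSuccessor(I + 1)
    (void)Indirect;
  }
}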
4233 | //===----------------------------------------------------------------------===// |
4234 | // ResumeInst Class |
4235 | //===----------------------------------------------------------------------===// |
4236 | |
4237 | //===--------------------------------------------------------------------------- |
4238 | /// Resume the propagation of an exception. |
4239 | /// |
4240 | class ResumeInst : public Instruction { |
4241 | ResumeInst(const ResumeInst &RI); |
4242 | |
4243 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore = nullptr);
4244 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); |
4245 | |
4246 | protected: |
4247 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4248 | friend class Instruction; |
4249 | |
4250 | ResumeInst *cloneImpl() const; |
4251 | |
4252 | public: |
4253 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { |
4254 | return new (1) ResumeInst(Exn, InsertBefore);
4255 | } |
4256 | |
4257 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { |
4258 | return new (1) ResumeInst(Exn, InsertAtEnd);
4259 | } |
4260 | |
4261 | /// Provide fast operand accessors |
4262 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4263 | |
4264 | /// Convenience accessor. |
4265 | Value *getValue() const { return Op<0>(); } |
4266 | |
4267 | unsigned getNumSuccessors() const { return 0; } |
4268 | |
4269 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4270 | static bool classof(const Instruction *I) { |
4271 | return I->getOpcode() == Instruction::Resume; |
4272 | } |
4273 | static bool classof(const Value *V) { |
4274 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4275 | } |
4276 | |
4277 | private: |
4278 | BasicBlock *getSuccessor(unsigned idx) const { |
4279 | llvm_unreachable("ResumeInst has no successors!");
4280 | } |
4281 | |
4282 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
4283 | llvm_unreachable("ResumeInst has no successors!");
4284 | } |
4285 | }; |
4286 | |
4287 | template <> |
4288 | struct OperandTraits<ResumeInst> : |
4289 | public FixedNumOperandTraits<ResumeInst, 1> { |
4290 | }; |
4291 | |
4292 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4293 | |
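// [Editor's note] A hedged usage sketch (not in the original header):
// terminate a landing-pad block by re-raising the in-flight exception.
// `Exn` is assumed to be the aggregate produced by the block's landingpad.
static void emitResume(Value *Exn, BasicBlock *LandingPadBB) {
  // ResumeInst is a terminator with exactly one operand and no successors.
  ResumeInst::Create(Exn, LandingPadBB);
}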
4294 | //===----------------------------------------------------------------------===// |
4295 | // CatchSwitchInst Class |
4296 | //===----------------------------------------------------------------------===// |
4297 | class CatchSwitchInst : public Instruction { |
4298 | using UnwindDestField = BoolBitfieldElementT<0>; |
4299 | |
4300 | /// The number of operands actually allocated. NumOperands is |
4301 | /// the number actually in use. |
4302 | unsigned ReservedSpace; |
4303 | |
4304 | // Operand[0] = Outer scope |
4305 | // Operand[1] = Unwind block destination |
4306 | // Operand[n] = BasicBlock to go to on match |
4307 | CatchSwitchInst(const CatchSwitchInst &CSI); |
4308 | |
4309 | /// Create a new catchswitch instruction, specifying an
4310 | /// optional unwind destination. The number of expected handlers can be
4311 | /// specified here to make memory allocation more efficient.
4312 | /// This constructor can also autoinsert before another instruction.
4313 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4314 | unsigned NumHandlers, const Twine &NameStr, |
4315 | Instruction *InsertBefore); |
4316 | |
4317 | /// Create a new catchswitch instruction, specifying an
4318 | /// optional unwind destination. The number of expected handlers can be
4319 | /// specified here to make memory allocation more efficient.
4320 | /// This constructor also autoinserts at the end of the specified BasicBlock.
4321 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4322 | unsigned NumHandlers, const Twine &NameStr, |
4323 | BasicBlock *InsertAtEnd); |
4324 | |
4325 | // allocate space for exactly zero operands |
4326 | void *operator new(size_t S) { return User::operator new(S); } |
4327 | |
4328 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); |
4329 | void growOperands(unsigned Size); |
4330 | |
4331 | protected: |
4332 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4333 | friend class Instruction; |
4334 | |
4335 | CatchSwitchInst *cloneImpl() const; |
4336 | |
4337 | public: |
4338 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } |
4339 | |
4340 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4341 | unsigned NumHandlers, |
4342 | const Twine &NameStr = "", |
4343 | Instruction *InsertBefore = nullptr) { |
4344 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4345 | InsertBefore); |
4346 | } |
4347 | |
4348 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4349 | unsigned NumHandlers, const Twine &NameStr, |
4350 | BasicBlock *InsertAtEnd) { |
4351 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4352 | InsertAtEnd); |
4353 | } |
4354 | |
4355 | /// Provide fast operand accessors |
4356 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4357 | |
4358 | // Accessors for the parent pad operand.
4359 | Value *getParentPad() const { return getOperand(0); } |
4360 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } |
4361 | |
4362 | // Accessors for the unwind destination.
4363 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } |
4364 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4365 | BasicBlock *getUnwindDest() const { |
4366 | if (hasUnwindDest()) |
4367 | return cast<BasicBlock>(getOperand(1)); |
4368 | return nullptr; |
4369 | } |
4370 | void setUnwindDest(BasicBlock *UnwindDest) { |
4371 | assert(UnwindDest);
4372 | assert(hasUnwindDest());
4373 | setOperand(1, UnwindDest); |
4374 | } |
4375 | |
4376 | /// Return the number of 'handlers' in this catchswitch
4377 | /// instruction, excluding the unwind destination (if any).
4378 | unsigned getNumHandlers() const { |
4379 | if (hasUnwindDest()) |
4380 | return getNumOperands() - 2; |
4381 | return getNumOperands() - 1; |
4382 | } |
4383 | |
4384 | private: |
4385 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } |
4386 | static const BasicBlock *handler_helper(const Value *V) { |
4387 | return cast<BasicBlock>(V); |
4388 | } |
4389 | |
4390 | public: |
4391 | using DerefFnTy = BasicBlock *(*)(Value *); |
4392 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; |
4393 | using handler_range = iterator_range<handler_iterator>; |
4394 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); |
4395 | using const_handler_iterator = |
4396 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; |
4397 | using const_handler_range = iterator_range<const_handler_iterator>; |
4398 | |
4399 | /// Returns an iterator that points to the first handler in CatchSwitchInst. |
4400 | handler_iterator handler_begin() { |
4401 | op_iterator It = op_begin() + 1; |
4402 | if (hasUnwindDest()) |
4403 | ++It; |
4404 | return handler_iterator(It, DerefFnTy(handler_helper)); |
4405 | } |
4406 | |
4407 | /// Returns an iterator that points to the first handler in the |
4408 | /// CatchSwitchInst. |
4409 | const_handler_iterator handler_begin() const { |
4410 | const_op_iterator It = op_begin() + 1; |
4411 | if (hasUnwindDest()) |
4412 | ++It; |
4413 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); |
4414 | } |
4415 | |
4416 | /// Returns a read-only iterator that points one past the last |
4417 | /// handler in the CatchSwitchInst. |
4418 | handler_iterator handler_end() { |
4419 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); |
4420 | } |
4421 | |
4422 | /// Returns an iterator that points one past the last handler in the |
4423 | /// CatchSwitchInst. |
4424 | const_handler_iterator handler_end() const { |
4425 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); |
4426 | } |
4427 | |
4428 | /// iteration adapter for range-for loops. |
4429 | handler_range handlers() { |
4430 | return make_range(handler_begin(), handler_end()); |
4431 | } |
4432 | |
4433 | /// iteration adapter for range-for loops. |
4434 | const_handler_range handlers() const { |
4435 | return make_range(handler_begin(), handler_end()); |
4436 | } |
4437 | |
4438 | /// Add a handler (catchpad block) to this catchswitch instruction.
4439 | /// Note:
4440 | /// This action invalidates handler_end(). The old handler_end() iterator
4441 | /// will point to the added handler.
4442 | void addHandler(BasicBlock *Dest); |
4443 | |
4444 | void removeHandler(handler_iterator HI); |
4445 | |
4446 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } |
4447 | BasicBlock *getSuccessor(unsigned Idx) const { |
4448 | assert(Idx < getNumSuccessors() &&
4449 |        "Successor # out of range for catchswitch!");
4450 | return cast<BasicBlock>(getOperand(Idx + 1)); |
4451 | } |
4452 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { |
4453 | assert(Idx < getNumSuccessors() &&
4454 |        "Successor # out of range for catchswitch!");
4455 | setOperand(Idx + 1, NewSucc); |
4456 | } |
4457 | |
4458 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4459 | static bool classof(const Instruction *I) { |
4460 | return I->getOpcode() == Instruction::CatchSwitch; |
4461 | } |
4462 | static bool classof(const Value *V) { |
4463 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4464 | } |
4465 | }; |
4466 | |
4467 | template <> |
4468 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; |
4469 | |
4470 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4471 | |
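// [Editor's note] An illustrative sketch (not in the original header) of the
// handler iteration API above; the function name is hypothetical.
static void visitCatchSwitch(const CatchSwitchInst *CSI) {
  // A catchswitch unwinds either to an explicit block or to the caller.
  if (CSI->hasUnwindDest())
    (void)CSI->getUnwindDest();
  // Each handler block is expected to begin with a catchpad instruction.
  for (const BasicBlock *Handler : CSI->handlers())
    (void)Handler;
}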
4472 | //===----------------------------------------------------------------------===// |
4473 | // CleanupPadInst Class |
4474 | //===----------------------------------------------------------------------===// |
4475 | class CleanupPadInst : public FuncletPadInst { |
4476 | private: |
4477 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4478 | unsigned Values, const Twine &NameStr, |
4479 | Instruction *InsertBefore) |
4480 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4481 | NameStr, InsertBefore) {} |
4482 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4483 | unsigned Values, const Twine &NameStr, |
4484 | BasicBlock *InsertAtEnd) |
4485 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4486 | NameStr, InsertAtEnd) {} |
4487 | |
4488 | public: |
4489 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, |
4490 | const Twine &NameStr = "", |
4491 | Instruction *InsertBefore = nullptr) { |
4492 | unsigned Values = 1 + Args.size(); |
4493 | return new (Values) |
4494 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); |
4495 | } |
4496 | |
4497 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, |
4498 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4499 | unsigned Values = 1 + Args.size(); |
4500 | return new (Values) |
4501 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); |
4502 | } |
4503 | |
4504 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4505 | static bool classof(const Instruction *I) { |
4506 | return I->getOpcode() == Instruction::CleanupPad; |
4507 | } |
4508 | static bool classof(const Value *V) { |
4509 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4510 | } |
4511 | }; |
4512 | |
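// [Editor's note] A minimal sketch, not in the original header: create a
// top-level cleanuppad whose parent is the 'none' token, assuming Ctx and BB
// are set up by the caller.
static CleanupPadInst *makeTopLevelCleanupPad(LLVMContext &Ctx, BasicBlock *BB) {
  // For a funclet with no enclosing pad, the parent is ConstantTokenNone.
  return CleanupPadInst::Create(ConstantTokenNone::get(Ctx), None, "cleanup", BB);
}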
4513 | //===----------------------------------------------------------------------===// |
4514 | // CatchPadInst Class |
4515 | //===----------------------------------------------------------------------===// |
4516 | class CatchPadInst : public FuncletPadInst { |
4517 | private: |
4518 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4519 | unsigned Values, const Twine &NameStr, |
4520 | Instruction *InsertBefore) |
4521 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4522 | NameStr, InsertBefore) {} |
4523 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4524 | unsigned Values, const Twine &NameStr, |
4525 | BasicBlock *InsertAtEnd) |
4526 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4527 | NameStr, InsertAtEnd) {} |
4528 | |
4529 | public: |
4530 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4531 | const Twine &NameStr = "", |
4532 | Instruction *InsertBefore = nullptr) { |
4533 | unsigned Values = 1 + Args.size(); |
4534 | return new (Values) |
4535 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); |
4536 | } |
4537 | |
4538 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4539 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4540 | unsigned Values = 1 + Args.size(); |
4541 | return new (Values) |
4542 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); |
4543 | } |
4544 | |
4545 | /// Convenience accessors |
4546 | CatchSwitchInst *getCatchSwitch() const { |
4547 | return cast<CatchSwitchInst>(Op<-1>()); |
4548 | } |
4549 | void setCatchSwitch(Value *CatchSwitch) { |
4550 | assert(CatchSwitch);
4551 | Op<-1>() = CatchSwitch; |
4552 | } |
4553 | |
4554 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4555 | static bool classof(const Instruction *I) { |
4556 | return I->getOpcode() == Instruction::CatchPad; |
4557 | } |
4558 | static bool classof(const Value *V) { |
4559 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4560 | } |
4561 | }; |
4562 | |
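// [Editor's note] A hedged sketch (not in the original header): attach a
// catchpad to an existing catchswitch. The Args (e.g. a typeinfo global) are
// personality-specific and left to the caller here.
static CatchPadInst *makeCatchPad(CatchSwitchInst *CSI, ArrayRef<Value *> Args,
                                  BasicBlock *HandlerBB) {
  CatchPadInst *CPI = CatchPadInst::Create(CSI, Args, "catch", HandlerBB);
  // The owning catchswitch is recoverable via the convenience accessor.
  assert(CPI->getCatchSwitch() == CSI);
  return CPI;
}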
4563 | //===----------------------------------------------------------------------===// |
4564 | // CatchReturnInst Class |
4565 | //===----------------------------------------------------------------------===// |
4566 | |
4567 | class CatchReturnInst : public Instruction { |
4568 | CatchReturnInst(const CatchReturnInst &RI); |
4569 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); |
4570 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); |
4571 | |
4572 | void init(Value *CatchPad, BasicBlock *BB); |
4573 | |
4574 | protected: |
4575 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4576 | friend class Instruction; |
4577 | |
4578 | CatchReturnInst *cloneImpl() const; |
4579 | |
4580 | public: |
4581 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4582 | Instruction *InsertBefore = nullptr) { |
4583 | assert(CatchPad);
4584 | assert(BB);
4585 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); |
4586 | } |
4587 | |
4588 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4589 | BasicBlock *InsertAtEnd) { |
4590 | assert(CatchPad);
4591 | assert(BB);
4592 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); |
4593 | } |
4594 | |
4595 | /// Provide fast operand accessors |
4596 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4597 | |
4598 | /// Convenience accessors. |
4599 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } |
4600 | void setCatchPad(CatchPadInst *CatchPad) { |
4601 | assert(CatchPad);
4602 | Op<0>() = CatchPad; |
4603 | } |
4604 | |
4605 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } |
4606 | void setSuccessor(BasicBlock *NewSucc) { |
4607 | assert(NewSucc);
4608 | Op<1>() = NewSucc; |
4609 | } |
4610 | unsigned getNumSuccessors() const { return 1; } |
4611 | |
4612 | /// Get the parentPad of this catchret's catchpad's catchswitch. |
4613 | /// The successor block is implicitly a member of this funclet. |
4614 | Value *getCatchSwitchParentPad() const { |
4615 | return getCatchPad()->getCatchSwitch()->getParentPad(); |
4616 | } |
4617 | |
4618 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4619 | static bool classof(const Instruction *I) { |
4620 | return (I->getOpcode() == Instruction::CatchRet); |
4621 | } |
4622 | static bool classof(const Value *V) { |
4623 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4624 | } |
4625 | |
4626 | private: |
4627 | BasicBlock *getSuccessor(unsigned Idx) const { |
4628 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4629 | return getSuccessor(); |
4630 | } |
4631 | |
4632 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4633 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4634 | setSuccessor(B); |
4635 | } |
4636 | }; |
4637 | |
4638 | template <> |
4639 | struct OperandTraits<CatchReturnInst> |
4640 | : public FixedNumOperandTraits<CatchReturnInst, 2> {}; |
4641 | |
4642 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4643 | |
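// [Editor's note] An illustrative sketch, not in the original header: end a
// catch handler by returning from its catchpad to a normal successor block.
static void emitCatchRet(CatchPadInst *CPI, BasicBlock *NormalDest,
                         BasicBlock *HandlerExitBB) {
  CatchReturnInst *CRI = CatchReturnInst::Create(CPI, NormalDest, HandlerExitBB);
  // The successor block implicitly belongs to the parent funclet of the
  // catchpad's catchswitch.
  (void)CRI->getCatchSwitchParentPad();
}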
4644 | //===----------------------------------------------------------------------===// |
4645 | // CleanupReturnInst Class |
4646 | //===----------------------------------------------------------------------===// |
4647 | |
4648 | class CleanupReturnInst : public Instruction { |
4649 | using UnwindDestField = BoolBitfieldElementT<0>; |
4650 | |
4651 | private: |
4652 | CleanupReturnInst(const CleanupReturnInst &RI); |
4653 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4654 | Instruction *InsertBefore = nullptr); |
4655 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4656 | BasicBlock *InsertAtEnd); |
4657 | |
4658 | void init(Value *CleanupPad, BasicBlock *UnwindBB); |
4659 | |
4660 | protected: |
4661 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4662 | friend class Instruction; |
4663 | |
4664 | CleanupReturnInst *cloneImpl() const; |
4665 | |
4666 | public: |
4667 | static CleanupReturnInst *Create(Value *CleanupPad, |
4668 | BasicBlock *UnwindBB = nullptr, |
4669 | Instruction *InsertBefore = nullptr) { |
4670 | assert(CleanupPad);
4671 | unsigned Values = 1; |
4672 | if (UnwindBB) |
4673 | ++Values; |
4674 | return new (Values) |
4675 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); |
4676 | } |
4677 | |
4678 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, |
4679 | BasicBlock *InsertAtEnd) { |
4680 | assert(CleanupPad);
4681 | unsigned Values = 1; |
4682 | if (UnwindBB) |
4683 | ++Values; |
4684 | return new (Values) |
4685 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); |
4686 | } |
4687 | |
4688 | /// Provide fast operand accessors |
4689 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4690 | |
4691 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } |
4692 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4693 | |
4694 | /// Convenience accessor. |
4695 | CleanupPadInst *getCleanupPad() const { |
4696 | return cast<CleanupPadInst>(Op<0>()); |
4697 | } |
4698 | void setCleanupPad(CleanupPadInst *CleanupPad) { |
4699 | assert(CleanupPad);
4700 | Op<0>() = CleanupPad; |
4701 | } |
4702 | |
4703 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } |
4704 | |
4705 | BasicBlock *getUnwindDest() const { |
4706 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; |
4707 | } |
4708 | void setUnwindDest(BasicBlock *NewDest) { |
4709 | assert(NewDest);
4710 | assert(hasUnwindDest());
4711 | Op<1>() = NewDest; |
4712 | } |
4713 | |
4714 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4715 | static bool classof(const Instruction *I) { |
4716 | return (I->getOpcode() == Instruction::CleanupRet); |
4717 | } |
4718 | static bool classof(const Value *V) { |
4719 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4720 | } |
4721 | |
4722 | private: |
4723 | BasicBlock *getSuccessor(unsigned Idx) const { |
4724 | assert(Idx == 0);
4725 | return getUnwindDest(); |
4726 | } |
4727 | |
4728 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4729 | assert(Idx == 0);
4730 | setUnwindDest(B); |
4731 | } |
4732 | |
4733 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4734 | // method so that subclasses cannot accidentally use it. |
4735 | template <typename Bitfield> |
4736 | void setSubclassData(typename Bitfield::Type Value) { |
4737 | Instruction::setSubclassData<Bitfield>(Value); |
4738 | } |
4739 | }; |
4740 | |
4741 | template <> |
4742 | struct OperandTraits<CleanupReturnInst> |
4743 | : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; |
4744 | |
4745 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4746 | |
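// [Editor's note] A hedged sketch (not in the original header): terminate a
// cleanup funclet. Passing a null UnwindBB means the cleanupret unwinds to
// the caller, which is what unwindsToCaller() reports.
static void emitCleanupRet(CleanupPadInst *CPI, BasicBlock *UnwindBB,
                           BasicBlock *CleanupExitBB) {
  CleanupReturnInst *CRI =
      CleanupReturnInst::Create(CPI, UnwindBB, CleanupExitBB);
  assert(CRI->hasUnwindDest() == (UnwindBB != nullptr));
}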
4747 | //===----------------------------------------------------------------------===// |
4748 | // UnreachableInst Class |
4749 | //===----------------------------------------------------------------------===// |
4750 | |
4751 | //===--------------------------------------------------------------------------- |
4752 | /// This instruction has undefined behavior. In particular, the
4753 | /// presence of this instruction indicates some higher level knowledge that the |
4754 | /// end of the block cannot be reached. |
4755 | /// |
4756 | class UnreachableInst : public Instruction { |
4757 | protected: |
4758 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4759 | friend class Instruction; |
4760 | |
4761 | UnreachableInst *cloneImpl() const; |
4762 | |
4763 | public: |
4764 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); |
4765 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
4766 | |
4767 | // allocate space for exactly zero operands |
4768 | void *operator new(size_t S) { return User::operator new(S, 0); } |
4769 | void operator delete(void *Ptr) { User::operator delete(Ptr); } |
4770 | |
4771 | unsigned getNumSuccessors() const { return 0; } |
4772 | |
4773 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4774 | static bool classof(const Instruction *I) { |
4775 | return I->getOpcode() == Instruction::Unreachable; |
4776 | } |
4777 | static bool classof(const Value *V) { |
4778 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4779 | } |
4780 | |
4781 | private: |
4782 | BasicBlock *getSuccessor(unsigned idx) const { |
4783 | llvm_unreachable("UnreachableInst has no successors!");
4784 | } |
4785 | |
4786 | void setSuccessor(unsigned idx, BasicBlock *B) { |
4787 | llvm_unreachable("UnreachableInst has no successors!");
4788 | } |
4789 | }; |
4790 | |
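// [Editor's note] A minimal sketch, not in the original header: mark a block
// as unreachable, e.g. after a call that is known not to return.
static void terminateWithUnreachable(LLVMContext &Ctx, BasicBlock *BB) {
  // UnreachableInst takes no operands and has no successors.
  new UnreachableInst(Ctx, BB);
}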
4791 | //===----------------------------------------------------------------------===// |
4792 | // TruncInst Class |
4793 | //===----------------------------------------------------------------------===// |
4794 | |
4795 | /// This class represents a truncation of integer types. |
4796 | class TruncInst : public CastInst { |
4797 | protected: |
4798 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4799 | friend class Instruction; |
4800 | |
4801 | /// Clone an identical TruncInst |
4802 | TruncInst *cloneImpl() const; |
4803 | |
4804 | public: |
4805 | /// Constructor with insert-before-instruction semantics |
4806 | TruncInst( |
4807 | Value *S, ///< The value to be truncated |
4808 | Type *Ty, ///< The (smaller) type to truncate to |
4809 | const Twine &NameStr = "", ///< A name for the new instruction |
4810 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4811 | ); |
4812 | |
4813 | /// Constructor with insert-at-end-of-block semantics |
4814 | TruncInst( |
4815 | Value *S, ///< The value to be truncated |
4816 | Type *Ty, ///< The (smaller) type to truncate to |
4817 | const Twine &NameStr, ///< A name for the new instruction |
4818 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4819 | ); |
4820 | |
4821 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4822 | static bool classof(const Instruction *I) { |
4823 | return I->getOpcode() == Trunc; |
4824 | } |
4825 | static bool classof(const Value *V) { |
4826 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4827 | } |
4828 | }; |
4829 | |
4830 | //===----------------------------------------------------------------------===// |
4831 | // ZExtInst Class |
4832 | //===----------------------------------------------------------------------===// |
4833 | |
4834 | /// This class represents zero extension of integer types. |
4835 | class ZExtInst : public CastInst { |
4836 | protected: |
4837 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4838 | friend class Instruction; |
4839 | |
4840 | /// Clone an identical ZExtInst |
4841 | ZExtInst *cloneImpl() const; |
4842 | |
4843 | public: |
4844 | /// Constructor with insert-before-instruction semantics |
4845 | ZExtInst( |
4846 | Value *S, ///< The value to be zero extended |
4847 | Type *Ty, ///< The type to zero extend to |
4848 | const Twine &NameStr = "", ///< A name for the new instruction |
4849 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4850 | ); |
4851 | |
4852 | /// Constructor with insert-at-end semantics. |
4853 | ZExtInst( |
4854 | Value *S, ///< The value to be zero extended |
4855 | Type *Ty, ///< The type to zero extend to |
4856 | const Twine &NameStr, ///< A name for the new instruction |
4857 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4858 | ); |
4859 | |
4860 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4861 | static bool classof(const Instruction *I) { |
4862 | return I->getOpcode() == ZExt; |
4863 | } |
4864 | static bool classof(const Value *V) { |
4865 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4866 | } |
4867 | }; |
4868 | |
4869 | //===----------------------------------------------------------------------===// |
4870 | // SExtInst Class |
4871 | //===----------------------------------------------------------------------===// |
4872 | |
4873 | /// This class represents a sign extension of integer types. |
4874 | class SExtInst : public CastInst { |
4875 | protected: |
4876 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4877 | friend class Instruction; |
4878 | |
4879 | /// Clone an identical SExtInst |
4880 | SExtInst *cloneImpl() const; |
4881 | |
4882 | public: |
4883 | /// Constructor with insert-before-instruction semantics |
4884 | SExtInst( |
4885 | Value *S, ///< The value to be sign extended |
4886 | Type *Ty, ///< The type to sign extend to |
4887 | const Twine &NameStr = "", ///< A name for the new instruction |
4888 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4889 | ); |
4890 | |
4891 | /// Constructor with insert-at-end-of-block semantics |
4892 | SExtInst( |
4893 | Value *S, ///< The value to be sign extended |
4894 | Type *Ty, ///< The type to sign extend to |
4895 | const Twine &NameStr, ///< A name for the new instruction |
4896 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4897 | ); |
4898 | |
4899 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4900 | static bool classof(const Instruction *I) { |
4901 | return I->getOpcode() == SExt; |
4902 | } |
4903 | static bool classof(const Value *V) { |
4904 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4905 | } |
4906 | }; |
4907 | |
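// [Editor's note] An illustrative sketch (not in the original header) pairing
// the three integer-width casts declared above; variable names are
// hypothetical and the operand/destination types are assumed to be integers
// of suitable widths.
static void intWidthCasts(Value *I64, Value *I8, Type *I32Ty,
                          Instruction *InsertBefore) {
  Value *Lo = new TruncInst(I64, I32Ty, "lo", InsertBefore); // drop high bits
  Value *ZW = new ZExtInst(I8, I32Ty, "zw", InsertBefore);   // zero-fill high bits
  Value *SW = new SExtInst(I8, I32Ty, "sw", InsertBefore);   // replicate sign bit
  (void)Lo; (void)ZW; (void)SW;
}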
4908 | //===----------------------------------------------------------------------===// |
4909 | // FPTruncInst Class |
4910 | //===----------------------------------------------------------------------===// |
4911 | |
4912 | /// This class represents a truncation of floating point types. |
4913 | class FPTruncInst : public CastInst { |
4914 | protected: |
4915 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4916 | friend class Instruction; |
4917 | |
4918 | /// Clone an identical FPTruncInst |
4919 | FPTruncInst *cloneImpl() const; |
4920 | |
4921 | public: |
4922 | /// Constructor with insert-before-instruction semantics |
4923 | FPTruncInst( |
4924 | Value *S, ///< The value to be truncated |
4925 | Type *Ty, ///< The type to truncate to |
4926 | const Twine &NameStr = "", ///< A name for the new instruction |
4927 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4928 | ); |
4929 | |
4930 | /// Constructor with insert-at-end-of-block semantics
4931 | FPTruncInst( |
4932 | Value *S, ///< The value to be truncated |
4933 | Type *Ty, ///< The type to truncate to |
4934 | const Twine &NameStr, ///< A name for the new instruction |
4935 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4936 | ); |
4937 | |
4938 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4939 | static bool classof(const Instruction *I) { |
4940 | return I->getOpcode() == FPTrunc; |
4941 | } |
4942 | static bool classof(const Value *V) { |
4943 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4944 | } |
4945 | }; |
4946 | |
4947 | //===----------------------------------------------------------------------===// |
4948 | // FPExtInst Class |
4949 | //===----------------------------------------------------------------------===// |
4950 | |
4951 | /// This class represents an extension of floating point types. |
4952 | class FPExtInst : public CastInst { |
4953 | protected: |
4954 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4955 | friend class Instruction; |
4956 | |
4957 | /// Clone an identical FPExtInst |
4958 | FPExtInst *cloneImpl() const; |
4959 | |
4960 | public: |
4961 | /// Constructor with insert-before-instruction semantics |
4962 | FPExtInst( |
4963 | Value *S, ///< The value to be extended |
4964 | Type *Ty, ///< The type to extend to |
4965 | const Twine &NameStr = "", ///< A name for the new instruction |
4966 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4967 | ); |
4968 | |
4969 | /// Constructor with insert-at-end-of-block semantics |
4970 | FPExtInst( |
4971 | Value *S, ///< The value to be extended |
4972 | Type *Ty, ///< The type to extend to |
4973 | const Twine &NameStr, ///< A name for the new instruction |
4974 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4975 | ); |
4976 | |
4977 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4978 | static bool classof(const Instruction *I) { |
4979 | return I->getOpcode() == FPExt; |
4980 | } |
4981 | static bool classof(const Value *V) { |
4982 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4983 | } |
4984 | }; |
4985 | |
4986 | //===----------------------------------------------------------------------===// |
4987 | // UIToFPInst Class |
4988 | //===----------------------------------------------------------------------===// |
4989 | |
4990 | /// This class represents a cast from unsigned integer to floating point.
4991 | class UIToFPInst : public CastInst { |
4992 | protected: |
4993 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4994 | friend class Instruction; |
4995 | |
4996 | /// Clone an identical UIToFPInst |
4997 | UIToFPInst *cloneImpl() const; |
4998 | |
4999 | public: |
5000 | /// Constructor with insert-before-instruction semantics |
5001 | UIToFPInst( |
5002 | Value *S, ///< The value to be converted |
5003 | Type *Ty, ///< The type to convert to |
5004 | const Twine &NameStr = "", ///< A name for the new instruction |
5005 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5006 | ); |
5007 | |
5008 | /// Constructor with insert-at-end-of-block semantics |
5009 | UIToFPInst( |
5010 | Value *S, ///< The value to be converted |
5011 | Type *Ty, ///< The type to convert to |
5012 | const Twine &NameStr, ///< A name for the new instruction |
5013 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5014 | ); |
5015 | |
5016 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5017 | static bool classof(const Instruction *I) { |
5018 | return I->getOpcode() == UIToFP; |
5019 | } |
5020 | static bool classof(const Value *V) { |
5021 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5022 | } |
5023 | }; |
5024 | |
5025 | //===----------------------------------------------------------------------===// |
5026 | // SIToFPInst Class |
5027 | //===----------------------------------------------------------------------===// |
5028 | |
5029 | /// This class represents a cast from signed integer to floating point. |
5030 | class SIToFPInst : public CastInst { |
5031 | protected: |
5032 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5033 | friend class Instruction; |
5034 | |
5035 | /// Clone an identical SIToFPInst |
5036 | SIToFPInst *cloneImpl() const; |
5037 | |
5038 | public: |
5039 | /// Constructor with insert-before-instruction semantics |
5040 | SIToFPInst( |
5041 | Value *S, ///< The value to be converted |
5042 | Type *Ty, ///< The type to convert to |
5043 | const Twine &NameStr = "", ///< A name for the new instruction |
5044 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5045 | ); |
5046 | |
5047 | /// Constructor with insert-at-end-of-block semantics |
5048 | SIToFPInst( |
5049 | Value *S, ///< The value to be converted |
5050 | Type *Ty, ///< The type to convert to |
5051 | const Twine &NameStr, ///< A name for the new instruction |
5052 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5053 | ); |
5054 | |
5055 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5056 | static bool classof(const Instruction *I) { |
5057 | return I->getOpcode() == SIToFP; |
5058 | } |
5059 | static bool classof(const Value *V) { |
5060 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5061 | } |
5062 | }; |
5063 | |
5064 | //===----------------------------------------------------------------------===// |
5065 | // FPToUIInst Class |
5066 | //===----------------------------------------------------------------------===// |
5067 | |
5068 | /// This class represents a cast from floating point to unsigned integer.
5069 | class FPToUIInst : public CastInst { |
5070 | protected: |
5071 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5072 | friend class Instruction; |
5073 | |
5074 | /// Clone an identical FPToUIInst |
5075 | FPToUIInst *cloneImpl() const; |
5076 | |
5077 | public: |
5078 | /// Constructor with insert-before-instruction semantics |
5079 | FPToUIInst( |
5080 | Value *S, ///< The value to be converted |
5081 | Type *Ty, ///< The type to convert to |
5082 | const Twine &NameStr = "", ///< A name for the new instruction |
5083 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5084 | ); |
5085 | |
5086 | /// Constructor with insert-at-end-of-block semantics |
5087 | FPToUIInst( |
5088 | Value *S, ///< The value to be converted |
5089 | Type *Ty, ///< The type to convert to |
5090 | const Twine &NameStr, ///< A name for the new instruction |
5091 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5092 | ); |
5093 | |
5094 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5095 | static bool classof(const Instruction *I) { |
5096 | return I->getOpcode() == FPToUI; |
5097 | } |
5098 | static bool classof(const Value *V) { |
5099 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5100 | } |
5101 | }; |
5102 | |
5103 | //===----------------------------------------------------------------------===// |
5104 | // FPToSIInst Class |
5105 | //===----------------------------------------------------------------------===// |
5106 | |
5107 | /// This class represents a cast from floating point to signed integer. |
5108 | class FPToSIInst : public CastInst { |
5109 | protected: |
5110 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5111 | friend class Instruction; |
5112 | |
5113 | /// Clone an identical FPToSIInst |
5114 | FPToSIInst *cloneImpl() const; |
5115 | |
5116 | public: |
5117 | /// Constructor with insert-before-instruction semantics |
5118 | FPToSIInst( |
5119 | Value *S, ///< The value to be converted |
5120 | Type *Ty, ///< The type to convert to |
5121 | const Twine &NameStr = "", ///< A name for the new instruction |
5122 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5123 | ); |
5124 | |
5125 | /// Constructor with insert-at-end-of-block semantics |
5126 | FPToSIInst( |
5127 | Value *S, ///< The value to be converted |
5128 | Type *Ty, ///< The type to convert to |
5129 | const Twine &NameStr, ///< A name for the new instruction |
5130 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5131 | ); |
5132 | |
5133 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5134 | static bool classof(const Instruction *I) { |
5135 | return I->getOpcode() == FPToSI; |
5136 | } |
5137 | static bool classof(const Value *V) { |
5138 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5139 | } |
5140 | }; |
5141 | |
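// [Editor's note] A hedged sketch (not in the original header) pairing the
// four int/FP conversion casts declared above; operand types are assumed
// suitable (e.g. i32 operands and a float destination).
static void intFPCasts(Value *U32, Value *S32, Value *F, Type *FltTy,
                       Type *I32Ty, Instruction *InsertBefore) {
  Value *A = new UIToFPInst(U32, FltTy, "", InsertBefore); // unsigned -> FP
  Value *B = new SIToFPInst(S32, FltTy, "", InsertBefore); // signed -> FP
  Value *C = new FPToUIInst(F, I32Ty, "", InsertBefore);   // FP -> unsigned
  Value *D = new FPToSIInst(F, I32Ty, "", InsertBefore);   // FP -> signed
  (void)A; (void)B; (void)C; (void)D;
}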
5142 | //===----------------------------------------------------------------------===// |
5143 | // IntToPtrInst Class |
5144 | //===----------------------------------------------------------------------===// |
5145 | |
5146 | /// This class represents a cast from an integer to a pointer. |
5147 | class IntToPtrInst : public CastInst { |
5148 | public: |
5149 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5150 | friend class Instruction; |
5151 | |
5152 | /// Constructor with insert-before-instruction semantics |
5153 | IntToPtrInst( |
5154 | Value *S, ///< The value to be converted |
5155 | Type *Ty, ///< The type to convert to |
5156 | const Twine &NameStr = "", ///< A name for the new instruction |
5157 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5158 | ); |
5159 | |
5160 | /// Constructor with insert-at-end-of-block semantics |
5161 | IntToPtrInst( |
5162 | Value *S, ///< The value to be converted |
5163 | Type *Ty, ///< The type to convert to |
5164 | const Twine &NameStr, ///< A name for the new instruction |
5165 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5166 | ); |
5167 | |
5168 | /// Clone an identical IntToPtrInst. |
5169 | IntToPtrInst *cloneImpl() const; |
5170 | |
5171 | /// Returns the address space of this instruction's pointer type. |
5172 | unsigned getAddressSpace() const { |
5173 | return getType()->getPointerAddressSpace(); |
5174 | } |
5175 | |
5176 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5177 | static bool classof(const Instruction *I) { |
5178 | return I->getOpcode() == IntToPtr; |
5179 | } |
5180 | static bool classof(const Value *V) { |
5181 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5182 | } |
5183 | }; |
5184 | |
5185 | //===----------------------------------------------------------------------===// |
5186 | // PtrToIntInst Class |
5187 | //===----------------------------------------------------------------------===// |
5188 | |
5189 | /// This class represents a cast from a pointer to an integer. |
5190 | class PtrToIntInst : public CastInst { |
5191 | protected: |
5192 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5193 | friend class Instruction; |
5194 | |
5195 | /// Clone an identical PtrToIntInst. |
5196 | PtrToIntInst *cloneImpl() const; |
5197 | |
5198 | public: |
5199 | /// Constructor with insert-before-instruction semantics |
5200 | PtrToIntInst( |
5201 | Value *S, ///< The value to be converted |
5202 | Type *Ty, ///< The type to convert to |
5203 | const Twine &NameStr = "", ///< A name for the new instruction |
5204 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5205 | ); |
5206 | |
5207 | /// Constructor with insert-at-end-of-block semantics |
5208 | PtrToIntInst( |
5209 | Value *S, ///< The value to be converted |
5210 | Type *Ty, ///< The type to convert to |
5211 | const Twine &NameStr, ///< A name for the new instruction |
5212 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5213 | ); |
5214 | |
5215 | /// Gets the pointer operand. |
5216 | Value *getPointerOperand() { return getOperand(0); } |
5217 | /// Gets the pointer operand. |
5218 | const Value *getPointerOperand() const { return getOperand(0); } |
5219 | /// Gets the operand index of the pointer operand. |
5220 | static unsigned getPointerOperandIndex() { return 0U; } |
5221 | |
5222 | /// Returns the address space of the pointer operand. |
5223 | unsigned getPointerAddressSpace() const { |
5224 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5225 | } |
5226 | |
5227 |   // Methods to support type inquiry through isa, cast, and dyn_cast:
5228 | static bool classof(const Instruction *I) { |
5229 | return I->getOpcode() == PtrToInt; |
5230 | } |
5231 | static bool classof(const Value *V) { |
5232 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5233 | } |
5234 | }; |
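
// Usage sketch (illustrative). Assumes an IRBuilder<> `B`, a DataLayout
// `DL`, and a pointer Value `P`:
//
//   Value *AsInt = B.CreatePtrToInt(P, DL.getIntPtrType(P->getType()));
//   if (auto *PTI = dyn_cast<PtrToIntInst>(AsInt)) {
//     unsigned AS = PTI->getPointerAddressSpace(); // from the operand's type
//     ...
//   }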
5235 | |
5236 | //===----------------------------------------------------------------------===// |
5237 | // BitCastInst Class |
5238 | //===----------------------------------------------------------------------===// |
5239 | |
5240 | /// This class represents a no-op cast from one type to another. |
5241 | class BitCastInst : public CastInst { |
5242 | protected: |
5243 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5244 | friend class Instruction; |
5245 | |
5246 | /// Clone an identical BitCastInst. |
5247 | BitCastInst *cloneImpl() const; |
5248 | |
5249 | public: |
5250 | /// Constructor with insert-before-instruction semantics |
5251 | BitCastInst( |
5252 |     Value *S, ///< The value to be cast
5253 |     Type *Ty, ///< The type to cast to
5254 | const Twine &NameStr = "", ///< A name for the new instruction |
5255 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5256 | ); |
5257 | |
5258 | /// Constructor with insert-at-end-of-block semantics |
5259 | BitCastInst( |
5260 |     Value *S, ///< The value to be cast
5261 |     Type *Ty, ///< The type to cast to
5262 | const Twine &NameStr, ///< A name for the new instruction |
5263 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5264 | ); |
5265 | |
5266 |   // Methods to support type inquiry through isa, cast, and dyn_cast:
5267 | static bool classof(const Instruction *I) { |
5268 | return I->getOpcode() == BitCast; |
5269 | } |
5270 | static bool classof(const Value *V) { |
5271 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5272 | } |
5273 | }; |
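
// Usage sketch (illustrative). Assumes an IRBuilder<> `B` and an i32 Value
// `V`; a bitcast reinterprets bits, so the source and destination types must
// have the same bit width:
//
//   Value *AsFloat = B.CreateBitCast(V, B.getFloatTy(), "bits");
//   if (auto *BC = dyn_cast<BitCastInst>(AsFloat))
//     ...; // not folded away; a real instruction was emitted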
5274 | |
5275 | //===----------------------------------------------------------------------===// |
5276 | // AddrSpaceCastInst Class |
5277 | //===----------------------------------------------------------------------===// |
5278 | |
5279 | /// This class represents a conversion between pointers from one address space |
5280 | /// to another. |
5281 | class AddrSpaceCastInst : public CastInst { |
5282 | protected: |
5283 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5284 | friend class Instruction; |
5285 | |
5286 | /// Clone an identical AddrSpaceCastInst. |
5287 | AddrSpaceCastInst *cloneImpl() const; |
5288 | |
5289 | public: |
5290 | /// Constructor with insert-before-instruction semantics |
5291 | AddrSpaceCastInst( |
5292 |     Value *S, ///< The value to be cast
5293 |     Type *Ty, ///< The type to cast to
5294 | const Twine &NameStr = "", ///< A name for the new instruction |
5295 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5296 | ); |
5297 | |
5298 | /// Constructor with insert-at-end-of-block semantics |
5299 | AddrSpaceCastInst( |
5300 |     Value *S, ///< The value to be cast
5301 |     Type *Ty, ///< The type to cast to
5302 | const Twine &NameStr, ///< A name for the new instruction |
5303 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5304 | ); |
5305 | |
5306 |   // Methods to support type inquiry through isa, cast, and dyn_cast:
5307 | static bool classof(const Instruction *I) { |
5308 | return I->getOpcode() == AddrSpaceCast; |
5309 | } |
5310 | static bool classof(const Value *V) { |
5311 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5312 | } |
5313 | |
5314 | /// Gets the pointer operand. |
5315 | Value *getPointerOperand() { |
5316 | return getOperand(0); |
5317 | } |
5318 | |
5319 | /// Gets the pointer operand. |
5320 | const Value *getPointerOperand() const { |
5321 | return getOperand(0); |
5322 | } |
5323 | |
5324 | /// Gets the operand index of the pointer operand. |
5325 | static unsigned getPointerOperandIndex() { |
5326 | return 0U; |
5327 | } |
5328 | |
5329 | /// Returns the address space of the pointer operand. |
5330 | unsigned getSrcAddressSpace() const { |
5331 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5332 | } |
5333 | |
5334 | /// Returns the address space of the result. |
5335 | unsigned getDestAddressSpace() const { |
5336 | return getType()->getPointerAddressSpace(); |
5337 | } |
5338 | }; |
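
// Usage sketch (illustrative). Assumes an IRBuilder<> `B` and a pointer
// Value `P` in address space 3 being cast to address space 0:
//
//   Type *DstTy = PointerType::get(B.getContext(), /*AddressSpace=*/0);
//   Value *Q = B.CreateAddrSpaceCast(P, DstTy, "generic");
//   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(Q)) {
//     unsigned Src = ASC->getSrcAddressSpace();  // 3, from the operand
//     unsigned Dst = ASC->getDestAddressSpace(); // 0, from the result type
//   }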
5339 | |
5340 | //===----------------------------------------------------------------------===// |
5341 | // Helper functions |
5342 | //===----------------------------------------------------------------------===// |
5343 | |
5344 | /// A helper function that returns the pointer operand of a load or store
5345 | /// instruction. Returns nullptr if it is not a load or store.
5346 | inline const Value *getLoadStorePointerOperand(const Value *V) { |
5347 | if (auto *Load = dyn_cast<LoadInst>(V)) |
5348 | return Load->getPointerOperand(); |
5349 | if (auto *Store = dyn_cast<StoreInst>(V)) |
5350 | return Store->getPointerOperand(); |
5351 | return nullptr; |
5352 | } |
5353 | inline Value *getLoadStorePointerOperand(Value *V) { |
5354 | return const_cast<Value *>( |
5355 | getLoadStorePointerOperand(static_cast<const Value *>(V))); |
5356 | } |
5357 | |
5358 | /// A helper function that returns the pointer operand of a load, store,
5359 | /// or GEP instruction. Returns nullptr if it is not a load, store, or GEP.
5360 | inline const Value *getPointerOperand(const Value *V) { |
5361 | if (auto *Ptr = getLoadStorePointerOperand(V)) |
5362 | return Ptr; |
5363 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) |
5364 | return Gep->getPointerOperand(); |
5365 | return nullptr; |
5366 | } |
5367 | inline Value *getPointerOperand(Value *V) { |
5368 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); |
5369 | } |
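
// Usage sketch (illustrative). Assumes an Instruction reference `I`;
// getPointerOperand also accepts GEPs, getLoadStorePointerOperand does not:
//
//   if (const Value *Ptr = getLoadStorePointerOperand(&I))
//     ...; // I is a load or a store
//   if (const Value *Ptr = getPointerOperand(&I))
//     ...; // I is a load, a store, or a getelementptr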
5370 | |
5371 | /// A helper function that returns the alignment of a load or store
5372 | /// instruction.
5372 | inline Align getLoadStoreAlignment(Value *I) { |
5373 |   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5374 |          "Expected Load or Store instruction");
5375 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5376 | return LI->getAlign(); |
5377 | return cast<StoreInst>(I)->getAlign(); |
5378 | } |
5379 | |
5380 | /// A helper function that returns the address space of the pointer operand
5381 | /// of a load or store instruction.
5382 | inline unsigned getLoadStoreAddressSpace(Value *I) { |
5383 |   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5384 |          "Expected Load or Store instruction");
5385 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5386 | return LI->getPointerAddressSpace(); |
5387 | return cast<StoreInst>(I)->getPointerAddressSpace(); |
5388 | } |
5389 | |
5390 | /// A helper function that returns the type of a load or store instruction. |
5391 | inline Type *getLoadStoreType(Value *I) { |
5392 |   assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5393 |          "Expected Load or Store instruction");
5394 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5395 | return LI->getType(); |
5396 | return cast<StoreInst>(I)->getValueOperand()->getType(); |
5397 | } |
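
// Usage sketch (illustrative). Assumes `I` is already known to be a load or
// a store (each helper below asserts that):
//
//   Align A     = getLoadStoreAlignment(I);
//   unsigned AS = getLoadStoreAddressSpace(I);
//   Type *AccTy = getLoadStoreType(I); // the stored value's type for stores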
5398 | |
5399 | /// A helper function that returns an atomic operation's sync scope; returns |
5400 | /// None if it is not an atomic operation. |
5401 | inline Optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) { |
5402 | if (!I->isAtomic()) |
5403 | return None; |
5404 | if (auto *AI = dyn_cast<LoadInst>(I)) |
5405 | return AI->getSyncScopeID(); |
5406 | if (auto *AI = dyn_cast<StoreInst>(I)) |
5407 | return AI->getSyncScopeID(); |
5408 | if (auto *AI = dyn_cast<FenceInst>(I)) |
5409 | return AI->getSyncScopeID(); |
5410 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) |
5411 | return AI->getSyncScopeID(); |
5412 | if (auto *AI = dyn_cast<AtomicRMWInst>(I)) |
5413 | return AI->getSyncScopeID(); |
5414 |   llvm_unreachable("unhandled atomic operation");
5415 | } |
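
// Usage sketch (illustrative). Assumes an Instruction reference `I`:
//
//   if (Optional<SyncScope::ID> SSID = getAtomicSyncScopeID(&I))
//     if (*SSID != SyncScope::System)
//       ...; // atomic with a single-thread or target-specific scope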
5416 | |
5417 | //===----------------------------------------------------------------------===// |
5418 | // FreezeInst Class |
5419 | //===----------------------------------------------------------------------===// |
5420 | |
5421 | /// This class represents a freeze instruction, which returns an arbitrary
5422 | /// but fixed concrete value if its operand is either a poison or an undef value.
5423 | class FreezeInst : public UnaryInstruction { |
5424 | protected: |
5425 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5426 | friend class Instruction; |
5427 | |
5428 | /// Clone an identical FreezeInst |
5429 | FreezeInst *cloneImpl() const; |
5430 | |
5431 | public: |
5432 | explicit FreezeInst(Value *S, |
5433 | const Twine &NameStr = "", |
5434 | Instruction *InsertBefore = nullptr); |
5435 | FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); |
5436 | |
5437 |   // Methods to support type inquiry through isa, cast, and dyn_cast:
5438 | static inline bool classof(const Instruction *I) { |
5439 | return I->getOpcode() == Freeze; |
5440 | } |
5441 | static inline bool classof(const Value *V) { |
5442 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5443 | } |
5444 | }; |
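
// Usage sketch (illustrative). Assumes an IRBuilder<> `B` and a Value `V`
// that may be poison or undef; freeze pins it to one arbitrary value:
//
//   Value *Frozen = B.CreateFreeze(V, "fr");
//   // All uses of Frozen observe the same concrete value.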
5445 | |
5446 | } // end namespace llvm |
5447 | |
5448 | #endif // LLVM_IR_INSTRUCTIONS_H |