File: build/source/llvm/lib/CodeGen/CodeGenPrepare.cpp
Warning: line 1242, column 37: Called C++ object pointer is null
1 | //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This pass munges the code in the input function to better prepare it for | |||
10 | // SelectionDAG-based code generation. This works around limitations in its | |||
11 | // basic-block-at-a-time approach. It should eventually be removed. | |||
12 | // | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #include "llvm/ADT/APInt.h" | |||
16 | #include "llvm/ADT/ArrayRef.h" | |||
17 | #include "llvm/ADT/DenseMap.h" | |||
18 | #include "llvm/ADT/MapVector.h" | |||
19 | #include "llvm/ADT/PointerIntPair.h" | |||
20 | #include "llvm/ADT/STLExtras.h" | |||
21 | #include "llvm/ADT/SmallPtrSet.h" | |||
22 | #include "llvm/ADT/SmallVector.h" | |||
23 | #include "llvm/ADT/Statistic.h" | |||
24 | #include "llvm/Analysis/BlockFrequencyInfo.h" | |||
25 | #include "llvm/Analysis/BranchProbabilityInfo.h" | |||
26 | #include "llvm/Analysis/InstructionSimplify.h" | |||
27 | #include "llvm/Analysis/LoopInfo.h" | |||
28 | #include "llvm/Analysis/ProfileSummaryInfo.h" | |||
29 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
30 | #include "llvm/Analysis/TargetTransformInfo.h" | |||
31 | #include "llvm/Analysis/ValueTracking.h" | |||
32 | #include "llvm/Analysis/VectorUtils.h" | |||
33 | #include "llvm/CodeGen/Analysis.h" | |||
34 | #include "llvm/CodeGen/BasicBlockSectionsProfileReader.h" | |||
35 | #include "llvm/CodeGen/ISDOpcodes.h" | |||
36 | #include "llvm/CodeGen/MachineValueType.h" | |||
37 | #include "llvm/CodeGen/SelectionDAGNodes.h" | |||
38 | #include "llvm/CodeGen/TargetLowering.h" | |||
39 | #include "llvm/CodeGen/TargetPassConfig.h" | |||
40 | #include "llvm/CodeGen/TargetSubtargetInfo.h" | |||
41 | #include "llvm/CodeGen/ValueTypes.h" | |||
42 | #include "llvm/Config/llvm-config.h" | |||
43 | #include "llvm/IR/Argument.h" | |||
44 | #include "llvm/IR/Attributes.h" | |||
45 | #include "llvm/IR/BasicBlock.h" | |||
46 | #include "llvm/IR/Constant.h" | |||
47 | #include "llvm/IR/Constants.h" | |||
48 | #include "llvm/IR/DataLayout.h" | |||
49 | #include "llvm/IR/DebugInfo.h" | |||
50 | #include "llvm/IR/DerivedTypes.h" | |||
51 | #include "llvm/IR/Dominators.h" | |||
52 | #include "llvm/IR/Function.h" | |||
53 | #include "llvm/IR/GetElementPtrTypeIterator.h" | |||
54 | #include "llvm/IR/GlobalValue.h" | |||
55 | #include "llvm/IR/GlobalVariable.h" | |||
56 | #include "llvm/IR/IRBuilder.h" | |||
57 | #include "llvm/IR/InlineAsm.h" | |||
58 | #include "llvm/IR/InstrTypes.h" | |||
59 | #include "llvm/IR/Instruction.h" | |||
60 | #include "llvm/IR/Instructions.h" | |||
61 | #include "llvm/IR/IntrinsicInst.h" | |||
62 | #include "llvm/IR/Intrinsics.h" | |||
63 | #include "llvm/IR/IntrinsicsAArch64.h" | |||
64 | #include "llvm/IR/LLVMContext.h" | |||
65 | #include "llvm/IR/MDBuilder.h" | |||
66 | #include "llvm/IR/Module.h" | |||
67 | #include "llvm/IR/Operator.h" | |||
68 | #include "llvm/IR/PatternMatch.h" | |||
69 | #include "llvm/IR/ProfDataUtils.h" | |||
70 | #include "llvm/IR/Statepoint.h" | |||
71 | #include "llvm/IR/Type.h" | |||
72 | #include "llvm/IR/Use.h" | |||
73 | #include "llvm/IR/User.h" | |||
74 | #include "llvm/IR/Value.h" | |||
75 | #include "llvm/IR/ValueHandle.h" | |||
76 | #include "llvm/IR/ValueMap.h" | |||
77 | #include "llvm/InitializePasses.h" | |||
78 | #include "llvm/Pass.h" | |||
79 | #include "llvm/Support/BlockFrequency.h" | |||
80 | #include "llvm/Support/BranchProbability.h" | |||
81 | #include "llvm/Support/Casting.h" | |||
82 | #include "llvm/Support/CommandLine.h" | |||
83 | #include "llvm/Support/Compiler.h" | |||
84 | #include "llvm/Support/Debug.h" | |||
85 | #include "llvm/Support/ErrorHandling.h" | |||
86 | #include "llvm/Support/MathExtras.h" | |||
87 | #include "llvm/Support/raw_ostream.h" | |||
88 | #include "llvm/Target/TargetMachine.h" | |||
89 | #include "llvm/Target/TargetOptions.h" | |||
90 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
91 | #include "llvm/Transforms/Utils/BypassSlowDivision.h" | |||
92 | #include "llvm/Transforms/Utils/Local.h" | |||
93 | #include "llvm/Transforms/Utils/SimplifyLibCalls.h" | |||
94 | #include "llvm/Transforms/Utils/SizeOpts.h" | |||
95 | #include <algorithm> | |||
96 | #include <cassert> | |||
97 | #include <cstdint> | |||
98 | #include <iterator> | |||
99 | #include <limits> | |||
100 | #include <memory> | |||
101 | #include <optional> | |||
102 | #include <utility> | |||
103 | #include <vector> | |||
104 | ||||
105 | using namespace llvm; | |||
106 | using namespace llvm::PatternMatch; | |||
107 | ||||
108 | #define DEBUG_TYPE "codegenprepare" | |||
109 | ||||
110 | STATISTIC(NumBlocksElim, "Number of blocks eliminated"); | |||
111 | STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); | |||
112 | STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); | |||
113 | STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " | |||
114 | "sunken Cmps"); | |||
115 | STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " | |||
116 | "of sunken Casts"); | |||
117 | STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " | |||
118 | "computations were sunk"); | |||
119 | STATISTIC(NumMemoryInstsPhiCreated, | |||
120 | "Number of phis created when address " | |||
121 | "computations were sunk to memory instructions"); | |||
122 | STATISTIC(NumMemoryInstsSelectCreated, | |||
123 | "Number of selects created when address " | |||
124 | "computations were sunk to memory instructions"); | |||
125 | STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); | |||
126 | STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); | |||
127 | STATISTIC(NumAndsAdded, | |||
128 | "Number of and mask instructions added to form ext loads"); | |||
129 | STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized"); | |||
130 | STATISTIC(NumRetsDup, "Number of return instructions duplicated"); | |||
131 | STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); | |||
132 | STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); | |||
133 | STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); | |||
134 | ||||
135 | static cl::opt<bool> DisableBranchOpts( | |||
136 | "disable-cgp-branch-opts", cl::Hidden, cl::init(false), | |||
137 | cl::desc("Disable branch optimizations in CodeGenPrepare")); | |||
138 | ||||
139 | static cl::opt<bool> | |||
140 | DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), | |||
141 | cl::desc("Disable GC optimizations in CodeGenPrepare")); | |||
142 | ||||
143 | static cl::opt<bool> | |||
144 | DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden, | |||
145 | cl::init(false), | |||
146 | cl::desc("Disable select to branch conversion.")); | |||
147 | ||||
148 | static cl::opt<bool> | |||
149 | AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true), | |||
150 | cl::desc("Address sinking in CGP using GEPs.")); | |||
151 | ||||
152 | static cl::opt<bool> | |||
153 | EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true), | |||
154 | cl::desc("Enable sinkinig and/cmp into branches.")); | |||
155 | cl::desc("Enable sinking and/cmp into branches.")); | |||
156 | static cl::opt<bool> DisableStoreExtract( | |||
157 | "disable-cgp-store-extract", cl::Hidden, cl::init(false), | |||
158 | cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); | |||
159 | ||||
160 | static cl::opt<bool> StressStoreExtract( | |||
161 | "stress-cgp-store-extract", cl::Hidden, cl::init(false), | |||
162 | cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); | |||
163 | ||||
164 | static cl::opt<bool> DisableExtLdPromotion( | |||
165 | "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), | |||
166 | cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " | |||
167 | "CodeGenPrepare")); | |||
168 | ||||
169 | static cl::opt<bool> StressExtLdPromotion( | |||
170 | "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), | |||
171 | cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " | |||
172 | "optimization in CodeGenPrepare")); | |||
173 | ||||
174 | static cl::opt<bool> DisablePreheaderProtect( | |||
175 | "disable-preheader-prot", cl::Hidden, cl::init(false), | |||
176 | cl::desc("Disable protection against removing loop preheaders")); | |||
177 | ||||
178 | static cl::opt<bool> ProfileGuidedSectionPrefix( | |||
179 | "profile-guided-section-prefix", cl::Hidden, cl::init(true), | |||
180 | cl::desc("Use profile info to add section prefix for hot/cold functions")); | |||
181 | ||||
182 | static cl::opt<bool> ProfileUnknownInSpecialSection( | |||
183 | "profile-unknown-in-special-section", cl::Hidden, | |||
184 | cl::desc("In profiling mode like sampleFDO, if a function doesn't have " | |||
185 | "profile, we cannot tell the function is cold for sure because " | |||
186 | "it may be a function newly added without ever being sampled. " | |||
187 | "With the flag enabled, compiler can put such profile unknown " | |||
188 | "functions into a special section, so runtime system can choose " | |||
189 | "to handle it in a different way than .text section, to save " | |||
190 | "RAM for example. ")); | |||
191 | ||||
192 | static cl::opt<bool> BBSectionsGuidedSectionPrefix( | |||
193 | "bbsections-guided-section-prefix", cl::Hidden, cl::init(true), | |||
194 | cl::desc("Use the basic-block-sections profile to determine the text " | |||
195 | "section prefix for hot functions. Functions with " | |||
196 | "basic-block-sections profile will be placed in `.text.hot` " | |||
197 | "regardless of their FDO profile info. Other functions won't be " | |||
198 | "impacted, i.e., their prefixes will be decided by FDO/sampleFDO " | |||
199 | "profiles.")); | |||
200 | ||||
201 | static cl::opt<unsigned> FreqRatioToSkipMerge( | |||
202 | "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2), | |||
203 | cl::desc("Skip merging empty blocks if (frequency of empty block) / " | |||
204 | "(frequency of destination block) is greater than this ratio")); | |||
205 | ||||
206 | static cl::opt<bool> ForceSplitStore( | |||
207 | "force-split-store", cl::Hidden, cl::init(false), | |||
208 | cl::desc("Force store splitting no matter what the target query says.")); | |||
209 | ||||
210 | static cl::opt<bool> EnableTypePromotionMerge( | |||
211 | "cgp-type-promotion-merge", cl::Hidden, | |||
212 | cl::desc("Enable merging of redundant sexts when one is dominating" | |||
213 | " the other."), | |||
214 | cl::init(true)); | |||
215 | ||||
216 | static cl::opt<bool> DisableComplexAddrModes( | |||
217 | "disable-complex-addr-modes", cl::Hidden, cl::init(false), | |||
218 | cl::desc("Disables combining addressing modes with different parts " | |||
219 | "in optimizeMemoryInst.")); | |||
220 | ||||
221 | static cl::opt<bool> | |||
222 | AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false), | |||
223 | cl::desc("Allow creation of Phis in Address sinking.")); | |||
224 | ||||
225 | static cl::opt<bool> AddrSinkNewSelects( | |||
226 | "addr-sink-new-select", cl::Hidden, cl::init(true), | |||
227 | cl::desc("Allow creation of selects in Address sinking.")); | |||
228 | ||||
229 | static cl::opt<bool> AddrSinkCombineBaseReg( | |||
230 | "addr-sink-combine-base-reg", cl::Hidden, cl::init(true), | |||
231 | cl::desc("Allow combining of BaseReg field in Address sinking.")); | |||
232 | ||||
233 | static cl::opt<bool> AddrSinkCombineBaseGV( | |||
234 | "addr-sink-combine-base-gv", cl::Hidden, cl::init(true), | |||
235 | cl::desc("Allow combining of BaseGV field in Address sinking.")); | |||
236 | ||||
237 | static cl::opt<bool> AddrSinkCombineBaseOffs( | |||
238 | "addr-sink-combine-base-offs", cl::Hidden, cl::init(true), | |||
239 | cl::desc("Allow combining of BaseOffs field in Address sinking.")); | |||
240 | ||||
241 | static cl::opt<bool> AddrSinkCombineScaledReg( | |||
242 | "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true), | |||
243 | cl::desc("Allow combining of ScaledReg field in Address sinking.")); | |||
244 | ||||
245 | static cl::opt<bool> | |||
246 | EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden, | |||
247 | cl::init(true), | |||
248 | cl::desc("Enable splitting large offset of GEP.")); | |||
249 | ||||
250 | static cl::opt<bool> EnableICMP_EQToICMP_ST( | |||
251 | "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false), | |||
252 | cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion.")); | |||
253 | ||||
254 | static cl::opt<bool> | |||
255 | VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false), | |||
256 | cl::desc("Enable BFI update verification for " | |||
257 | "CodeGenPrepare.")); | |||
258 | ||||
259 | static cl::opt<bool> | |||
260 | OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(false), | |||
261 | cl::desc("Enable converting phi types in CodeGenPrepare")); | |||
262 | ||||
263 | static cl::opt<unsigned> | |||
264 | HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden, | |||
265 | cl::desc("Least BB number of huge function.")); | |||
266 | ||||
267 | static cl::opt<unsigned> | |||
268 | MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100), | |||
269 | cl::Hidden, | |||
270 | cl::desc("Max number of address users to look at")); | |||
271 | namespace { | |||
272 | ||||
273 | enum ExtType { | |||
274 | ZeroExtension, // Zero extension has been seen. | |||
275 | SignExtension, // Sign extension has been seen. | |||
276 | BothExtension // This extension type is used if we saw sext after | |||
277 | // ZeroExtension had been set, or if we saw zext after | |||
278 | // SignExtension had been set. It makes the type | |||
279 | // information of a promoted instruction invalid. | |||
280 | }; | |||
281 | ||||
282 | enum ModifyDT { | |||
283 | NotModifyDT, // Do not modify any DT. | |||
284 | ModifyBBDT, // Modify the Basic Block Dominator Tree. | |||
285 | ModifyInstDT // Modify the Instruction Dominators in a Basic Block. | |||
286 | // This usually means we move/delete/insert instructions | |||
287 | // in a Basic Block, so we should re-iterate the | |||
288 | // instructions in such a Basic Block. | |||
289 | }; | |||
290 | ||||
291 | using SetOfInstrs = SmallPtrSet<Instruction *, 16>; | |||
292 | using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>; | |||
293 | using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>; | |||
294 | using SExts = SmallVector<Instruction *, 16>; | |||
295 | using ValueToSExts = MapVector<Value *, SExts>; | |||
296 | ||||
297 | class TypePromotionTransaction; | |||
298 | ||||
299 | class CodeGenPrepare : public FunctionPass { | |||
300 | const TargetMachine *TM = nullptr; | |||
301 | const TargetSubtargetInfo *SubtargetInfo = nullptr; | |||
302 | const TargetLowering *TLI = nullptr; | |||
303 | const TargetRegisterInfo *TRI = nullptr; | |||
304 | const TargetTransformInfo *TTI = nullptr; | |||
305 | const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr; | |||
306 | const TargetLibraryInfo *TLInfo = nullptr; | |||
307 | const LoopInfo *LI = nullptr; | |||
308 | std::unique_ptr<BlockFrequencyInfo> BFI; | |||
309 | std::unique_ptr<BranchProbabilityInfo> BPI; | |||
310 | ProfileSummaryInfo *PSI = nullptr; | |||
311 | ||||
312 | /// As we scan instructions optimizing them, this is the next instruction | |||
313 | /// to optimize. Transforms that can invalidate this should update it. | |||
314 | BasicBlock::iterator CurInstIterator; | |||
315 | ||||
316 | /// Keeps track of non-local addresses that have been sunk into a block. | |||
317 | /// This allows us to avoid inserting duplicate code for blocks with | |||
318 | /// multiple load/stores of the same address. The usage of WeakTrackingVH | |||
319 | /// enables SunkAddrs to be treated as a cache whose entries can be | |||
320 | /// invalidated if a sunken address computation has been erased. | |||
321 | ValueMap<Value *, WeakTrackingVH> SunkAddrs; | |||
322 | ||||
323 | /// Keeps track of all instructions inserted for the current function. | |||
324 | SetOfInstrs InsertedInsts; | |||
325 | ||||
326 | /// Keeps track of the original type of each promoted instruction before | |||
327 | /// its promotion, for the current function. | |||
328 | InstrToOrigTy PromotedInsts; | |||
329 | ||||
330 | /// Keep track of instructions removed during promotion. | |||
331 | SetOfInstrs RemovedInsts; | |||
332 | ||||
333 | /// Keep track of sext chains based on their initial value. | |||
334 | DenseMap<Value *, Instruction *> SeenChainsForSExt; | |||
335 | ||||
336 | /// Keep track of GEPs accessing the same data structures such as structs or | |||
337 | /// arrays that are candidates to be split later because of their large | |||
338 | /// size. | |||
339 | MapVector<AssertingVH<Value>, | |||
340 | SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>> | |||
341 | LargeOffsetGEPMap; | |||
342 | ||||
343 | /// Keep track of new GEP base after splitting the GEPs having large offset. | |||
344 | SmallSet<AssertingVH<Value>, 2> NewGEPBases; | |||
345 | ||||
346 | /// Map serial numbers to Large offset GEPs. | |||
347 | DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID; | |||
348 | ||||
349 | /// Keep track of SExt promoted. | |||
350 | ValueToSExts ValToSExtendedUses; | |||
351 | ||||
352 | /// True if the function has the OptSize attribute. | |||
353 | bool OptSize; | |||
354 | ||||
355 | /// DataLayout for the Function being processed. | |||
356 | const DataLayout *DL = nullptr; | |||
357 | ||||
358 | /// Building the dominator tree can be expensive, so we only build it | |||
359 | /// lazily and update it when required. | |||
360 | std::unique_ptr<DominatorTree> DT; | |||
361 | ||||
362 | public: | |||
363 | /// If we encounter a huge function, we need to limit the build time. | |||
364 | bool IsHugeFunc = false; | |||
365 | ||||
366 | /// FreshBBs is like a worklist; it collects the updated BBs which need | |||
367 | /// to be optimized again. | |||
368 | /// Note: to keep build time in check in this pass, when a BB is updated | |||
369 | /// in a huge function we need to insert it into FreshBBs. | |||
370 | SmallSet<BasicBlock *, 32> FreshBBs; | |||
371 | ||||
372 | static char ID; // Pass identification, replacement for typeid | |||
373 | ||||
374 | CodeGenPrepare() : FunctionPass(ID) { | |||
375 | initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); | |||
376 | } | |||
377 | ||||
378 | bool runOnFunction(Function &F) override; | |||
379 | ||||
380 | StringRef getPassName() const override { return "CodeGen Prepare"; } | |||
381 | ||||
382 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
383 | // FIXME: When we can selectively preserve passes, preserve the domtree. | |||
384 | AU.addRequired<ProfileSummaryInfoWrapperPass>(); | |||
385 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | |||
386 | AU.addRequired<TargetPassConfig>(); | |||
387 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
388 | AU.addRequired<LoopInfoWrapperPass>(); | |||
389 | AU.addUsedIfAvailable<BasicBlockSectionsProfileReader>(); | |||
390 | } | |||
391 | ||||
392 | private: | |||
393 | template <typename F> | |||
394 | void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) { | |||
395 | // Substituting can cause recursive simplifications, which can invalidate | |||
396 | // our iterator. Use a WeakTrackingVH to hold onto it in case this | |||
397 | // happens. | |||
398 | Value *CurValue = &*CurInstIterator; | |||
399 | WeakTrackingVH IterHandle(CurValue); | |||
400 | ||||
401 | f(); | |||
402 | ||||
403 | // If the iterator instruction was recursively deleted, start over at the | |||
404 | // start of the block. | |||
405 | if (IterHandle != CurValue) { | |||
406 | CurInstIterator = BB->begin(); | |||
407 | SunkAddrs.clear(); | |||
408 | } | |||
409 | } | |||
410 | ||||
411 | // Get the DominatorTree, building if necessary. | |||
412 | DominatorTree &getDT(Function &F) { | |||
413 | if (!DT) | |||
414 | DT = std::make_unique<DominatorTree>(F); | |||
415 | return *DT; | |||
416 | } | |||
417 | ||||
418 | void removeAllAssertingVHReferences(Value *V); | |||
419 | bool eliminateAssumptions(Function &F); | |||
420 | bool eliminateFallThrough(Function &F); | |||
421 | bool eliminateMostlyEmptyBlocks(Function &F); | |||
422 | BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB); | |||
423 | bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; | |||
424 | void eliminateMostlyEmptyBlock(BasicBlock *BB); | |||
425 | bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB, | |||
426 | bool isPreheader); | |||
427 | bool makeBitReverse(Instruction &I); | |||
428 | bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT); | |||
429 | bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT); | |||
430 | bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy, | |||
431 | unsigned AddrSpace); | |||
432 | bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr); | |||
433 | bool optimizeInlineAsmInst(CallInst *CS); | |||
434 | bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT); | |||
435 | bool optimizeExt(Instruction *&I); | |||
436 | bool optimizeExtUses(Instruction *I); | |||
437 | bool optimizeLoadExt(LoadInst *Load); | |||
438 | bool optimizeShiftInst(BinaryOperator *BO); | |||
439 | bool optimizeFunnelShift(IntrinsicInst *Fsh); | |||
440 | bool optimizeSelectInst(SelectInst *SI); | |||
441 | bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI); | |||
442 | bool optimizeSwitchType(SwitchInst *SI); | |||
443 | bool optimizeSwitchPhiConstants(SwitchInst *SI); | |||
444 | bool optimizeSwitchInst(SwitchInst *SI); | |||
445 | bool optimizeExtractElementInst(Instruction *Inst); | |||
446 | bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT); | |||
447 | bool fixupDbgValue(Instruction *I); | |||
448 | bool placeDbgValues(Function &F); | |||
449 | bool placePseudoProbes(Function &F); | |||
450 | bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts, | |||
451 | LoadInst *&LI, Instruction *&Inst, bool HasPromoted); | |||
452 | bool tryToPromoteExts(TypePromotionTransaction &TPT, | |||
453 | const SmallVectorImpl<Instruction *> &Exts, | |||
454 | SmallVectorImpl<Instruction *> &ProfitablyMovedExts, | |||
455 | unsigned CreatedInstsCost = 0); | |||
456 | bool mergeSExts(Function &F); | |||
457 | bool splitLargeGEPOffsets(); | |||
458 | bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited, | |||
459 | SmallPtrSetImpl<Instruction *> &DeletedInstrs); | |||
460 | bool optimizePhiTypes(Function &F); | |||
461 | bool performAddressTypePromotion( | |||
462 | Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, | |||
463 | bool HasPromoted, TypePromotionTransaction &TPT, | |||
464 | SmallVectorImpl<Instruction *> &SpeculativelyMovedExts); | |||
465 | bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT); | |||
466 | bool simplifyOffsetableRelocate(GCStatepointInst &I); | |||
467 | ||||
468 | bool tryToSinkFreeOperands(Instruction *I); | |||
469 | bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1, | |||
470 | CmpInst *Cmp, Intrinsic::ID IID); | |||
471 | bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT); | |||
472 | bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT); | |||
473 | bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT); | |||
474 | void verifyBFIUpdates(Function &F); | |||
475 | }; | |||
476 | ||||
477 | } // end anonymous namespace | |||
478 | ||||
479 | char CodeGenPrepare::ID = 0; | |||
480 | ||||
481 | INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE, | |||
482 | "Optimize for code generation", false, false) | |||
483 | INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReader) | |||
484 | INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) | |||
485 | INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass) | |||
486 | INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) | |||
487 | INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) | |||
488 | INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) | |||
489 | INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE, "Optimize for code generation", | |||
490 | false, false) | |||
491 | ||||
492 | FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); } | |||
493 | ||||
494 | bool CodeGenPrepare::runOnFunction(Function &F) { | |||
495 | if (skipFunction(F)) | |||
496 | return false; | |||
497 | ||||
498 | DL = &F.getParent()->getDataLayout(); | |||
499 | ||||
500 | bool EverMadeChange = false; | |||
501 | // Clear per function information. | |||
502 | InsertedInsts.clear(); | |||
503 | PromotedInsts.clear(); | |||
504 | FreshBBs.clear(); | |||
505 | ||||
506 | TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>(); | |||
507 | SubtargetInfo = TM->getSubtargetImpl(F); | |||
508 | TLI = SubtargetInfo->getTargetLowering(); | |||
509 | TRI = SubtargetInfo->getRegisterInfo(); | |||
510 | TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F); | |||
511 | TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | |||
512 | LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(); | |||
513 | BPI.reset(new BranchProbabilityInfo(F, *LI)); | |||
514 | BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI)); | |||
515 | PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI(); | |||
516 | BBSectionsProfileReader = | |||
517 | getAnalysisIfAvailable<BasicBlockSectionsProfileReader>(); | |||
518 | OptSize = F.hasOptSize(); | |||
519 | // Use the basic-block-sections profile to promote hot functions to .text.hot | |||
520 | // if requested. | |||
521 | if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader && | |||
522 | BBSectionsProfileReader->isFunctionHot(F.getName())) { | |||
523 | F.setSectionPrefix("hot"); | |||
524 | } else if (ProfileGuidedSectionPrefix) { | |||
525 | // The hot attribute overrides profile-count-based hotness, while | |||
526 | // profile-count-based hotness overrides the cold attribute. | |||
527 | // This is conservative behavior. | |||
528 | if (F.hasFnAttribute(Attribute::Hot) || | |||
529 | PSI->isFunctionHotInCallGraph(&F, *BFI)) | |||
530 | F.setSectionPrefix("hot"); | |||
531 | // If PSI shows this function is not hot, we place the function into the | |||
532 | // unlikely section if (1) PSI shows this is a cold function, or | |||
533 | // (2) the function has the cold attribute. | |||
534 | else if (PSI->isFunctionColdInCallGraph(&F, *BFI) || | |||
535 | F.hasFnAttribute(Attribute::Cold)) | |||
536 | F.setSectionPrefix("unlikely"); | |||
537 | else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() && | |||
538 | PSI->isFunctionHotnessUnknown(F)) | |||
539 | F.setSectionPrefix("unknown"); | |||
540 | } | |||
541 | ||||
542 | /// This optimization identifies DIV instructions that can be | |||
543 | /// profitably bypassed and carried out with a shorter, faster divide. | |||
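    | /// For example (illustrative, not from this source): with the target's | |||
    | /// getBypassSlowDivWidths() mapping 64 -> 32, a 64-bit divide | |||
    | ///   %q = udiv i64 %a, %b | |||
    | /// is rewritten by bypassSlowDivision() into a runtime check on the high | |||
    | /// bits of %a and %b that selects between a cheap 32-bit divide and the | |||
    | /// original 64-bit one. | |||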
544 | if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) { | |||
545 | const DenseMap<unsigned int, unsigned int> &BypassWidths = | |||
546 | TLI->getBypassSlowDivWidths(); | |||
547 | BasicBlock *BB = &*F.begin(); | |||
548 | while (BB != nullptr) { | |||
549 | // bypassSlowDivision may create new BBs, but we don't want to reapply the | |||
550 | // optimization to those blocks. | |||
551 | BasicBlock *Next = BB->getNextNode(); | |||
552 | // F.hasOptSize is already checked in the outer if statement. | |||
553 | if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) | |||
554 | EverMadeChange |= bypassSlowDivision(BB, BypassWidths); | |||
555 | BB = Next; | |||
556 | } | |||
557 | } | |||
558 | ||||
559 | // Get rid of @llvm.assume builtins before attempting to eliminate empty | |||
560 | // blocks, since there might be blocks that only contain @llvm.assume calls | |||
561 | // (plus arguments that we can get rid of). | |||
562 | EverMadeChange |= eliminateAssumptions(F); | |||
563 | ||||
564 | // Eliminate blocks that contain only PHI nodes and an | |||
565 | // unconditional branch. | |||
566 | EverMadeChange |= eliminateMostlyEmptyBlocks(F); | |||
567 | ||||
568 | ModifyDT ModifiedDT = ModifyDT::NotModifyDT; | |||
569 | if (!DisableBranchOpts) | |||
570 | EverMadeChange |= splitBranchCondition(F, ModifiedDT); | |||
571 | ||||
572 | // Split some critical edges where one of the sources is an indirect branch, | |||
573 | // to help generate sane code for PHIs involving such edges. | |||
574 | EverMadeChange |= | |||
575 | SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true); | |||
576 | ||||
577 | // If we are optimizing a huge function, we need to consider the build time | |||
578 | // because the basic algorithm's complexity is near O(N!). | |||
579 | IsHugeFunc = F.size() > HugeFuncThresholdInCGPP; | |||
580 | ||||
581 | bool MadeChange = true; | |||
582 | bool FuncIterated = false; | |||
583 | while (MadeChange) { | |||
584 | MadeChange = false; | |||
585 | DT.reset(); | |||
586 | ||||
587 | for (BasicBlock &BB : llvm::make_early_inc_range(F)) { | |||
588 | if (FuncIterated && !FreshBBs.contains(&BB)) | |||
589 | continue; | |||
590 | ||||
591 | ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT; | |||
592 | bool Changed = optimizeBlock(BB, ModifiedDTOnIteration); | |||
593 | ||||
594 | MadeChange |= Changed; | |||
595 | if (IsHugeFunc) { | |||
596 | // If the BB is updated, it may still have a chance to be optimized. | |||
597 | // This usually happens in sink optimizations. | |||
598 | // For example: | |||
599 | // | |||
600 | // bb0: | |||
601 | // %and = and i32 %a, 4 | |||
602 | // %cmp = icmp eq i32 %and, 0 | |||
603 | // | |||
604 | // If %cmp sinks to another BB, %and will have a chance to sink too. | |||
605 | if (Changed) | |||
606 | FreshBBs.insert(&BB); | |||
607 | else if (FuncIterated) | |||
608 | FreshBBs.erase(&BB); | |||
609 | ||||
610 | if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT) | |||
611 | DT.reset(); | |||
612 | } else { | |||
613 | // For small/normal functions, we restart BB iteration if the dominator | |||
614 | // tree of the Function was changed. | |||
615 | if (ModifiedDTOnIteration != ModifyDT::NotModifyDT) | |||
616 | break; | |||
617 | } | |||
618 | } | |||
619 | // We have iterated over all the BBs in the function (only relevant for huge functions). | |||
620 | FuncIterated = IsHugeFunc; | |||
621 | ||||
622 | if (EnableTypePromotionMerge && !ValToSExtendedUses.empty()) | |||
623 | MadeChange |= mergeSExts(F); | |||
624 | if (!LargeOffsetGEPMap.empty()) | |||
625 | MadeChange |= splitLargeGEPOffsets(); | |||
626 | MadeChange |= optimizePhiTypes(F); | |||
627 | ||||
628 | if (MadeChange) | |||
629 | eliminateFallThrough(F); | |||
630 | ||||
631 | // Really free removed instructions during promotion. | |||
632 | for (Instruction *I : RemovedInsts) | |||
633 | I->deleteValue(); | |||
634 | ||||
635 | EverMadeChange |= MadeChange; | |||
636 | SeenChainsForSExt.clear(); | |||
637 | ValToSExtendedUses.clear(); | |||
638 | RemovedInsts.clear(); | |||
639 | LargeOffsetGEPMap.clear(); | |||
640 | LargeOffsetGEPID.clear(); | |||
641 | } | |||
642 | ||||
643 | NewGEPBases.clear(); | |||
644 | SunkAddrs.clear(); | |||
645 | ||||
646 | if (!DisableBranchOpts) { | |||
647 | MadeChange = false; | |||
648 | // Use a set vector to get deterministic iteration order. The order the | |||
649 | // blocks are removed may affect whether or not PHI nodes in successors | |||
650 | // are removed. | |||
651 | SmallSetVector<BasicBlock *, 8> WorkList; | |||
652 | for (BasicBlock &BB : F) { | |||
653 | SmallVector<BasicBlock *, 2> Successors(successors(&BB)); | |||
654 | MadeChange |= ConstantFoldTerminator(&BB, true); | |||
655 | if (!MadeChange) | |||
656 | continue; | |||
657 | ||||
658 | for (BasicBlock *Succ : Successors) | |||
659 | if (pred_empty(Succ)) | |||
660 | WorkList.insert(Succ); | |||
661 | } | |||
662 | ||||
663 | // Delete the dead blocks and any of their dead successors. | |||
664 | MadeChange |= !WorkList.empty(); | |||
665 | while (!WorkList.empty()) { | |||
666 | BasicBlock *BB = WorkList.pop_back_val(); | |||
667 | SmallVector<BasicBlock *, 2> Successors(successors(BB)); | |||
668 | ||||
669 | DeleteDeadBlock(BB); | |||
670 | ||||
671 | for (BasicBlock *Succ : Successors) | |||
672 | if (pred_empty(Succ)) | |||
673 | WorkList.insert(Succ); | |||
674 | } | |||
675 | ||||
676 | // Merge pairs of basic blocks with unconditional branches, connected by | |||
677 | // a single edge. | |||
678 | if (EverMadeChange || MadeChange) | |||
679 | MadeChange |= eliminateFallThrough(F); | |||
680 | ||||
681 | EverMadeChange |= MadeChange; | |||
682 | } | |||
683 | ||||
684 | if (!DisableGCOpts) { | |||
685 | SmallVector<GCStatepointInst *, 2> Statepoints; | |||
686 | for (BasicBlock &BB : F) | |||
687 | for (Instruction &I : BB) | |||
688 | if (auto *SP = dyn_cast<GCStatepointInst>(&I)) | |||
689 | Statepoints.push_back(SP); | |||
690 | for (auto &I : Statepoints) | |||
691 | EverMadeChange |= simplifyOffsetableRelocate(*I); | |||
692 | } | |||
693 | ||||
694 | // Do this last to clean up use-before-def scenarios introduced by other | |||
695 | // preparatory transforms. | |||
696 | EverMadeChange |= placeDbgValues(F); | |||
697 | EverMadeChange |= placePseudoProbes(F); | |||
698 | ||||
699 | #ifndef NDEBUG | |||
700 | if (VerifyBFIUpdates) | |||
701 | verifyBFIUpdates(F); | |||
702 | #endif | |||
703 | ||||
704 | return EverMadeChange; | |||
705 | } | |||
706 | ||||
707 | bool CodeGenPrepare::eliminateAssumptions(Function &F) { | |||
708 | bool MadeChange = false; | |||
709 | for (BasicBlock &BB : F) { | |||
710 | CurInstIterator = BB.begin(); | |||
711 | while (CurInstIterator != BB.end()) { | |||
712 | Instruction *I = &*(CurInstIterator++); | |||
713 | if (auto *Assume = dyn_cast<AssumeInst>(I)) { | |||
714 | MadeChange = true; | |||
715 | Value *Operand = Assume->getOperand(0); | |||
716 | Assume->eraseFromParent(); | |||
717 | ||||
718 | resetIteratorIfInvalidatedWhileCalling(&BB, [&]() { | |||
719 | RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr); | |||
720 | }); | |||
721 | } | |||
722 | } | |||
723 | } | |||
724 | return MadeChange; | |||
725 | } | |||
726 | ||||
727 | /// An instruction is about to be deleted, so remove all references to it in our | |||
728 | /// GEP-tracking data structures. | |||
729 | void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) { | |||
730 | LargeOffsetGEPMap.erase(V); | |||
731 | NewGEPBases.erase(V); | |||
732 | ||||
733 | auto GEP = dyn_cast<GetElementPtrInst>(V); | |||
734 | if (!GEP) | |||
735 | return; | |||
736 | ||||
737 | LargeOffsetGEPID.erase(GEP); | |||
738 | ||||
739 | auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand()); | |||
740 | if (VecI == LargeOffsetGEPMap.end()) | |||
741 | return; | |||
742 | ||||
743 | auto &GEPVector = VecI->second; | |||
744 | llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; }); | |||
745 | ||||
746 | if (GEPVector.empty()) | |||
747 | LargeOffsetGEPMap.erase(VecI); | |||
748 | } | |||
749 | ||||
750 | // Verify BFI has been updated correctly by recomputing BFI and comparing them. | |||
751 | void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) { | |||
752 | DominatorTree NewDT(F); | |||
753 | LoopInfo NewLI(NewDT); | |||
754 | BranchProbabilityInfo NewBPI(F, NewLI, TLInfo); | |||
755 | BlockFrequencyInfo NewBFI(F, NewBPI, NewLI); | |||
756 | NewBFI.verifyMatch(*BFI); | |||
757 | } | |||
758 | ||||
759 | /// Merge basic blocks which are connected by a single edge, where one of the | |||
760 | /// basic blocks has a single successor pointing to the other basic block, | |||
761 | /// which has a single predecessor. | |||
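    | /// For example (illustrative IR): | |||
    | ///   bb1: | |||
    | ///     br label %bb2 | |||
    | ///   bb2:                  ; preds = %bb1 | |||
    | ///     ret void | |||
    | /// Here bb2 is merged into bb1 and then deleted. | |||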
762 | bool CodeGenPrepare::eliminateFallThrough(Function &F) { | |||
763 | bool Changed = false; | |||
764 | // Scan all of the blocks in the function, except for the entry block. | |||
765 | // Use a temporary array to avoid iterator being invalidated when | |||
766 | // deleting blocks. | |||
767 | SmallVector<WeakTrackingVH, 16> Blocks; | |||
768 | for (auto &Block : llvm::drop_begin(F)) | |||
769 | Blocks.push_back(&Block); | |||
770 | ||||
771 | SmallSet<WeakTrackingVH, 16> Preds; | |||
772 | for (auto &Block : Blocks) { | |||
773 | auto *BB = cast_or_null<BasicBlock>(Block); | |||
774 | if (!BB) | |||
775 | continue; | |||
776 | // If the destination block has a single pred, then this is a trivial | |||
777 | // edge, just collapse it. | |||
778 | BasicBlock *SinglePred = BB->getSinglePredecessor(); | |||
779 | ||||
780 | // Don't merge if BB's address is taken. | |||
781 | if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) | |||
782 | continue; | |||
783 | ||||
784 | BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); | |||
785 | if (Term && !Term->isConditional()) { | |||
786 | Changed = true; | |||
787 | LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n"); | |||
788 | ||||
789 | // Merge BB into SinglePred and delete it. | |||
790 | MergeBlockIntoPredecessor(BB); | |||
791 | Preds.insert(SinglePred); | |||
792 | ||||
793 | if (IsHugeFunc) { | |||
794 | // Update FreshBBs to optimize the merged BB. | |||
795 | FreshBBs.insert(SinglePred); | |||
796 | FreshBBs.erase(BB); | |||
797 | } | |||
798 | } | |||
799 | } | |||
800 | ||||
801 | // (Repeatedly) merging blocks into their predecessors can create redundant | |||
802 | // debug intrinsics. | |||
803 | for (const auto &Pred : Preds) | |||
804 | if (auto *BB = cast_or_null<BasicBlock>(Pred)) | |||
805 | RemoveRedundantDbgInstrs(BB); | |||
806 | ||||
807 | return Changed; | |||
808 | } | |||
809 | ||||
810 | /// Find a destination block from BB if BB is a mergeable empty block. | |||
811 | BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) { | |||
812 | // If this block doesn't end with an uncond branch, ignore it. | |||
813 | BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); | |||
814 | if (!BI || !BI->isUnconditional()) | |||
815 | return nullptr; | |||
816 | ||||
817 | // If the instruction before the branch (skipping debug info) isn't a phi | |||
818 | // node, then other stuff is happening here. | |||
819 | BasicBlock::iterator BBI = BI->getIterator(); | |||
820 | if (BBI != BB->begin()) { | |||
821 | --BBI; | |||
822 | while (isa<DbgInfoIntrinsic>(BBI)) { | |||
823 | if (BBI == BB->begin()) | |||
824 | break; | |||
825 | --BBI; | |||
826 | } | |||
827 | if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) | |||
828 | return nullptr; | |||
829 | } | |||
830 | ||||
831 | // Do not break infinite loops. | |||
832 | BasicBlock *DestBB = BI->getSuccessor(0); | |||
833 | if (DestBB == BB) | |||
834 | return nullptr; | |||
835 | ||||
836 | if (!canMergeBlocks(BB, DestBB)) | |||
837 | DestBB = nullptr; | |||
838 | ||||
839 | return DestBB; | |||
840 | } | |||
841 | ||||
842 | /// Eliminate blocks that contain only PHI nodes, debug info directives, and an | |||
843 | /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split | |||
844 | /// edges in ways that are non-optimal for isel. Start by eliminating these | |||
845 | /// blocks so we can split them the way we want them. | |||
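    | /// A "mostly empty" block looks like (illustrative IR): | |||
    | ///   bb: | |||
    | ///     %p = phi i32 [ %x, %a ], [ %y, %b ] | |||
    | ///     br label %dest | |||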
846 | bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) { | |||
847 | SmallPtrSet<BasicBlock *, 16> Preheaders; | |||
848 | SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end()); | |||
849 | while (!LoopList.empty()) { | |||
850 | Loop *L = LoopList.pop_back_val(); | |||
851 | llvm::append_range(LoopList, *L); | |||
852 | if (BasicBlock *Preheader = L->getLoopPreheader()) | |||
853 | Preheaders.insert(Preheader); | |||
854 | } | |||
855 | ||||
856 | bool MadeChange = false; | |||
857 | // Copy blocks into a temporary array to avoid iterator invalidation issues | |||
858 | // as we remove them. | |||
859 | // Note that this intentionally skips the entry block. | |||
860 | SmallVector<WeakTrackingVH, 16> Blocks; | |||
861 | for (auto &Block : llvm::drop_begin(F)) | |||
862 | Blocks.push_back(&Block); | |||
863 | ||||
864 | for (auto &Block : Blocks) { | |||
865 | BasicBlock *BB = cast_or_null<BasicBlock>(Block); | |||
866 | if (!BB) | |||
867 | continue; | |||
868 | BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB); | |||
869 | if (!DestBB || | |||
870 | !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB))) | |||
871 | continue; | |||
872 | ||||
873 | eliminateMostlyEmptyBlock(BB); | |||
874 | MadeChange = true; | |||
875 | } | |||
876 | return MadeChange; | |||
877 | } | |||
878 | ||||
879 | bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB, | |||
880 | BasicBlock *DestBB, | |||
881 | bool isPreheader) { | |||
882 | // Do not delete loop preheaders if doing so would create a critical edge. | |||
883 | // Loop preheaders can be good locations to spill registers. If the | |||
884 | // preheader is deleted and we create a critical edge, registers may be | |||
885 | // spilled in the loop body instead. | |||
886 | if (!DisablePreheaderProtect && isPreheader && | |||
887 | !(BB->getSinglePredecessor() && | |||
888 | BB->getSinglePredecessor()->getSingleSuccessor())) | |||
889 | return false; | |||
890 | ||||
891 | // Skip merging if the block's successor is also a successor to any callbr | |||
892 | // that leads to this block. | |||
893 | // FIXME: Is this really needed? Is this a correctness issue? | |||
894 | for (BasicBlock *Pred : predecessors(BB)) { | |||
895 | if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator())) | |||
896 | for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i) | |||
897 | if (DestBB == CBI->getSuccessor(i)) | |||
898 | return false; | |||
899 | } | |||
900 | ||||
901 | // Try to skip merging if the unique predecessor of BB is terminated by a | |||
902 | // switch or indirect branch instruction, and BB is used as an incoming block | |||
903 | // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel to | |||
904 | // add COPY instructions in the predecessor of BB instead of BB (if it is not | |||
905 | // merged). Note that the critical edge created by merging such blocks won't be | |||
906 | // split in MachineSink because the jump table is not analyzable. By keeping | |||
907 | // such an empty block (BB), ISel will place COPY instructions in BB, not in the | |||
908 | // predecessor of BB. | |||
909 | BasicBlock *Pred = BB->getUniquePredecessor(); | |||
910 | if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) || | |||
911 | isa<IndirectBrInst>(Pred->getTerminator()))) | |||
912 | return true; | |||
913 | ||||
914 | if (BB->getTerminator() != BB->getFirstNonPHIOrDbg()) | |||
915 | return true; | |||
916 | ||||
917 | // We use a simple cost heuristic which determines that skipping merging is | |||
918 | // profitable if the cost of skipping merging is less than the cost of | |||
919 | // merging: Cost(skipping merging) < Cost(merging BB), where | |||
920 | // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and | |||
921 | // Cost(merging BB) is Freq(Pred) * Cost(Copy). | |||
922 | // Assuming Cost(Copy) == Cost(Branch), we can simplify this to: | |||
923 | //   Freq(Pred) / Freq(BB) > 2. | |||
924 | // Note that if there are multiple empty blocks sharing the same incoming | |||
925 | // value for the PHIs in the DestBB, we consider them together. In such a | |||
926 | // case, Cost(merging BB) will be the sum of their frequencies. | |||
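    | // Worked example (illustrative numbers): with FreqRatioToSkipMerge == 2, | |||
    | // Freq(Pred) = 1000 and Freq(BB) = 400 give 1000 > 400 * 2, so the merge is | |||
    | // skipped; with Freq(BB) = 600, 1000 <= 600 * 2 holds and BB is merged. | |||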
927 | ||||
928 | if (!isa<PHINode>(DestBB->begin())) | |||
929 | return true; | |||
930 | ||||
931 | SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs; | |||
932 | ||||
933 | // Find all other incoming blocks from which incoming values of all PHIs in | |||
934 | // DestBB are the same as the ones from BB. | |||
935 | for (BasicBlock *DestBBPred : predecessors(DestBB)) { | |||
936 | if (DestBBPred == BB) | |||
937 | continue; | |||
938 | ||||
939 | if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) { | |||
940 | return DestPN.getIncomingValueForBlock(BB) == | |||
941 | DestPN.getIncomingValueForBlock(DestBBPred); | |||
942 | })) | |||
943 | SameIncomingValueBBs.insert(DestBBPred); | |||
944 | } | |||
945 | ||||
946 | // See if all of BB's incoming values are the same as the value from Pred. In | |||
947 | // this case, there is no reason to skip merging because COPYs are expected to | |||
948 | // be placed in Pred already. | |||
949 | if (SameIncomingValueBBs.count(Pred)) | |||
950 | return true; | |||
951 | ||||
952 | BlockFrequency PredFreq = BFI->getBlockFreq(Pred); | |||
953 | BlockFrequency BBFreq = BFI->getBlockFreq(BB); | |||
954 | ||||
955 | for (auto *SameValueBB : SameIncomingValueBBs) | |||
956 | if (SameValueBB->getUniquePredecessor() == Pred && | |||
957 | DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB)) | |||
958 | BBFreq += BFI->getBlockFreq(SameValueBB); | |||
959 | ||||
960 | return PredFreq.getFrequency() <= | |||
961 | BBFreq.getFrequency() * FreqRatioToSkipMerge; | |||
962 | } | |||
963 | ||||
964 | /// Return true if we can merge BB into DestBB if there is a single | |||
965 | /// unconditional branch between them, and BB contains no other non-phi | |||
966 | /// instructions. | |||
967 | bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB, | |||
968 | const BasicBlock *DestBB) const { | |||
969 | // We only want to eliminate blocks whose phi nodes are used by phi nodes in | |||
970 | // the successor. If there are more complex conditions (e.g. preheaders), | |||
971 | // don't mess around with them. | |||
972 | for (const PHINode &PN : BB->phis()) { | |||
973 | for (const User *U : PN.users()) { | |||
974 | const Instruction *UI = cast<Instruction>(U); | |||
975 | if (UI->getParent() != DestBB || !isa<PHINode>(UI)) | |||
976 | return false; | |||
977 | // If User is inside DestBB block and it is a PHINode then check | |||
978 | // incoming value. If incoming value is not from BB then this is | |||
979 | // a complex condition (e.g. preheaders) we want to avoid here. | |||
980 | if (UI->getParent() == DestBB) { | |||
981 | if (const PHINode *UPN = dyn_cast<PHINode>(UI)) | |||
982 | for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { | |||
983 | Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); | |||
984 | if (Insn && Insn->getParent() == BB && | |||
985 | Insn->getParent() != UPN->getIncomingBlock(I)) | |||
986 | return false; | |||
987 | } | |||
988 | } | |||
989 | } | |||
990 | } | |||
991 | ||||
992 | // If BB and DestBB contain any common predecessors, then the phi nodes in BB | |||
993 | // and DestBB may have conflicting incoming values for the block. If so, we | |||
994 | // can't merge the block. | |||
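    | // Illustrative conflict: if P is a predecessor of both BB and DestBB, a phi | |||
    | // in DestBB may receive %v1 on the edge from P and %v2 on the edge from BB; | |||
    | // after merging, the single edge from P could not carry both values. | |||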
995 | const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); | |||
996 | if (!DestBBPN) | |||
997 | return true; // no conflict. | |||
998 | ||||
999 | // Collect the preds of BB. | |||
1000 | SmallPtrSet<const BasicBlock *, 16> BBPreds; | |||
1001 | if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { | |||
1002 | // It is faster to get preds from a PHI than with pred_iterator. | |||
1003 | for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) | |||
1004 | BBPreds.insert(BBPN->getIncomingBlock(i)); | |||
1005 | } else { | |||
1006 | BBPreds.insert(pred_begin(BB), pred_end(BB)); | |||
1007 | } | |||
1008 | ||||
1009 | // Walk the preds of DestBB. | |||
1010 | for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { | |||
1011 | BasicBlock *Pred = DestBBPN->getIncomingBlock(i); | |||
1012 | if (BBPreds.count(Pred)) { // Common predecessor? | |||
1013 | for (const PHINode &PN : DestBB->phis()) { | |||
1014 | const Value *V1 = PN.getIncomingValueForBlock(Pred); | |||
1015 | const Value *V2 = PN.getIncomingValueForBlock(BB); | |||
1016 | ||||
1017 | // If V2 is a phi node in BB, look up what the mapped value will be. | |||
1018 | if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) | |||
1019 | if (V2PN->getParent() == BB) | |||
1020 | V2 = V2PN->getIncomingValueForBlock(Pred); | |||
1021 | ||||
1022 | // If there is a conflict, bail out. | |||
1023 | if (V1 != V2) | |||
1024 | return false; | |||
1025 | } | |||
1026 | } | |||
1027 | } | |||
1028 | ||||
1029 | return true; | |||
1030 | } | |||
1031 | ||||
1032 | /// Replace all old uses with new ones, and push the updated BBs into FreshBBs. | |||
1033 | static void replaceAllUsesWith(Value *Old, Value *New, | |||
1034 | SmallSet<BasicBlock *, 32> &FreshBBs, | |||
1035 | bool IsHuge) { | |||
1036 | auto *OldI = dyn_cast<Instruction>(Old); | |||
1037 | if (OldI) { | |||
1038 | for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end(); | |||
1039 | UI != E; ++UI) { | |||
1040 | Instruction *User = cast<Instruction>(*UI); | |||
1041 | if (IsHuge) | |||
1042 | FreshBBs.insert(User->getParent()); | |||
1043 | } | |||
1044 | } | |||
1045 | Old->replaceAllUsesWith(New); | |||
1046 | } | |||
1047 | ||||
1048 | /// Eliminate a basic block that has only phi's and an unconditional branch in | |||
1049 | /// it. | |||
1050 | void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) { | |||
1051 | BranchInst *BI = cast<BranchInst>(BB->getTerminator()); | |||
1052 | BasicBlock *DestBB = BI->getSuccessor(0); | |||
1053 | ||||
1054 | LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" | |||
1055 | << *BB << *DestBB); | |||
1056 | ||||
1057 | // If the destination block has a single pred, then this is a trivial edge, | |||
1058 | // just collapse it. | |||
1059 | if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { | |||
1060 | if (SinglePred != DestBB) { | |||
1061 | assert(SinglePred == BB && | |||
1062 | "Single predecessor not the same as predecessor"); | |||
1063 | // Merge DestBB into SinglePred/BB and delete it. | |||
1064 | MergeBlockIntoPredecessor(DestBB); | |||
1065 | // Note: BB(=SinglePred) will not be deleted on this path. | |||
1066 | // DestBB(=its single successor) is the one that was deleted. | |||
1067 | LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n"); | |||
1068 | ||||
1069 | if (IsHugeFunc) { | |||
1070 | // Update FreshBBs to optimize the merged BB. | |||
1071 | FreshBBs.insert(SinglePred); | |||
1072 | FreshBBs.erase(DestBB); | |||
1073 | } | |||
1074 | return; | |||
1075 | } | |||
1076 | } | |||
1077 | ||||
1078 | // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB | |||
1079 | // to handle the new incoming edges it is about to have. | |||
1080 | for (PHINode &PN : DestBB->phis()) { | |||
1081 | // Remove the incoming value for BB, and remember it. | |||
1082 | Value *InVal = PN.removeIncomingValue(BB, false); | |||
1083 | ||||
1084 | // Two options: either the InVal is a phi node defined in BB or it is some | |||
1085 | // value that dominates BB. | |||
1086 | PHINode *InValPhi = dyn_cast<PHINode>(InVal); | |||
1087 | if (InValPhi && InValPhi->getParent() == BB) { | |||
1088 | // Add all of the input values of the input PHI as inputs of this phi. | |||
1089 | for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) | |||
1090 | PN.addIncoming(InValPhi->getIncomingValue(i), | |||
1091 | InValPhi->getIncomingBlock(i)); | |||
1092 | } else { | |||
1093 | // Otherwise, add one instance of the dominating value for each edge that | |||
1094 | // we will be adding. | |||
1095 | if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { | |||
1096 | for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) | |||
1097 | PN.addIncoming(InVal, BBPN->getIncomingBlock(i)); | |||
1098 | } else { | |||
1099 | for (BasicBlock *Pred : predecessors(BB)) | |||
1100 | PN.addIncoming(InVal, Pred); | |||
1101 | } | |||
1102 | } | |||
1103 | } | |||
1104 | ||||
1105 | // The PHIs are now updated, change everything that refers to BB to use | |||
1106 | // DestBB and remove BB. | |||
1107 | BB->replaceAllUsesWith(DestBB); | |||
1108 | BB->eraseFromParent(); | |||
1109 | ++NumBlocksElim; | |||
1110 | ||||
1111 | LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"); | |||
1112 | } | |||
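// A minimal sketch of the multi-predecessor case above (hypothetical IR, block
// and value names invented for illustration): a mostly empty block %bb whose
// incoming value is itself a PHI defined in %bb
//
//   bb:                                   ; preds = %p1, %p2
//     %v = phi i32 [ 1, %p1 ], [ 2, %p2 ]
//     br label %dest
//   dest:                                 ; preds = %bb, %other
//     %r = phi i32 [ %v, %bb ], [ 3, %other ]
//
// is eliminated by splicing %v's incoming values directly into %r:
//
//   dest:                                 ; preds = %p1, %p2, %other
//     %r = phi i32 [ 1, %p1 ], [ 2, %p2 ], [ 3, %other ]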
1113 | ||||
1114 | // Computes a map of base pointer relocation instructions to corresponding | |||
1115 | // derived pointer relocation instructions given a vector of all relocate calls | |||
1116 | static void computeBaseDerivedRelocateMap( | |||
1117 | const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls, | |||
1118 | DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> | |||
1119 | &RelocateInstMap) { | |||
1120 | // Collect information in two maps: one primarily for locating the base object | |||
1121 | // while filling the second map; the second map is the final structure holding | |||
1122 | // a mapping between Base and corresponding Derived relocate calls | |||
1123 | DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap; | |||
1124 | for (auto *ThisRelocate : AllRelocateCalls) { | |||
1125 | auto K = std::make_pair(ThisRelocate->getBasePtrIndex(), | |||
1126 | ThisRelocate->getDerivedPtrIndex()); | |||
1127 | RelocateIdxMap.insert(std::make_pair(K, ThisRelocate)); | |||
1128 | } | |||
1129 | for (auto &Item : RelocateIdxMap) { | |||
1130 | std::pair<unsigned, unsigned> Key = Item.first; | |||
1131 | if (Key.first == Key.second) | |||
1132 | // Base relocation: nothing to insert | |||
1133 | continue; | |||
1134 | ||||
1135 | GCRelocateInst *I = Item.second; | |||
1136 | auto BaseKey = std::make_pair(Key.first, Key.first); | |||
1137 | ||||
1138 | // We're iterating over RelocateIdxMap so we cannot modify it. | |||
1139 | auto MaybeBase = RelocateIdxMap.find(BaseKey); | |||
1140 | if (MaybeBase == RelocateIdxMap.end()) | |||
1141 | // TODO: We might want to insert a new base object relocate and gep off | |||
1142 | // that, if there are enough derived object relocates. | |||
1143 | continue; | |||
1144 | ||||
1145 | RelocateInstMap[MaybeBase->second].push_back(I); | |||
1146 | } | |||
1147 | } | |||
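// For illustration (schematic IR in the style of the comments further below,
// names invented): given a statepoint with a base pointer at index 4 and a
// derived pointer at index 5,
//
//   %tok   = statepoint(%fun, ..., %base, %ptr)
//   %base' = relocate(%tok, i32 4, i32 4)   ; key (4,4) -> base relocate
//   %ptr'  = relocate(%tok, i32 4, i32 5)   ; key (4,5) -> derived relocate
//
// RelocateIdxMap is keyed on those index pairs, and RelocateInstMap ends up
// mapping %base' to the vector [%ptr'].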
1148 | ||||
1149 | // Accepts a GEP and extracts the operands into a vector provided they're all | |||
1150 | // small integer constants | |||
1151 | static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, | |||
1152 | SmallVectorImpl<Value *> &OffsetV) { | |||
1153 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) { | |||
1154 | // Only accept small constant integer operands | |||
1155 | auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); | |||
1156 | if (!Op || Op->getZExtValue() > 20) | |||
1157 | return false; | |||
1158 | } | |||
1159 | ||||
1160 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) | |||
1161 | OffsetV.push_back(GEP->getOperand(i)); | |||
1162 | return true; | |||
1163 | } | |||
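// For example (illustrative): 'gep %p, i64 3' has its single index accepted
// (3 <= 20) and OffsetV becomes {i64 3}; a GEP with an index of 100, or with
// a non-constant index, is rejected and OffsetV is left untouched.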
1164 | ||||
1165 | // Takes a RelocatedBase (base pointer relocation instruction) and Targets to | |||
1166 | // replace, computes a replacement, and effects it. | |||
1167 | static bool | |||
1168 | simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase, | |||
1169 | const SmallVectorImpl<GCRelocateInst *> &Targets) { | |||
1170 | bool MadeChange = false; | |||
1171 | // We must ensure that the relocation of the derived pointer is defined after | |||
1172 | // the relocation of the base pointer. If we find a relocation corresponding | |||
1173 | // to the base that is defined earlier than the relocation of the base, we | |||
1174 | // move the relocation of the base right before that relocation. We consider | |||
1175 | // only relocations in the same basic block as the relocation of the base; | |||
1176 | // relocations from other basic blocks are skipped by this optimization. | |||
1177 | for (auto R = RelocatedBase->getParent()->getFirstInsertionPt(); | |||
1178 | &*R != RelocatedBase; ++R) | |||
1179 | if (auto *RI = dyn_cast<GCRelocateInst>(R)) | |||
1180 | if (RI->getStatepoint() == RelocatedBase->getStatepoint()) | |||
1181 | if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) { | |||
1182 | RelocatedBase->moveBefore(RI); | |||
1183 | break; | |||
1184 | } | |||
1185 | ||||
1186 | for (GCRelocateInst *ToReplace : Targets) { | |||
1187 | assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() && | |||
1188 | "Not relocating a derived object of the original base object"); | |||
1189 | if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) { | |||
1190 | // A duplicate relocate call. TODO: coalesce duplicates. | |||
1191 | continue; | |||
1192 | } | |||
1193 | ||||
1194 | if (RelocatedBase->getParent() != ToReplace->getParent()) { | |||
1195 | // Base and derived relocates are in different basic blocks. | |||
1196 | // In this case transform is only valid when base dominates derived | |||
1197 | // relocate. However it would be too expensive to check dominance | |||
1198 | // for each such relocate, so we skip the whole transformation. | |||
1199 | continue; | |||
1200 | } | |||
1201 | ||||
1202 | Value *Base = ToReplace->getBasePtr(); | |||
1203 | auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr()); | |||
1204 | if (!Derived || Derived->getPointerOperand() != Base) | |||
1205 | continue; | |||
1206 | ||||
1207 | SmallVector<Value *, 2> OffsetV; | |||
1208 | if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) | |||
1209 | continue; | |||
1210 | ||||
1211 | // Create a Builder and replace the target callsite with a gep | |||
1212 | assert(RelocatedBase->getNextNode() && | |||
1213 | "Should always have one since it's not a terminator"); | |||
1214 | ||||
1215 | // Insert after RelocatedBase | |||
1216 | IRBuilder<> Builder(RelocatedBase->getNextNode()); | |||
1217 | Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); | |||
1218 | ||||
1219 | // If gc_relocate does not match the actual type, cast it to the right type. | |||
1220 | // In theory, there must be a bitcast after gc_relocate if the type does not | |||
1221 | // match, and we should reuse it to get the derived pointer. But there could | |||
1222 | // be cases like this: | |||
1223 | // bb1: | |||
1224 | // ... | |||
1225 | // %g1 = call coldcc i8 addrspace(1)* | |||
1226 | // @llvm.experimental.gc.relocate.p1i8(...) br label %merge | |||
1227 | // | |||
1228 | // bb2: | |||
1229 | // ... | |||
1230 | // %g2 = call coldcc i8 addrspace(1)* | |||
1231 | // @llvm.experimental.gc.relocate.p1i8(...) br label %merge | |||
1232 | // | |||
1233 | // merge: | |||
1234 | // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] | |||
1235 | // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)* | |||
1236 | // | |||
1237 | // In this case, we cannot find the bitcast anymore. So we insert a new | |||
1238 | // bitcast whether or not one already exists. In this way, we can handle | |||
1239 | // all cases, and the extra bitcast should be optimized away in later | |||
1240 | // passes. | |||
1241 | Value *ActualRelocatedBase = RelocatedBase; | |||
1242 | if (RelocatedBase->getType() != Base->getType()) { | |||
1243 | ActualRelocatedBase = | |||
1244 | Builder.CreateBitCast(RelocatedBase, Base->getType()); | |||
1245 | } | |||
1246 | Value *Replacement = | |||
1247 | Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase, | |||
1248 | ArrayRef(OffsetV)); | |||
1249 | Replacement->takeName(ToReplace); | |||
1250 | // If the newly generated derived pointer's type does not match the original | |||
1251 | // derived pointer's type, cast the new derived pointer to match it. Same | |||
1252 | // reasoning as above. | |||
1253 | Value *ActualReplacement = Replacement; | |||
1254 | if (Replacement->getType() != ToReplace->getType()) { | |||
1255 | ActualReplacement = | |||
1256 | Builder.CreateBitCast(Replacement, ToReplace->getType()); | |||
1257 | } | |||
1258 | ToReplace->replaceAllUsesWith(ActualReplacement); | |||
1259 | ToReplace->eraseFromParent(); | |||
1260 | ||||
1261 | MadeChange = true; | |||
1262 | } | |||
1263 | return MadeChange; | |||
1264 | } | |||
1265 | ||||
1266 | // Turns this: | |||
1267 | // | |||
1268 | // %base = ... | |||
1269 | // %ptr = gep %base + 15 | |||
1270 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
1271 | // %base' = relocate(%tok, i32 4, i32 4) | |||
1272 | // %ptr' = relocate(%tok, i32 4, i32 5) | |||
1273 | // %val = load %ptr' | |||
1274 | // | |||
1275 | // into this: | |||
1276 | // | |||
1277 | // %base = ... | |||
1278 | // %ptr = gep %base + 15 | |||
1279 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
1280 | // %base' = gc.relocate(%tok, i32 4, i32 4) | |||
1281 | // %ptr' = gep %base' + 15 | |||
1282 | // %val = load %ptr' | |||
1283 | bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) { | |||
1284 | bool MadeChange = false; | |||
1285 | SmallVector<GCRelocateInst *, 2> AllRelocateCalls; | |||
1286 | for (auto *U : I.users()) | |||
1287 | if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) | |||
1288 | // Collect all the relocate calls associated with a statepoint | |||
1289 | AllRelocateCalls.push_back(Relocate); | |||
1290 | ||||
1291 | // We need at least one base pointer relocation + one derived pointer | |||
1292 | // relocation to mangle | |||
1293 | if (AllRelocateCalls.size() < 2) | |||
1294 | return false; | |||
1295 | ||||
1296 | // RelocateInstMap is a mapping from the base relocate instruction to the | |||
1297 | // corresponding derived relocate instructions | |||
1298 | DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; | |||
1299 | computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); | |||
1300 | if (RelocateInstMap.empty()) | |||
1301 | return false; | |||
1302 | ||||
1303 | for (auto &Item : RelocateInstMap) | |||
1304 | // Item.first is the RelocatedBase to offset against | |||
1305 | // Item.second is the vector of Targets to replace | |||
1306 | MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second); | |||
1307 | return MadeChange; | |||
1308 | } | |||
1309 | ||||
1310 | /// Sink the specified cast instruction into its user blocks. | |||
1311 | static bool SinkCast(CastInst *CI) { | |||
1312 | BasicBlock *DefBB = CI->getParent(); | |||
1313 | ||||
1314 | /// InsertedCasts - Only insert a cast in each block once. | |||
1315 | DenseMap<BasicBlock *, CastInst *> InsertedCasts; | |||
1316 | ||||
1317 | bool MadeChange = false; | |||
1318 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
1319 | UI != E;) { | |||
1320 | Use &TheUse = UI.getUse(); | |||
1321 | Instruction *User = cast<Instruction>(*UI); | |||
1322 | ||||
1323 | // Figure out which BB this cast is used in. For PHI's this is the | |||
1324 | // appropriate predecessor block. | |||
1325 | BasicBlock *UserBB = User->getParent(); | |||
1326 | if (PHINode *PN = dyn_cast<PHINode>(User)) { | |||
1327 | UserBB = PN->getIncomingBlock(TheUse); | |||
1328 | } | |||
1329 | ||||
1330 | // Preincrement use iterator so we don't invalidate it. | |||
1331 | ++UI; | |||
1332 | ||||
1333 | // The first insertion point of a block containing an EH pad is after the | |||
1334 | // pad. If the pad is the user, we cannot sink the cast past the pad. | |||
1335 | if (User->isEHPad()) | |||
1336 | continue; | |||
1337 | ||||
1338 | // If the block selected to receive the cast is an EH pad that does not | |||
1339 | // allow non-PHI instructions before the terminator, we can't sink the | |||
1340 | // cast. | |||
1341 | if (UserBB->getTerminator()->isEHPad()) | |||
1342 | continue; | |||
1343 | ||||
1344 | // If this user is in the same block as the cast, don't change the cast. | |||
1345 | if (UserBB == DefBB) | |||
1346 | continue; | |||
1347 | ||||
1348 | // If we have already inserted a cast into this block, use it. | |||
1349 | CastInst *&InsertedCast = InsertedCasts[UserBB]; | |||
1350 | ||||
1351 | if (!InsertedCast) { | |||
1352 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1353 | assert(InsertPt != UserBB->end()); | |||
1354 | InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), | |||
1355 | CI->getType(), "", &*InsertPt); | |||
1356 | InsertedCast->setDebugLoc(CI->getDebugLoc()); | |||
1357 | } | |||
1358 | ||||
1359 | // Replace a use of the cast with a use of the new cast. | |||
1360 | TheUse = InsertedCast; | |||
1361 | MadeChange = true; | |||
1362 | ++NumCastUses; | |||
1363 | } | |||
1364 | ||||
1365 | // If we removed all uses, nuke the cast. | |||
1366 | if (CI->use_empty()) { | |||
1367 | salvageDebugInfo(*CI); | |||
1368 | CI->eraseFromParent(); | |||
1369 | MadeChange = true; | |||
1370 | } | |||
1371 | ||||
1372 | return MadeChange; | |||
1373 | } | |||
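// A small sketch of the sinking (hypothetical IR, names invented): a cast used
// in a different block
//
//   def:
//     %c = trunc i64 %x to i32
//     br label %use
//   use:
//     %r = add i32 %c, 1
//
// gets one copy per user block, after which the original is erased:
//
//   use:
//     %c.sunk = trunc i64 %x to i32
//     %r = add i32 %c.sunk, 1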
1374 | ||||
1375 | /// If the specified cast instruction is a noop copy (e.g. it's casting from | |||
1376 | /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to | |||
1377 | /// reduce the number of virtual registers that must be created and coalesced. | |||
1378 | /// | |||
1379 | /// Return true if any changes are made. | |||
1380 | static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, | |||
1381 | const DataLayout &DL) { | |||
1382 | // Sink only "cheap" (or nop) address-space casts. This is a weaker condition | |||
1383 | // than sinking only nop casts, but is helpful on some platforms. | |||
1384 | if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { | |||
1385 | if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(), | |||
1386 | ASC->getDestAddressSpace())) | |||
1387 | return false; | |||
1388 | } | |||
1389 | ||||
1390 | // If this is a noop copy, | |||
1391 | EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); | |||
1392 | EVT DstVT = TLI.getValueType(DL, CI->getType()); | |||
1393 | ||||
1394 | // This is an fp<->int conversion? | |||
1395 | if (SrcVT.isInteger() != DstVT.isInteger()) | |||
1396 | return false; | |||
1397 | ||||
1398 | // If this is an extension, it will be a zero or sign extension, which | |||
1399 | // isn't a noop. | |||
1400 | if (SrcVT.bitsLT(DstVT)) | |||
1401 | return false; | |||
1402 | ||||
1403 | // If these values will be promoted, find out what they will be promoted | |||
1404 | // to. This helps us consider truncates on PPC as noop copies when they | |||
1405 | // are. | |||
1406 | if (TLI.getTypeAction(CI->getContext(), SrcVT) == | |||
1407 | TargetLowering::TypePromoteInteger) | |||
1408 | SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); | |||
1409 | if (TLI.getTypeAction(CI->getContext(), DstVT) == | |||
1410 | TargetLowering::TypePromoteInteger) | |||
1411 | DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); | |||
1412 | ||||
1413 | // If, after promotion, these are the same types, this is a noop copy. | |||
1414 | if (SrcVT != DstVT) | |||
1415 | return false; | |||
1416 | ||||
1417 | return SinkCast(CI); | |||
1418 | } | |||
1419 | ||||
1420 | // Match a simple increment by constant operation. Note that if a sub is | |||
1421 | // matched, the step is negated (as if the step had been canonicalized to | |||
1422 | // an add, even though we leave the instruction alone.) | |||
1423 | bool matchIncrement(const Instruction *IVInc, Instruction *&LHS, | |||
1424 | Constant *&Step) { | |||
1425 | if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) || | |||
1426 | match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>( | |||
1427 | m_Instruction(LHS), m_Constant(Step))))) | |||
1428 | return true; | |||
1429 | if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) || | |||
1430 | match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>( | |||
1431 | m_Instruction(LHS), m_Constant(Step))))) { | |||
1432 | Step = ConstantExpr::getNeg(Step); | |||
1433 | return true; | |||
1434 | } | |||
1435 | return false; | |||
1436 | } | |||
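// For example (illustrative): '%iv.next = add i32 %iv, 1' matches with
// LHS = %iv and Step = 1, while '%iv.next = sub i32 %iv, 2' also matches,
// with Step negated to -2 as described above.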
1437 | ||||
1438 | /// If the given \p PN is an induction variable with value IVInc coming from | |||
1439 | /// the backedge, and it gets increased by Step on each iteration, return the | |||
1440 | /// pair <IVInc, Step>. Otherwise, return std::nullopt. | |||
1441 | static std::optional<std::pair<Instruction *, Constant *>> | |||
1442 | getIVIncrement(const PHINode *PN, const LoopInfo *LI) { | |||
1443 | const Loop *L = LI->getLoopFor(PN->getParent()); | |||
1444 | if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch()) | |||
1445 | return std::nullopt; | |||
1446 | auto *IVInc = | |||
1447 | dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); | |||
1448 | if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L) | |||
1449 | return std::nullopt; | |||
1450 | Instruction *LHS = nullptr; | |||
1451 | Constant *Step = nullptr; | |||
1452 | if (matchIncrement(IVInc, LHS, Step) && LHS == PN) | |||
1453 | return std::make_pair(IVInc, Step); | |||
1454 | return std::nullopt; | |||
1455 | } | |||
1456 | ||||
1457 | static bool isIVIncrement(const Value *V, const LoopInfo *LI) { | |||
1458 | auto *I = dyn_cast<Instruction>(V); | |||
1459 | if (!I) | |||
1460 | return false; | |||
1461 | Instruction *LHS = nullptr; | |||
1462 | Constant *Step = nullptr; | |||
1463 | if (!matchIncrement(I, LHS, Step)) | |||
1464 | return false; | |||
1465 | if (auto *PN = dyn_cast<PHINode>(LHS)) | |||
1466 | if (auto IVInc = getIVIncrement(PN, LI)) | |||
1467 | return IVInc->first == I; | |||
1468 | return false; | |||
1469 | } | |||
1470 | ||||
1471 | bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO, | |||
1472 | Value *Arg0, Value *Arg1, | |||
1473 | CmpInst *Cmp, | |||
1474 | Intrinsic::ID IID) { | |||
1475 | auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) { | |||
1476 | if (!isIVIncrement(BO, LI)) | |||
1477 | return false; | |||
1478 | const Loop *L = LI->getLoopFor(BO->getParent()); | |||
1479 | assert(L && "L should not be null after isIVIncrement()"); | |||
1480 | // Do not risk moving the increment into a child loop. | |||
1481 | if (LI->getLoopFor(Cmp->getParent()) != L) | |||
1482 | return false; | |||
1483 | ||||
1484 | // Finally, we need to ensure that the insert point will dominate all | |||
1485 | // existing uses of the increment. | |||
1486 | ||||
1487 | auto &DT = getDT(*BO->getParent()->getParent()); | |||
1488 | if (DT.dominates(Cmp->getParent(), BO->getParent())) | |||
1489 | // If we're moving up the dom tree, all uses are trivially dominated. | |||
1490 | // (This is the common case for code produced by LSR.) | |||
1491 | return true; | |||
1492 | ||||
1493 | // Otherwise, special case the single use in the phi recurrence. | |||
1494 | return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch()); | |||
1495 | }; | |||
1496 | if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) { | |||
1497 | // We used to use a dominator tree here to allow multi-block optimization. | |||
1498 | // But that was problematic because: | |||
1499 | // 1. It could cause a perf regression by hoisting the math op into the | |||
1500 | // critical path. | |||
1501 | // 2. It could cause a perf regression by creating a value that was live | |||
1502 | // across multiple blocks and increasing register pressure. | |||
1503 | // 3. Use of a dominator tree could cause large compile-time regression. | |||
1504 | // This is because we recompute the DT on every change in the main CGP | |||
1505 | // run-loop. The recomputing is probably unnecessary in many cases, so if | |||
1506 | // that was fixed, using a DT here would be ok. | |||
1507 | // | |||
1508 | // There is one important particular case we still want to handle: if BO is | |||
1509 | // the IV increment. Important properties that make it profitable: | |||
1510 | // - We can speculate IV increment anywhere in the loop (as long as the | |||
1511 | // indvar Phi is its only user); | |||
1512 | // - Upon computing Cmp, we effectively compute something equivalent to the | |||
1513 | // IV increment (even though it looks different in the IR). So moving it up | |||
1514 | // to the cmp point does not really increase register pressure. | |||
1515 | return false; | |||
1516 | } | |||
1517 | ||||
1518 | // We allow matching the canonical IR (add X, C) back to (usubo X, -C). | |||
1519 | if (BO->getOpcode() == Instruction::Add && | |||
1520 | IID == Intrinsic::usub_with_overflow) { | |||
1521 | assert(isa<Constant>(Arg1) && "Unexpected input for usubo"); | |||
1522 | Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1)); | |||
1523 | } | |||
1524 | ||||
1525 | // Insert at the first instruction of the pair. | |||
1526 | Instruction *InsertPt = nullptr; | |||
1527 | for (Instruction &Iter : *Cmp->getParent()) { | |||
1528 | // If BO is an XOR, it is not guaranteed that it comes after both inputs to | |||
1529 | // the overflow intrinsic are defined. | |||
1530 | if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) { | |||
1531 | InsertPt = &Iter; | |||
1532 | break; | |||
1533 | } | |||
1534 | } | |||
1535 | assert(InsertPt != nullptr && "Parent block did not contain cmp or binop"); | |||
1536 | ||||
1537 | IRBuilder<> Builder(InsertPt); | |||
1538 | Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1); | |||
1539 | if (BO->getOpcode() != Instruction::Xor) { | |||
1540 | Value *Math = Builder.CreateExtractValue(MathOV, 0, "math"); | |||
1541 | replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc); | |||
1542 | } else | |||
1543 | assert(BO->hasOneUse() && | |||
1544 | "Patterns with XOr should use the BO only in the compare"); | |||
1545 | Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov"); | |||
1546 | replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc); | |||
1547 | Cmp->eraseFromParent(); | |||
1548 | BO->eraseFromParent(); | |||
1549 | return true; | |||
1550 | } | |||
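// End-to-end sketch for IID == uadd_with_overflow (hypothetical IR, names
// invented for illustration):
//
//   %math = add i32 %a, %b
//   %ov   = icmp ult i32 %math, %a
//
// becomes
//
//   %m    = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %math = extractvalue { i32, i1 } %m, 0
//   %ov   = extractvalue { i32, i1 } %m, 1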
1551 | ||||
1552 | /// Match special-case patterns that check for unsigned add overflow. | |||
1553 | static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp, | |||
1554 | BinaryOperator *&Add) { | |||
1555 | // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val) | |||
1556 | // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero) | |||
1557 | Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); | |||
1558 | ||||
1559 | // We are not expecting non-canonical/degenerate code. Just bail out. | |||
1560 | if (isa<Constant>(A)) | |||
1561 | return false; | |||
1562 | ||||
1563 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | |||
1564 | if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes())) | |||
1565 | B = ConstantInt::get(B->getType(), 1); | |||
1566 | else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) | |||
1567 | B = ConstantInt::get(B->getType(), -1); | |||
1568 | else | |||
1569 | return false; | |||
1570 | ||||
1571 | // Check the users of the variable operand of the compare looking for an add | |||
1572 | // with the adjusted constant. | |||
1573 | for (User *U : A->users()) { | |||
1574 | if (match(U, m_Add(m_Specific(A), m_Specific(B)))) { | |||
1575 | Add = cast<BinaryOperator>(U); | |||
1576 | return true; | |||
1577 | } | |||
1578 | } | |||
1579 | return false; | |||
1580 | } | |||
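// For example (illustrative): '%ov = icmp eq i8 %a, -1' together with a user
// '%inc = add i8 %a, 1' matches the first pattern above: the add wraps
// exactly when %a is the maximum value, so Add is set to %inc.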
1581 | ||||
1582 | /// Try to combine the compare into a call to the llvm.uadd.with.overflow | |||
1583 | /// intrinsic. Return true if any changes were made. | |||
1584 | bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp, | |||
1585 | ModifyDT &ModifiedDT) { | |||
1586 | bool EdgeCase = false; | |||
1587 | Value *A, *B; | |||
1588 | BinaryOperator *Add; | |||
1589 | if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) { | |||
1590 | if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add)) | |||
1591 | return false; | |||
1592 | // Set A and B from the Add matched by matchUAddWithOverflowConstantEdgeCases. | |||
1593 | A = Add->getOperand(0); | |||
1594 | B = Add->getOperand(1); | |||
1595 | EdgeCase = true; | |||
1596 | } | |||
1597 | ||||
1598 | if (!TLI->shouldFormOverflowOp(ISD::UADDO, | |||
1599 | TLI->getValueType(*DL, Add->getType()), | |||
1600 | Add->hasNUsesOrMore(EdgeCase ? 1 : 2))) | |||
1601 | return false; | |||
1602 | ||||
1603 | // We don't want to move around uses of condition values this late, so we | |||
1604 | // check if it is legal to create the call to the intrinsic in the basic | |||
1605 | // block containing the icmp. | |||
1606 | if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse()) | |||
1607 | return false; | |||
1608 | ||||
1609 | if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp, | |||
1610 | Intrinsic::uadd_with_overflow)) | |||
1611 | return false; | |||
1612 | ||||
1613 | // Reset callers - do not crash by iterating over a dead instruction. | |||
1614 | ModifiedDT = ModifyDT::ModifyInstDT; | |||
1615 | return true; | |||
1616 | } | |||
1617 | ||||
1618 | bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp, | |||
1619 | ModifyDT &ModifiedDT) { | |||
1620 | // We are not expecting non-canonical/degenerate code. Just bail out. | |||
1621 | Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1); | |||
1622 | if (isa<Constant>(A) && isa<Constant>(B)) | |||
1623 | return false; | |||
1624 | ||||
1625 | // Convert (A u> B) to (A u< B) to simplify pattern matching. | |||
1626 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | |||
1627 | if (Pred == ICmpInst::ICMP_UGT) { | |||
1628 | std::swap(A, B); | |||
1629 | Pred = ICmpInst::ICMP_ULT; | |||
1630 | } | |||
1631 | // Convert special-case: (A == 0) is the same as (A u< 1). | |||
1632 | if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) { | |||
1633 | B = ConstantInt::get(B->getType(), 1); | |||
1634 | Pred = ICmpInst::ICMP_ULT; | |||
1635 | } | |||
1636 | // Convert special-case: (A != 0) is the same as (0 u< A). | |||
1637 | if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) { | |||
1638 | std::swap(A, B); | |||
1639 | Pred = ICmpInst::ICMP_ULT; | |||
1640 | } | |||
1641 | if (Pred != ICmpInst::ICMP_ULT) | |||
1642 | return false; | |||
1643 | ||||
1644 | // Walk the users of a variable operand of a compare looking for a subtract or | |||
1645 | // add with that same operand. Also match the 2nd operand of the compare to | |||
1646 | // the add/sub, but that may be a negated constant operand of an add. | |||
1647 | Value *CmpVariableOperand = isa<Constant>(A) ? B : A; | |||
1648 | BinaryOperator *Sub = nullptr; | |||
1649 | for (User *U : CmpVariableOperand->users()) { | |||
1650 | // A - B, A u< B --> usubo(A, B) | |||
1651 | if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) { | |||
1652 | Sub = cast<BinaryOperator>(U); | |||
1653 | break; | |||
1654 | } | |||
1655 | ||||
1656 | // A + (-C), A u< C (canonicalized form of (sub A, C)) | |||
1657 | const APInt *CmpC, *AddC; | |||
1658 | if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) && | |||
1659 | match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) { | |||
1660 | Sub = cast<BinaryOperator>(U); | |||
1661 | break; | |||
1662 | } | |||
1663 | } | |||
1664 | if (!Sub) | |||
1665 | return false; | |||
1666 | ||||
1667 | if (!TLI->shouldFormOverflowOp(ISD::USUBO, | |||
1668 | TLI->getValueType(*DL, Sub->getType()), | |||
1669 | Sub->hasNUsesOrMore(1))) | |||
1670 | return false; | |||
1671 | ||||
1672 | if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1), | |||
1673 | Cmp, Intrinsic::usub_with_overflow)) | |||
1674 | return false; | |||
1675 | ||||
1676 | // Reset callers - do not crash by iterating over a dead instruction. | |||
1677 | ModifiedDT = ModifyDT::ModifyInstDT; | |||
1678 | return true; | |||
1679 | } | |||
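// A sketch of the overall rewrite (hypothetical IR, names invented):
//
//   %sub = sub i32 %a, %b
//   %ov  = icmp ult i32 %a, %b        ; unsigned borrow check
//
// becomes
//
//   %s   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %sub = extractvalue { i32, i1 } %s, 0
//   %ov  = extractvalue { i32, i1 } %s, 1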
1680 | ||||
1681 | /// Sink the given CmpInst into user blocks to reduce the number of virtual | |||
1682 | /// registers that must be created and coalesced. This is a clear win except on | |||
1683 | /// targets with multiple condition code registers (PowerPC), where it might | |||
1684 | /// lose; some adjustment may be wanted there. | |||
1685 | /// | |||
1686 | /// Return true if any changes are made. | |||
1687 | static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) { | |||
1688 | if (TLI.hasMultipleConditionRegisters()) | |||
1689 | return false; | |||
1690 | ||||
1691 | // Avoid sinking soft-FP comparisons, since this can move them into a loop. | |||
1692 | if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp)) | |||
1693 | return false; | |||
1694 | ||||
1695 | // Only insert a cmp in each block once. | |||
1696 | DenseMap<BasicBlock *, CmpInst *> InsertedCmps; | |||
1697 | ||||
1698 | bool MadeChange = false; | |||
1699 | for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end(); | |||
1700 | UI != E;) { | |||
1701 | Use &TheUse = UI.getUse(); | |||
1702 | Instruction *User = cast<Instruction>(*UI); | |||
1703 | ||||
1704 | // Preincrement use iterator so we don't invalidate it. | |||
1705 | ++UI; | |||
1706 | ||||
1707 | // Don't bother for PHI nodes. | |||
1708 | if (isa<PHINode>(User)) | |||
1709 | continue; | |||
1710 | ||||
1711 | // Figure out which BB this cmp is used in. | |||
1712 | BasicBlock *UserBB = User->getParent(); | |||
1713 | BasicBlock *DefBB = Cmp->getParent(); | |||
1714 | ||||
1715 | // If this user is in the same block as the cmp, don't change the cmp. | |||
1716 | if (UserBB == DefBB) | |||
1717 | continue; | |||
1718 | ||||
1719 | // If we have already inserted a cmp into this block, use it. | |||
1720 | CmpInst *&InsertedCmp = InsertedCmps[UserBB]; | |||
1721 | ||||
1722 | if (!InsertedCmp) { | |||
1723 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1724 | assert(InsertPt != UserBB->end()); | |||
1725 | InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), | |||
1726 | Cmp->getOperand(0), Cmp->getOperand(1), "", | |||
1727 | &*InsertPt); | |||
1728 | // Propagate the debug info. | |||
1729 | InsertedCmp->setDebugLoc(Cmp->getDebugLoc()); | |||
1730 | } | |||
1731 | ||||
1732 | // Replace a use of the cmp with a use of the new cmp. | |||
1733 | TheUse = InsertedCmp; | |||
1734 | MadeChange = true; | |||
1735 | ++NumCmpUses; | |||
1736 | } | |||
1737 | ||||
1738 | // If we removed all uses, nuke the cmp. | |||
1739 | if (Cmp->use_empty()) { | |||
1740 | Cmp->eraseFromParent(); | |||
1741 | MadeChange = true; | |||
1742 | } | |||
1743 | ||||
1744 | return MadeChange; | |||
1745 | } | |||
1746 | ||||
1747 | /// For pattern like: | |||
1748 | /// | |||
1749 | /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB) | |||
1750 | /// ... | |||
1751 | /// DomBB: | |||
1752 | /// ... | |||
1753 | /// br DomCond, TrueBB, CmpBB | |||
1754 | /// CmpBB: (with DomBB being the single predecessor) | |||
1755 | /// ... | |||
1756 | /// Cmp = icmp eq CmpOp0, CmpOp1 | |||
1757 | /// ... | |||
1758 | /// | |||
1759 | /// This would require two comparisons on targets where the lowering of icmp | |||
1760 | /// sgt/slt differs from the lowering of icmp eq (PowerPC). This function tries | |||
1761 | /// to convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'. | |||
1762 | /// After that, DomCond and Cmp can use the same comparison, eliminating one | |||
1763 | /// comparison. | |||
1764 | /// | |||
1765 | /// Return true if any changes are made. | |||
1766 | static bool foldICmpWithDominatingICmp(CmpInst *Cmp, | |||
1767 | const TargetLowering &TLI) { | |||
1768 | if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp()) | |||
1769 | return false; | |||
1770 | ||||
1771 | ICmpInst::Predicate Pred = Cmp->getPredicate(); | |||
1772 | if (Pred != ICmpInst::ICMP_EQ) | |||
1773 | return false; | |||
1774 | ||||
1775 | // If icmp eq has users other than BranchInst and SelectInst, converting it to | |||
1776 | // icmp slt/sgt would introduce more redundant LLVM IR. | |||
1777 | for (User *U : Cmp->users()) { | |||
1778 | if (isa<BranchInst>(U)) | |||
1779 | continue; | |||
1780 | if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp) | |||
1781 | continue; | |||
1782 | return false; | |||
1783 | } | |||
1784 | ||||
1785 | // This is a cheap/incomplete check for dominance - just match a single | |||
1786 | // predecessor with a conditional branch. | |||
1787 | BasicBlock *CmpBB = Cmp->getParent(); | |||
1788 | BasicBlock *DomBB = CmpBB->getSinglePredecessor(); | |||
1789 | if (!DomBB) | |||
1790 | return false; | |||
1791 | ||||
1792 | // We want to ensure that the only way control gets to the comparison of | |||
1793 | // interest is that a less/greater than comparison on the same operands is | |||
1794 | // false. | |||
1795 | Value *DomCond; | |||
1796 | BasicBlock *TrueBB, *FalseBB; | |||
1797 | if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) | |||
1798 | return false; | |||
1799 | if (CmpBB != FalseBB) | |||
1800 | return false; | |||
1801 | ||||
1802 | Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1); | |||
1803 | ICmpInst::Predicate DomPred; | |||
1804 | if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1)))) | |||
1805 | return false; | |||
1806 | if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT) | |||
1807 | return false; | |||
1808 | ||||
1809 | // Convert the equality comparison to the opposite of the dominating | |||
1810 | // comparison and swap the direction for all branch/select users. | |||
1811 | // We have conceptually converted: | |||
1812 | // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>; | |||
1813 | // to | |||
1814 | // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>; | |||
1815 | // And similarly for branches. | |||
1816 | for (User *U : Cmp->users()) { | |||
1817 | if (auto *BI = dyn_cast<BranchInst>(U)) { | |||
1818 | assert(BI->isConditional() && "Must be conditional"); | |||
1819 | BI->swapSuccessors(); | |||
1820 | continue; | |||
1821 | } | |||
1822 | if (auto *SI = dyn_cast<SelectInst>(U)) { | |||
1823 | // Swap operands | |||
1824 | SI->swapValues(); | |||
1825 | SI->swapProfMetadata(); | |||
1826 | continue; | |||
1827 | } | |||
1828 | llvm_unreachable("Must be a branch or a select"); | |||
1829 | } | |||
1830 | Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred)); | |||
1831 | return true; | |||
1832 | } | |||
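// For example (illustrative): with a dominating '%dom = icmp sgt i32 %a, %b'
// branching to CmpBB on false, '%c = icmp eq i32 %a, %b' in CmpBB becomes
// '%c = icmp slt i32 %a, %b' and the successors of its branch users are
// swapped, so both blocks can reuse a single signed comparison.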
1833 | ||||
1834 | bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) { | |||
1835 | if (sinkCmpExpression(Cmp, *TLI)) | |||
1836 | return true; | |||
1837 | ||||
1838 | if (combineToUAddWithOverflow(Cmp, ModifiedDT)) | |||
1839 | return true; | |||
1840 | ||||
1841 | if (combineToUSubWithOverflow(Cmp, ModifiedDT)) | |||
1842 | return true; | |||
1843 | ||||
1844 | if (foldICmpWithDominatingICmp(Cmp, *TLI)) | |||
1845 | return true; | |||
1846 | ||||
1847 | return false; | |||
1848 | } | |||
1849 | ||||
1850 | /// Duplicate and sink the given 'and' instruction into user blocks where it is | |||
1851 | /// used in a compare to allow isel to generate better code for targets where | |||
1852 | /// this operation can be combined. | |||
1853 | /// | |||
1854 | /// Return true if any changes are made. | |||
1855 | static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI, | |||
1856 | SetOfInstrs &InsertedInsts) { | |||
1857 | // Double-check that we're not trying to optimize an instruction that was | |||
1858 | // already optimized by some other part of this pass. | |||
1859 | assert(!InsertedInsts.count(AndI) && | |||
1860 | "Attempting to optimize already optimized and instruction"); | |||
1861 | (void)InsertedInsts; | |||
1862 | ||||
1863 | // Nothing to do for single use in same basic block. | |||
1864 | if (AndI->hasOneUse() && | |||
1865 | AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) | |||
1866 | return false; | |||
1867 | ||||
1868 | // Try to avoid cases where sinking/duplicating is likely to increase register | |||
1869 | // pressure. | |||
1870 | if (!isa<ConstantInt>(AndI->getOperand(0)) && | |||
1871 | !isa<ConstantInt>(AndI->getOperand(1)) && | |||
1872 | AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) | |||
1873 | return false; | |||
1874 | ||||
1875 | for (auto *U : AndI->users()) { | |||
1876 | Instruction *User = cast<Instruction>(U); | |||
1877 | ||||
1878 | // Only sink 'and' feeding icmp with 0. | |||
1879 | if (!isa<ICmpInst>(User)) | |||
1880 | return false; | |||
1881 | ||||
1882 | auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); | |||
1883 | if (!CmpC || !CmpC->isZero()) | |||
1884 | return false; | |||
1885 | } | |||
1886 | ||||
1887 | if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) | |||
1888 | return false; | |||
1889 | ||||
1890 | LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); | |||
1891 | LLVM_DEBUG(AndI->getParent()->dump()); | |||
1892 | ||||
1893 | // Push the 'and' into the same block as the icmp 0. There should only be | |||
1894 | // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any | |||
1895 | // others, so we don't need to keep track of which BBs we insert into. | |||
1896 | for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); | |||
1897 | UI != E;) { | |||
1898 | Use &TheUse = UI.getUse(); | |||
1899 | Instruction *User = cast<Instruction>(*UI); | |||
1900 | ||||
1901 | // Preincrement use iterator so we don't invalidate it. | |||
1902 | ++UI; | |||
1903 | ||||
1904 | LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); | |||
1905 | ||||
1906 | // Keep the 'and' in the same place if the use is already in the same block. | |||
1907 | Instruction *InsertPt = | |||
1908 | User->getParent() == AndI->getParent() ? AndI : User; | |||
1909 | Instruction *InsertedAnd = | |||
1910 | BinaryOperator::Create(Instruction::And, AndI->getOperand(0), | |||
1911 | AndI->getOperand(1), "", InsertPt); | |||
1912 | // Propagate the debug info. | |||
1913 | InsertedAnd->setDebugLoc(AndI->getDebugLoc()); | |||
1914 | ||||
1915 | // Replace a use of the 'and' with a use of the new 'and'. | |||
1916 | TheUse = InsertedAnd; | |||
1917 | ++NumAndUses; | |||
1918 | LLVM_DEBUG(User->getParent()->dump()); | |||
1919 | } | |||
1920 | ||||
1921 | // We removed all uses, nuke the and. | |||
1922 | AndI->eraseFromParent(); | |||
1923 | return true; | |||
1924 | } | |||
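// A sketch of the duplication (hypothetical IR, names invented): an 'and'
// whose only users are zero compares in other blocks
//
//   def:
//     %m = and i64 %x, 255
//     br label %use
//   use:
//     %c = icmp eq i64 %m, 0
//
// is cloned next to each compare and the original erased:
//
//   use:
//     %m.sunk = and i64 %x, 255
//     %c = icmp eq i64 %m.sunk, 0
//
// so isel can fold the mask and compare on targets where that is beneficial.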
1925 | ||||
1926 | /// Check if the candidates could be combined with a shift instruction, which | |||
1927 | /// includes: | |||
1928 | /// 1. Truncate instruction | |||
1929 | /// 2. And instruction and the imm is a mask of the low bits: | |||
1930 | /// imm & (imm+1) == 0 | |||
1931 | static bool isExtractBitsCandidateUse(Instruction *User) { | |||
1932 | if (!isa<TruncInst>(User)) { | |||
1933 | if (User->getOpcode() != Instruction::And || | |||
1934 | !isa<ConstantInt>(User->getOperand(1))) | |||
1935 | return false; | |||
1936 | ||||
1937 | const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); | |||
1938 | ||||
1939 | if ((Cimm & (Cimm + 1)).getBoolValue()) | |||
1940 | return false; | |||
1941 | } | |||
1942 | return true; | |||
1943 | } | |||
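// For example (illustrative): imm = 0xFF qualifies since 0xFF & 0x100 == 0,
// while imm = 0xF0 does not, since 0xF0 & 0xF1 == 0xF0 != 0.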
1944 | ||||
1945 | /// Sink both shift and truncate instruction to the use of truncate's BB. | |||
1946 | static bool | |||
1947 | SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, | |||
1948 | DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, | |||
1949 | const TargetLowering &TLI, const DataLayout &DL) { | |||
1950 | BasicBlock *UserBB = User->getParent(); | |||
1951 | DenseMap<BasicBlock *, CastInst *> InsertedTruncs; | |||
1952 | auto *TruncI = cast<TruncInst>(User); | |||
1953 | bool MadeChange = false; | |||
1954 | ||||
1955 | for (Value::user_iterator TruncUI = TruncI->user_begin(), | |||
1956 | TruncE = TruncI->user_end(); | |||
1957 | TruncUI != TruncE;) { | |||
1958 | ||||
1959 | Use &TruncTheUse = TruncUI.getUse(); | |||
1960 | Instruction *TruncUser = cast<Instruction>(*TruncUI); | |||
1961 | // Preincrement use iterator so we don't invalidate it. | |||
1962 | ||||
1963 | ++TruncUI; | |||
1964 | ||||
1965 | int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); | |||
1966 | if (!ISDOpcode) | |||
1967 | continue; | |||
1968 | ||||
1969 | // If the use is actually a legal node, there will not be an | |||
1970 | // implicit truncate. | |||
1971 | // FIXME: always querying the result type is just an | |||
1972 | // approximation; some nodes' legality is determined by the | |||
1973 | // operand or other means. There's no good way to find out though. | |||
1974 | if (TLI.isOperationLegalOrCustom( | |||
1975 | ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) | |||
1976 | continue; | |||
1977 | ||||
1978 | // Don't bother for PHI nodes. | |||
1979 | if (isa<PHINode>(TruncUser)) | |||
1980 | continue; | |||
1981 | ||||
1982 | BasicBlock *TruncUserBB = TruncUser->getParent(); | |||
1983 | ||||
1984 | if (UserBB == TruncUserBB) | |||
1985 | continue; | |||
1986 | ||||
1987 | BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; | |||
1988 | CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; | |||
1989 | ||||
1990 | if (!InsertedShift && !InsertedTrunc) { | |||
1991 | BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1992 | assert(InsertPt != TruncUserBB->end()); | |||
1993 | // Sink the shift | |||
1994 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1995 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1996 | "", &*InsertPt); | |||
1997 | else | |||
1998 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1999 | "", &*InsertPt); | |||
2000 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | |||
2001 | ||||
2002 | // Sink the trunc | |||
2003 | BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); | |||
2004 | TruncInsertPt++; | |||
2005 | assert(TruncInsertPt != TruncUserBB->end()); | |||
2006 | ||||
2007 | InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, | |||
2008 | TruncI->getType(), "", &*TruncInsertPt); | |||
2009 | InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); | |||
2010 | ||||
2011 | MadeChange = true; | |||
2012 | ||||
2013 | TruncTheUse = InsertedTrunc; | |||
2014 | } | |||
2015 | } | |||
2016 | return MadeChange; | |||
2017 | } | |||
2018 | ||||
2019 | /// Sink the shift *right* instruction into user blocks if the uses could | |||
2020 | /// potentially be combined with this shift instruction to generate a | |||
2021 | /// BitExtract instruction. It will only be applied if the architecture supports | |||
2022 | /// BitExtract instructions. Here is an example: | |||
2023 | /// BB1: | |||
2024 | /// %x.extract.shift = lshr i64 %arg1, 32 | |||
2025 | /// BB2: | |||
2026 | /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 | |||
2027 | /// ==> | |||
2028 | /// | |||
2029 | /// BB2: | |||
2030 | /// %x.extract.shift.1 = lshr i64 %arg1, 32 | |||
2031 | /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 | |||
2032 | /// | |||
2033 | /// CodeGen will recognize the pattern in BB2 and generate BitExtract | |||
2034 | /// instruction. | |||
2035 | /// Return true if any changes are made. | |||
2036 | static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, | |||
2037 | const TargetLowering &TLI, | |||
2038 | const DataLayout &DL) { | |||
2039 | BasicBlock *DefBB = ShiftI->getParent(); | |||
2040 | ||||
2041 | /// Only insert instructions in each block once. | |||
2042 | DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; | |||
2043 | ||||
2044 | bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); | |||
2045 | ||||
2046 | bool MadeChange = false; | |||
2047 | for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); | |||
2048 | UI != E;) { | |||
2049 | Use &TheUse = UI.getUse(); | |||
2050 | Instruction *User = cast<Instruction>(*UI); | |||
2051 | // Preincrement use iterator so we don't invalidate it. | |||
2052 | ++UI; | |||
2053 | ||||
2054 | // Don't bother for PHI nodes. | |||
2055 | if (isa<PHINode>(User)) | |||
2056 | continue; | |||
2057 | ||||
2058 | if (!isExtractBitsCandidateUse(User)) | |||
2059 | continue; | |||
2060 | ||||
2061 | BasicBlock *UserBB = User->getParent(); | |||
2062 | ||||
2063 | if (UserBB == DefBB) { | |||
2064 | // If the shift and truncate instructions are in the same BB, the use of | |||
2065 | // the truncate (TruncUse) may still introduce another truncate if it is not | |||
2066 | // legal. In this case, we would like to sink both the shift and the truncate | |||
2067 | // instruction to the BB of TruncUse. | |||
2068 | // for example: | |||
2069 | // BB1: | |||
2070 | // i64 shift.result = lshr i64 opnd, imm | |||
2071 | // trunc.result = trunc shift.result to i16 | |||
2072 | // | |||
2073 | // BB2: | |||
2074 | // ----> We will have an implicit truncate here if the architecture does | |||
2075 | // not have i16 compare. | |||
2076 | // cmp i16 trunc.result, opnd2 | |||
2077 | // | |||
2078 | if (isa<TruncInst>(User) && | |||
2079 | shiftIsLegal | |||
2080 | // If the type of the truncate is legal, no truncate will be | |||
2081 | // introduced in other basic blocks. | |||
2082 | && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) | |||
2083 | MadeChange = | |||
2084 | SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); | |||
2085 | ||||
2086 | continue; | |||
2087 | } | |||
2088 | // If we have already inserted a shift into this block, use it. | |||
2089 | BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; | |||
2090 | ||||
2091 | if (!InsertedShift) { | |||
2092 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
2093 | assert(InsertPt != UserBB->end()); | |||
2094 | ||||
2095 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
2096 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
2097 | "", &*InsertPt); | |||
2098 | else | |||
2099 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
2100 | "", &*InsertPt); | |||
2101 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | |||
2102 | ||||
2103 | MadeChange = true; | |||
2104 | } | |||
2105 | ||||
2106 | // Replace a use of the shift with a use of the new shift. | |||
2107 | TheUse = InsertedShift; | |||
2108 | } | |||
2109 | ||||
2110 | // If we removed all uses, or there are none, nuke the shift. | |||
2111 | if (ShiftI->use_empty()) { | |||
2112 | salvageDebugInfo(*ShiftI); | |||
2113 | ShiftI->eraseFromParent(); | |||
2114 | MadeChange = true; | |||
2115 | } | |||
2116 | ||||
2117 | return MadeChange; | |||
2118 | } | |||
2119 | ||||
2120 | /// If counting leading or trailing zeros is an expensive operation and a zero | |||
2121 | /// input is defined, add a check for zero to avoid calling the intrinsic. | |||
2122 | /// | |||
2123 | /// We want to transform: | |||
2124 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) | |||
2125 | /// | |||
2126 | /// into: | |||
2127 | /// entry: | |||
2128 | /// %cmpz = icmp eq i64 %A, 0 | |||
2129 | /// br i1 %cmpz, label %cond.end, label %cond.false | |||
2130 | /// cond.false: | |||
2131 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) | |||
2132 | /// br label %cond.end | |||
2133 | /// cond.end: | |||
2134 | /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] | |||
2135 | /// | |||
2136 | /// If the transform is performed, return true and set ModifiedDT to true. | |||
2137 | static bool despeculateCountZeros(IntrinsicInst *CountZeros, | |||
2138 | const TargetLowering *TLI, | |||
2139 | const DataLayout *DL, ModifyDT &ModifiedDT, | |||
2140 | SmallSet<BasicBlock *, 32> &FreshBBs, | |||
2141 | bool IsHugeFunc) { | |||
2142 | // If a zero input is undefined, it doesn't make sense to despeculate that. | |||
2143 | if (match(CountZeros->getOperand(1), m_One())) | |||
2144 | return false; | |||
2145 | ||||
2146 | // If it's cheap to speculate, there's nothing to do. | |||
2147 | Type *Ty = CountZeros->getType(); | |||
2148 | auto IntrinsicID = CountZeros->getIntrinsicID(); | |||
2149 | if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) || | |||
2150 | (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty))) | |||
2151 | return false; | |||
2152 | ||||
2153 | // Only handle legal scalar cases. Anything else requires too much work. | |||
2154 | unsigned SizeInBits = Ty->getScalarSizeInBits(); | |||
2155 | if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) | |||
2156 | return false; | |||
2157 | ||||
2158 | // Bail if the value is never zero. | |||
2159 | Use &Op = CountZeros->getOperandUse(0); | |||
2160 | if (isKnownNonZero(Op, *DL)) | |||
2161 | return false; | |||
2162 | ||||
2163 | // The intrinsic will be sunk behind a compare against zero and branch. | |||
2164 | BasicBlock *StartBlock = CountZeros->getParent(); | |||
2165 | BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); | |||
2166 | if (IsHugeFunc) | |||
2167 | FreshBBs.insert(CallBlock); | |||
2168 | ||||
2169 | // Create another block after the count zero intrinsic. A PHI will be added | |||
2170 | // in this block to select the result of the intrinsic or the bit-width | |||
2171 | // constant if the input to the intrinsic is zero. | |||
2172 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); | |||
2173 | BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); | |||
2174 | if (IsHugeFunc) | |||
2175 | FreshBBs.insert(EndBlock); | |||
2176 | ||||
2177 | // Set up a builder to create a compare, conditional branch, and PHI. | |||
2178 | IRBuilder<> Builder(CountZeros->getContext()); | |||
2179 | Builder.SetInsertPoint(StartBlock->getTerminator()); | |||
2180 | Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); | |||
2181 | ||||
2182 | // Replace the unconditional branch that was created by the first split with | |||
2183 | // a compare against zero and a conditional branch. | |||
2184 | Value *Zero = Constant::getNullValue(Ty); | |||
2185 | // Avoid introducing branch on poison. This also replaces the ctz operand. | |||
2186 | if (!isGuaranteedNotToBeUndefOrPoison(Op)) | |||
2187 | Op = Builder.CreateFreeze(Op, Op->getName() + ".fr"); | |||
2188 | Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz"); | |||
2189 | Builder.CreateCondBr(Cmp, EndBlock, CallBlock); | |||
2190 | StartBlock->getTerminator()->eraseFromParent(); | |||
2191 | ||||
2192 | // Create a PHI in the end block to select either the output of the intrinsic | |||
2193 | // or the bit width of the operand. | |||
2194 | Builder.SetInsertPoint(&EndBlock->front()); | |||
2195 | PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); | |||
2196 | replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc); | |||
2197 | Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); | |||
2198 | PN->addIncoming(BitWidth, StartBlock); | |||
2199 | PN->addIncoming(CountZeros, CallBlock); | |||
2200 | ||||
2201 | // We are explicitly handling the zero case, so we can set the intrinsic's | |||
2202 | // undefined zero argument to 'true'. This will also prevent reprocessing the | |||
2203 | // intrinsic; we only despeculate when a zero input is defined. | |||
2204 | CountZeros->setArgOperand(1, Builder.getTrue()); | |||
2205 | ModifiedDT = ModifyDT::ModifyBBDT; | |||
2206 | return true; | |||
2207 | } | |||
2208 | ||||
2209 | bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) { | |||
2210 | BasicBlock *BB = CI->getParent(); | |||
2211 | ||||
2212 | // Lower inline assembly if we can. | |||
2213 | // If we found an inline asm expression, and if the target knows how to | |||
2214 | // lower it to normal LLVM code, do so now. | |||
2215 | if (CI->isInlineAsm()) { | |||
2216 | if (TLI->ExpandInlineAsm(CI)) { | |||
2217 | // Avoid invalidating the iterator. | |||
2218 | CurInstIterator = BB->begin(); | |||
2219 | // Avoid processing instructions out of order, which could cause | |||
2220 | // reuse before a value is defined. | |||
2221 | SunkAddrs.clear(); | |||
2222 | return true; | |||
2223 | } | |||
2224 | // Sink address computing for memory operands into the block. | |||
2225 | if (optimizeInlineAsmInst(CI)) | |||
2226 | return true; | |||
2227 | } | |||
2228 | ||||
2229 | // Align the pointer arguments to this call if the target thinks it's a good | |||
2230 | // idea. | |||
2231 | unsigned MinSize; | |||
2232 | Align PrefAlign; | |||
2233 | if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { | |||
2234 | for (auto &Arg : CI->args()) { | |||
2235 | // We want to align both objects whose address is used directly and | |||
2236 | // objects whose address is used in casts and GEPs, though it only makes | |||
2237 | // sense for GEPs if the offset is a multiple of the desired alignment and | |||
2238 | // if size - offset meets the size threshold. | |||
2239 | if (!Arg->getType()->isPointerTy()) | |||
2240 | continue; | |||
2241 | APInt Offset(DL->getIndexSizeInBits( | |||
2242 | cast<PointerType>(Arg->getType())->getAddressSpace()), | |||
2243 | 0); | |||
2244 | Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); | |||
2245 | uint64_t Offset2 = Offset.getLimitedValue(); | |||
2246 | if (!isAligned(PrefAlign, Offset2)) | |||
2247 | continue; | |||
2248 | AllocaInst *AI; | |||
2249 | if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign && | |||
2250 | DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) | |||
2251 | AI->setAlignment(PrefAlign); | |||
2252 | // Global variables can only be aligned if they are defined in this | |||
2253 | // object (i.e. they are uniquely initialized in this object), and | |||
2254 | // over-aligning global variables that have an explicit section is | |||
2255 | // forbidden. | |||
2256 | GlobalVariable *GV; | |||
2257 | if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && | |||
2258 | GV->getPointerAlignment(*DL) < PrefAlign && | |||
2259 | DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2) | |||
2260 | GV->setAlignment(PrefAlign); | |||
2261 | } | |||
2262 | } | |||
2263 | // If this is a memcpy (or similar) then we may be able to improve the | |||
2264 | // alignment. | |||
2265 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { | |||
2266 | Align DestAlign = getKnownAlignment(MI->getDest(), *DL); | |||
2267 | MaybeAlign MIDestAlign = MI->getDestAlign(); | |||
2268 | if (!MIDestAlign || DestAlign > *MIDestAlign) | |||
2269 | MI->setDestAlignment(DestAlign); | |||
2270 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { | |||
2271 | MaybeAlign MTISrcAlign = MTI->getSourceAlign(); | |||
2272 | Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL); | |||
2273 | if (!MTISrcAlign || SrcAlign > *MTISrcAlign) | |||
2274 | MTI->setSourceAlignment(SrcAlign); | |||
2275 | } | |||
2276 | } | |||
2277 | ||||
2278 | // If we have a cold call site, try to sink addressing computation into the | |||
2279 | // cold block. This interacts with our handling for loads and stores to | |||
2280 | // ensure that we can fold all uses of a potential addressing computation | |||
2281 | // into their uses. TODO: generalize this to work over profiling data | |||
2282 | if (CI->hasFnAttr(Attribute::Cold) && !OptSize && | |||
2283 | !llvm::shouldOptimizeForSize(BB, PSI, BFI.get())) | |||
2284 | for (auto &Arg : CI->args()) { | |||
2285 | if (!Arg->getType()->isPointerTy()) | |||
2286 | continue; | |||
2287 | unsigned AS = Arg->getType()->getPointerAddressSpace(); | |||
2288 | if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS)) | |||
2289 | return true; | |||
2290 | } | |||
2291 | ||||
2292 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | |||
2293 | if (II) { | |||
2294 | switch (II->getIntrinsicID()) { | |||
2295 | default: | |||
2296 | break; | |||
2297 | case Intrinsic::assume: | |||
2298 | llvm_unreachable("llvm.assume should have been removed already")::llvm::llvm_unreachable_internal("llvm.assume should have been removed already" , "llvm/lib/CodeGen/CodeGenPrepare.cpp", 2298); | |||
2299 | case Intrinsic::experimental_widenable_condition: { | |||
2300 | // Give up on future widening opportunities so that we can fold away dead | |||
2301 | // paths and merge blocks before going into block-local instruction | |||
2302 | // selection. | |||
2303 | if (II->use_empty()) { | |||
2304 | II->eraseFromParent(); | |||
2305 | return true; | |||
2306 | } | |||
2307 | Constant *RetVal = ConstantInt::getTrue(II->getContext()); | |||
2308 | resetIteratorIfInvalidatedWhileCalling(BB, [&]() { | |||
2309 | replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); | |||
2310 | }); | |||
2311 | return true; | |||
2312 | } | |||
2313 | case Intrinsic::objectsize: | |||
2314 | llvm_unreachable("llvm.objectsize.* should have been lowered already")::llvm::llvm_unreachable_internal("llvm.objectsize.* should have been lowered already" , "llvm/lib/CodeGen/CodeGenPrepare.cpp", 2314); | |||
2315 | case Intrinsic::is_constant: | |||
2316 | llvm_unreachable("llvm.is.constant.* should have been lowered already")::llvm::llvm_unreachable_internal("llvm.is.constant.* should have been lowered already" , "llvm/lib/CodeGen/CodeGenPrepare.cpp", 2316); | |||
2317 | case Intrinsic::aarch64_stlxr: | |||
2318 | case Intrinsic::aarch64_stxr: { | |||
2319 | ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); | |||
2320 | if (!ExtVal || !ExtVal->hasOneUse() || | |||
2321 | ExtVal->getParent() == CI->getParent()) | |||
2322 | return false; | |||
2323 | // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. | |||
2324 | ExtVal->moveBefore(CI); | |||
2325 | // Mark this instruction as "inserted by CGP", so that other | |||
2326 | // optimizations don't touch it. | |||
2327 | InsertedInsts.insert(ExtVal); | |||
2328 | return true; | |||
2329 | } | |||
2330 | ||||
2331 | case Intrinsic::launder_invariant_group: | |||
2332 | case Intrinsic::strip_invariant_group: { | |||
2333 | Value *ArgVal = II->getArgOperand(0); | |||
2334 | auto it = LargeOffsetGEPMap.find(II); | |||
2335 | if (it != LargeOffsetGEPMap.end()) { | |||
2336 | // Merge entries in LargeOffsetGEPMap to reflect the RAUW. | |||
2337 | // Make sure not to have to deal with iterator invalidation | |||
2338 | // after possibly adding ArgVal to LargeOffsetGEPMap. | |||
2339 | auto GEPs = std::move(it->second); | |||
2340 | LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); | |||
2341 | LargeOffsetGEPMap.erase(II); | |||
2342 | } | |||
2343 | ||||
2344 | replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc); | |||
2345 | II->eraseFromParent(); | |||
2346 | return true; | |||
2347 | } | |||
2348 | case Intrinsic::cttz: | |||
2349 | case Intrinsic::ctlz: | |||
2350 | // If counting zeros is expensive, try to avoid it. | |||
2351 | return despeculateCountZeros(II, TLI, DL, ModifiedDT, FreshBBs, | |||
2352 | IsHugeFunc); | |||
2353 | case Intrinsic::fshl: | |||
2354 | case Intrinsic::fshr: | |||
2355 | return optimizeFunnelShift(II); | |||
2356 | case Intrinsic::dbg_assign: | |||
2357 | case Intrinsic::dbg_value: | |||
2358 | return fixupDbgValue(II); | |||
2359 | case Intrinsic::masked_gather: | |||
2360 | return optimizeGatherScatterInst(II, II->getArgOperand(0)); | |||
2361 | case Intrinsic::masked_scatter: | |||
2362 | return optimizeGatherScatterInst(II, II->getArgOperand(1)); | |||
2363 | } | |||
2364 | ||||
2365 | SmallVector<Value *, 2> PtrOps; | |||
2366 | Type *AccessTy; | |||
2367 | if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) | |||
2368 | while (!PtrOps.empty()) { | |||
2369 | Value *PtrVal = PtrOps.pop_back_val(); | |||
2370 | unsigned AS = PtrVal->getType()->getPointerAddressSpace(); | |||
2371 | if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) | |||
2372 | return true; | |||
2373 | } | |||
2374 | } | |||
2375 | ||||
2376 | // From here on out we're working with named functions. | |||
2377 | if (!CI->getCalledFunction()) | |||
2378 | return false; | |||
2379 | ||||
2380 | // Lower all default uses of _chk calls. This is very similar | |||
2381 | // to what InstCombineCalls does, but here we are only lowering calls | |||
2382 | // to fortified library functions (e.g. __memcpy_chk) that have the default | |||
2383 | // "don't know" as the objectsize. Anything else should be left alone. | |||
2384 | FortifiedLibCallSimplifier Simplifier(TLInfo, true); | |||
2385 | IRBuilder<> Builder(CI); | |||
2386 | if (Value *V = Simplifier.optimizeCall(CI, Builder)) { | |||
2387 | replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc); | |||
2388 | CI->eraseFromParent(); | |||
2389 | return true; | |||
2390 | } | |||
2391 | ||||
2392 | return false; | |||
2393 | } | |||
2394 | ||||
2395 | /// Look for opportunities to duplicate return instructions to the predecessor | |||
2396 | /// to enable tail call optimizations. The case it is currently looking for is: | |||
2397 | /// @code | |||
2398 | /// bb0: | |||
2399 | /// %tmp0 = tail call i32 @f0() | |||
2400 | /// br label %return | |||
2401 | /// bb1: | |||
2402 | /// %tmp1 = tail call i32 @f1() | |||
2403 | /// br label %return | |||
2404 | /// bb2: | |||
2405 | /// %tmp2 = tail call i32 @f2() | |||
2406 | /// br label %return | |||
2407 | /// return: | |||
2408 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] | |||
2409 | /// ret i32 %retval | |||
2410 | /// @endcode | |||
2411 | /// | |||
2412 | /// => | |||
2413 | /// | |||
2414 | /// @code | |||
2415 | /// bb0: | |||
2416 | /// %tmp0 = tail call i32 @f0() | |||
2417 | /// ret i32 %tmp0 | |||
2418 | /// bb1: | |||
2419 | /// %tmp1 = tail call i32 @f1() | |||
2420 | /// ret i32 %tmp1 | |||
2421 | /// bb2: | |||
2422 | /// %tmp2 = tail call i32 @f2() | |||
2423 | /// ret i32 %tmp2 | |||
2424 | /// @endcode | |||
2425 | bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, | |||
2426 | ModifyDT &ModifiedDT) { | |||
2427 | if (!BB->getTerminator()) | |||
2428 | return false; | |||
2429 | ||||
2430 | ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); | |||
2431 | if (!RetI) | |||
2432 | return false; | |||
2433 | ||||
2434 | PHINode *PN = nullptr; | |||
2435 | ExtractValueInst *EVI = nullptr; | |||
2436 | BitCastInst *BCI = nullptr; | |||
2437 | Value *V = RetI->getReturnValue(); | |||
2438 | if (V) { | |||
2439 | BCI = dyn_cast<BitCastInst>(V); | |||
2440 | if (BCI) | |||
2441 | V = BCI->getOperand(0); | |||
2442 | ||||
2443 | EVI = dyn_cast<ExtractValueInst>(V); | |||
2444 | if (EVI) { | |||
2445 | V = EVI->getOperand(0); | |||
2446 | if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; })) | |||
2447 | return false; | |||
2448 | } | |||
2449 | ||||
2450 | PN = dyn_cast<PHINode>(V); | |||
2451 | if (!PN) | |||
2452 | return false; | |||
2453 | } | |||
2454 | ||||
2455 | if (PN && PN->getParent() != BB) | |||
2456 | return false; | |||
2457 | ||||
2458 | auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) { | |||
2459 | const BitCastInst *BC = dyn_cast<BitCastInst>(Inst); | |||
2460 | if (BC && BC->hasOneUse()) | |||
2461 | Inst = BC->user_back(); | |||
2462 | ||||
2463 | if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) | |||
2464 | return II->getIntrinsicID() == Intrinsic::lifetime_end; | |||
2465 | return false; | |||
2466 | }; | |||
2467 | ||||
2468 | // Make sure there are no instructions between the first instruction | |||
2469 | // and return. | |||
2470 | const Instruction *BI = BB->getFirstNonPHI(); | |||
2471 | // Skip over debug and the bitcast. | |||
2472 | while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI || | |||
2473 | isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI)) | |||
2474 | BI = BI->getNextNode(); | |||
2475 | if (BI != RetI) | |||
2476 | return false; | |||
2477 | ||||
2478 | /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail | |||
2479 | /// call. | |||
2480 | const Function *F = BB->getParent(); | |||
2481 | SmallVector<BasicBlock *, 4> TailCallBBs; | |||
2482 | if (PN) { | |||
2483 | for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { | |||
2484 | // Look through bitcasts. | |||
2485 | Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts(); | |||
2486 | CallInst *CI = dyn_cast<CallInst>(IncomingVal); | |||
2487 | BasicBlock *PredBB = PN->getIncomingBlock(I); | |||
2488 | // Make sure the phi value is indeed produced by the tail call. | |||
2489 | if (CI && CI->hasOneUse() && CI->getParent() == PredBB && | |||
2490 | TLI->mayBeEmittedAsTailCall(CI) && | |||
2491 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
2492 | TailCallBBs.push_back(PredBB); | |||
2493 | } | |||
2494 | } else { | |||
2495 | SmallPtrSet<BasicBlock *, 4> VisitedBBs; | |||
2496 | for (BasicBlock *Pred : predecessors(BB)) { | |||
2497 | if (!VisitedBBs.insert(Pred).second) | |||
2498 | continue; | |||
2499 | if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) { | |||
2500 | CallInst *CI = dyn_cast<CallInst>(I); | |||
2501 | if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && | |||
2502 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
2503 | TailCallBBs.push_back(Pred); | |||
2504 | } | |||
2505 | } | |||
2506 | } | |||
2507 | ||||
2508 | bool Changed = false; | |||
2509 | for (auto const &TailCallBB : TailCallBBs) { | |||
2510 | // Make sure the call instruction is followed by an unconditional branch to | |||
2511 | // the return block. | |||
2512 | BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator()); | |||
2513 | if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) | |||
2514 | continue; | |||
2515 | ||||
2516 | // Duplicate the return into TailCallBB. | |||
2517 | (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB); | |||
2518 | assert(!VerifyBFIUpdates || | |||
2519 | BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB)); | |||
2520 | BFI->setBlockFreq( | |||
2521 | BB, | |||
2522 | (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency()); | |||
2523 | ModifiedDT = ModifyDT::ModifyBBDT; | |||
2524 | Changed = true; | |||
2525 | ++NumRetsDup; | |||
2526 | } | |||
2527 | ||||
2528 | // If we eliminated all predecessors of the block, delete the block now. | |||
2529 | if (Changed && !BB->hasAddressTaken() && pred_empty(BB)) | |||
2530 | BB->eraseFromParent(); | |||
2531 | ||||
2532 | return Changed; | |||
2533 | } | |||
2534 | ||||
2535 | //===----------------------------------------------------------------------===// | |||
2536 | // Memory Optimization | |||
2537 | //===----------------------------------------------------------------------===// | |||
2538 | ||||
2539 | namespace { | |||
2540 | ||||
2541 | /// This is an extended version of TargetLowering::AddrMode | |||
2542 | /// which holds actual Value*'s for register values. | |||
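/// An address decomposes as BaseGV + BaseReg + BaseOffs + ScaledReg * Scale.
/// As an illustrative sketch (values invented for this example), a load of
/// p[i*4 + 8] could match as { BaseReg = %p, BaseOffs = 8, ScaledReg = %i,
/// Scale = 4 }.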
2543 | struct ExtAddrMode : public TargetLowering::AddrMode { | |||
2544 | Value *BaseReg = nullptr; | |||
2545 | Value *ScaledReg = nullptr; | |||
2546 | Value *OriginalValue = nullptr; | |||
2547 | bool InBounds = true; | |||
2548 | ||||
2549 | enum FieldName { | |||
2550 | NoField = 0x00, | |||
2551 | BaseRegField = 0x01, | |||
2552 | BaseGVField = 0x02, | |||
2553 | BaseOffsField = 0x04, | |||
2554 | ScaledRegField = 0x08, | |||
2555 | ScaleField = 0x10, | |||
2556 | MultipleFields = 0xff | |||
2557 | }; | |||
2558 | ||||
2559 | ExtAddrMode() = default; | |||
2560 | ||||
2561 | void print(raw_ostream &OS) const; | |||
2562 | void dump() const; | |||
2563 | ||||
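// Compare this addressing mode against \p other and report which single
// field differs. E.g. (sketch) [Base:%p + 8] vs [Base:%p + 16] differ only
// in BaseOffsField; mismatched types, a mismatched 'inbounds' flag, or two
// or more differing fields all yield MultipleFields.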
2564 | FieldName compare(const ExtAddrMode &other) { | |||
2565 | // First check that the types are the same on each field, as differing types | |||
2566 | // is something we can't cope with later on. | |||
2567 | if (BaseReg && other.BaseReg && | |||
2568 | BaseReg->getType() != other.BaseReg->getType()) | |||
2569 | return MultipleFields; | |||
2570 | if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType()) | |||
2571 | return MultipleFields; | |||
2572 | if (ScaledReg && other.ScaledReg && | |||
2573 | ScaledReg->getType() != other.ScaledReg->getType()) | |||
2574 | return MultipleFields; | |||
2575 | ||||
2576 | // Conservatively reject 'inbounds' mismatches. | |||
2577 | if (InBounds != other.InBounds) | |||
2578 | return MultipleFields; | |||
2579 | ||||
2580 | // Check each field to see if it differs. | |||
2581 | unsigned Result = NoField; | |||
2582 | if (BaseReg != other.BaseReg) | |||
2583 | Result |= BaseRegField; | |||
2584 | if (BaseGV != other.BaseGV) | |||
2585 | Result |= BaseGVField; | |||
2586 | if (BaseOffs != other.BaseOffs) | |||
2587 | Result |= BaseOffsField; | |||
2588 | if (ScaledReg != other.ScaledReg) | |||
2589 | Result |= ScaledRegField; | |||
2590 | // Don't count 0 as being a different scale, because that actually means | |||
2591 | // unscaled (which will already be counted by having no ScaledReg). | |||
2592 | if (Scale && other.Scale && Scale != other.Scale) | |||
2593 | Result |= ScaleField; | |||
2594 | ||||
2595 | if (llvm::popcount(Result) > 1) | |||
2596 | return MultipleFields; | |||
2597 | else | |||
2598 | return static_cast<FieldName>(Result); | |||
2599 | } | |||
2600 | ||||
2601 | // An AddrMode is trivial if it involves no calculation, i.e. it is just a base | |||
2602 | // with no offset. | |||
2603 | bool isTrivial() { | |||
2604 | // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is | |||
2605 | // trivial if at most one of these terms is nonzero, except that BaseGV and | |||
2606 | // BaseReg both being zero actually means a null pointer value, which we | |||
2607 | // consider to be 'non-zero' here. | |||
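// E.g. (sketch) {BaseReg = %p} alone is trivial, while {BaseReg = %p,
// BaseOffs = 4} or {BaseGV = @g, BaseReg = %p} is not.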
2608 | return !BaseOffs && !Scale && !(BaseGV && BaseReg); | |||
2609 | } | |||
2610 | ||||
2611 | Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { | |||
2612 | switch (Field) { | |||
2613 | default: | |||
2614 | return nullptr; | |||
2615 | case BaseRegField: | |||
2616 | return BaseReg; | |||
2617 | case BaseGVField: | |||
2618 | return BaseGV; | |||
2619 | case ScaledRegField: | |||
2620 | return ScaledReg; | |||
2621 | case BaseOffsField: | |||
2622 | return ConstantInt::get(IntPtrTy, BaseOffs); | |||
2623 | } | |||
2624 | } | |||
2625 | ||||
2626 | void SetCombinedField(FieldName Field, Value *V, | |||
2627 | const SmallVectorImpl<ExtAddrMode> &AddrModes) { | |||
2628 | switch (Field) { | |||
2629 | default: | |||
2630 | llvm_unreachable("Unhandled fields are expected to be rejected earlier")::llvm::llvm_unreachable_internal("Unhandled fields are expected to be rejected earlier" , "llvm/lib/CodeGen/CodeGenPrepare.cpp", 2630); | |||
2631 | break; | |||
2632 | case ExtAddrMode::BaseRegField: | |||
2633 | BaseReg = V; | |||
2634 | break; | |||
2635 | case ExtAddrMode::BaseGVField: | |||
2636 | // A combined BaseGV is an Instruction, not a GlobalValue, so it goes | |||
2637 | // in the BaseReg field. | |||
2638 | assert(BaseReg == nullptr); | |||
2639 | BaseReg = V; | |||
2640 | BaseGV = nullptr; | |||
2641 | break; | |||
2642 | case ExtAddrMode::ScaledRegField: | |||
2643 | ScaledReg = V; | |||
2644 | // If we have a mix of scaled and unscaled addrmodes then we want scale | |||
2645 | // to be the scale and not zero. | |||
2646 | if (!Scale) | |||
2647 | for (const ExtAddrMode &AM : AddrModes) | |||
2648 | if (AM.Scale) { | |||
2649 | Scale = AM.Scale; | |||
2650 | break; | |||
2651 | } | |||
2652 | break; | |||
2653 | case ExtAddrMode::BaseOffsField: | |||
2654 | // The offset is no longer a constant, so it goes in ScaledReg with a | |||
2655 | // scale of 1. | |||
2656 | assert(ScaledReg == nullptr); | |||
2657 | ScaledReg = V; | |||
2658 | Scale = 1; | |||
2659 | BaseOffs = 0; | |||
2660 | break; | |||
2661 | } | |||
2662 | } | |||
2663 | }; | |||
2664 | ||||
2665 | #ifndef NDEBUG | |||
2666 | static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { | |||
2667 | AM.print(OS); | |||
2668 | return OS; | |||
2669 | } | |||
2670 | #endif | |||
2671 | ||||
2672 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
2673 | void ExtAddrMode::print(raw_ostream &OS) const { | |||
2674 | bool NeedPlus = false; | |||
2675 | OS << "["; | |||
2676 | if (InBounds) | |||
2677 | OS << "inbounds "; | |||
2678 | if (BaseGV) { | |||
2679 | OS << "GV:"; | |||
2680 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | |||
2681 | NeedPlus = true; | |||
2682 | } | |||
2683 | ||||
2684 | if (BaseOffs) { | |||
2685 | OS << (NeedPlus ? " + " : "") << BaseOffs; | |||
2686 | NeedPlus = true; | |||
2687 | } | |||
2688 | ||||
2689 | if (BaseReg) { | |||
2690 | OS << (NeedPlus ? " + " : "") << "Base:"; | |||
2691 | BaseReg->printAsOperand(OS, /*PrintType=*/false); | |||
2692 | NeedPlus = true; | |||
2693 | } | |||
2694 | if (Scale) { | |||
2695 | OS << (NeedPlus ? " + " : "") << Scale << "*"; | |||
2696 | ScaledReg->printAsOperand(OS, /*PrintType=*/false); | |||
2697 | } | |||
2698 | ||||
2699 | OS << ']'; | |||
2700 | } | |||
2701 | ||||
2702 | LLVM_DUMP_METHOD void ExtAddrMode::dump() const { | |||
2703 | print(dbgs()); | |||
2704 | dbgs() << '\n'; | |||
2705 | } | |||
2706 | #endif | |||
2707 | ||||
2708 | } // end anonymous namespace | |||
2709 | ||||
2710 | namespace { | |||
2711 | ||||
2712 | /// This class provides transaction-based operations on the IR. | |||
2713 | /// Every change made through this class is recorded in the internal state and | |||
2714 | /// can be undone (rollback) until commit is called. | |||
2715 | /// CGP does not check if instructions could be speculatively executed when | |||
2716 | /// moved. Preserving the original location would pessimize the debugging | |||
2717 | /// experience, as well as negatively impact the quality of sample PGO. | |||
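///
/// A typical use, as a sketch of the API below:
/// @code
/// TypePromotionTransaction TPT(RemovedInsts);
/// TypePromotionTransaction::ConstRestorationPt LastKnownGood =
///     TPT.getRestorationPoint();
/// TPT.mutateType(Inst, NewTy);     // Recorded and undoable.
/// if (!Profitable)                 // 'Profitable' stands in for the
///   TPT.rollback(LastKnownGood);   // caller's own test; the IR reverts
/// else                             // to the saved point.
///   TPT.commit();                  // Changes become permanent.
/// @endcode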
2718 | class TypePromotionTransaction { | |||
2719 | /// This represents the common interface of the individual transaction. | |||
2720 | /// Each class implements the logic for doing one specific modification on | |||
2721 | /// the IR via the TypePromotionTransaction. | |||
2722 | class TypePromotionAction { | |||
2723 | protected: | |||
2724 | /// The Instruction modified. | |||
2725 | Instruction *Inst; | |||
2726 | ||||
2727 | public: | |||
2728 | /// Constructor of the action. | |||
2729 | /// The constructor performs the related action on the IR. | |||
2730 | TypePromotionAction(Instruction *Inst) : Inst(Inst) {} | |||
2731 | ||||
2732 | virtual ~TypePromotionAction() = default; | |||
2733 | ||||
2734 | /// Undo the modification done by this action. | |||
2735 | /// When this method is called, the IR must be in the same state as it was | |||
2736 | /// before this action was applied. | |||
2737 | /// \pre Undoing the action works if and only if the IR is in the exact same | |||
2738 | /// state as it was directly after this action was applied. | |||
2739 | virtual void undo() = 0; | |||
2740 | ||||
2741 | /// Commit every change made by this action. | |||
2742 | /// When the action's results on the IR are to be kept, it is important to | |||
2743 | /// call this function; otherwise internal bookkeeping may be kept forever. | |||
2744 | virtual void commit() { | |||
2745 | // Nothing to be done, this action is not doing anything. | |||
2746 | } | |||
2747 | }; | |||
2748 | ||||
2749 | /// Utility to remember the position of an instruction. | |||
2750 | class InsertionHandler { | |||
2751 | /// Position of an instruction. | |||
2752 | /// Either an instruction: | |||
2753 | /// - Is the first in a basic block: BB is used. | |||
2754 | /// - Has a previous instruction: PrevInst is used. | |||
2755 | union { | |||
2756 | Instruction *PrevInst; | |||
2757 | BasicBlock *BB; | |||
2758 | } Point; | |||
2759 | ||||
2760 | /// Remember whether or not the instruction had a previous instruction. | |||
2761 | bool HasPrevInstruction; | |||
2762 | ||||
2763 | public: | |||
2764 | /// Record the position of \p Inst. | |||
2765 | InsertionHandler(Instruction *Inst) { | |||
2766 | BasicBlock::iterator It = Inst->getIterator(); | |||
2767 | HasPrevInstruction = (It != (Inst->getParent()->begin())); | |||
2768 | if (HasPrevInstruction) | |||
2769 | Point.PrevInst = &*--It; | |||
2770 | else | |||
2771 | Point.BB = Inst->getParent(); | |||
2772 | } | |||
2773 | ||||
2774 | /// Insert \p Inst at the recorded position. | |||
2775 | void insert(Instruction *Inst) { | |||
2776 | if (HasPrevInstruction) { | |||
2777 | if (Inst->getParent()) | |||
2778 | Inst->removeFromParent(); | |||
2779 | Inst->insertAfter(Point.PrevInst); | |||
2780 | } else { | |||
2781 | Instruction *Position = &*Point.BB->getFirstInsertionPt(); | |||
2782 | if (Inst->getParent()) | |||
2783 | Inst->moveBefore(Position); | |||
2784 | else | |||
2785 | Inst->insertBefore(Position); | |||
2786 | } | |||
2787 | } | |||
2788 | }; | |||
2789 | ||||
2790 | /// Move an instruction before another. | |||
2791 | class InstructionMoveBefore : public TypePromotionAction { | |||
2792 | /// Original position of the instruction. | |||
2793 | InsertionHandler Position; | |||
2794 | ||||
2795 | public: | |||
2796 | /// Move \p Inst before \p Before. | |||
2797 | InstructionMoveBefore(Instruction *Inst, Instruction *Before) | |||
2798 | : TypePromotionAction(Inst), Position(Inst) { | |||
2799 | LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Beforedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: move: " << * Inst << "\nbefore: " << *Before << "\n"; } } while (false) | |||
2800 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: move: " << * Inst << "\nbefore: " << *Before << "\n"; } } while (false); | |||
2801 | Inst->moveBefore(Before); | |||
2802 | } | |||
2803 | ||||
2804 | /// Move the instruction back to its original position. | |||
2805 | void undo() override { | |||
2806 | LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: moveBefore: " << *Inst << "\n"; } } while (false); | |||
2807 | Position.insert(Inst); | |||
2808 | } | |||
2809 | }; | |||
2810 | ||||
2811 | /// Set the operand of an instruction with a new value. | |||
2812 | class OperandSetter : public TypePromotionAction { | |||
2813 | /// Original operand of the instruction. | |||
2814 | Value *Origin; | |||
2815 | ||||
2816 | /// Index of the modified instruction. | |||
2817 | unsigned Idx; | |||
2818 | ||||
2819 | public: | |||
2820 | /// Set \p Idx operand of \p Inst with \p NewVal. | |||
2821 | OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) | |||
2822 | : TypePromotionAction(Inst), Idx(Idx) { | |||
2823 | LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: setOperand: " << Idx << "\n" << "for:" << *Inst << "\n" << "with:" << *NewVal << "\n"; } } while ( false) | |||
2824 | << "for:" << *Inst << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: setOperand: " << Idx << "\n" << "for:" << *Inst << "\n" << "with:" << *NewVal << "\n"; } } while ( false) | |||
2825 | << "with:" << *NewVal << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: setOperand: " << Idx << "\n" << "for:" << *Inst << "\n" << "with:" << *NewVal << "\n"; } } while ( false); | |||
2826 | Origin = Inst->getOperand(Idx); | |||
2827 | Inst->setOperand(Idx, NewVal); | |||
2828 | } | |||
2829 | ||||
2830 | /// Restore the original value of the instruction. | |||
2831 | void undo() override { | |||
2832 | LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" << "with: " << *Origin << "\n"; } } while ( false) | |||
2833 | << "for: " << *Inst << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" << "with: " << *Origin << "\n"; } } while ( false) | |||
2834 | << "with: " << *Origin << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" << "with: " << *Origin << "\n"; } } while ( false); | |||
2835 | Inst->setOperand(Idx, Origin); | |||
2836 | } | |||
2837 | }; | |||
2838 | ||||
2839 | /// Hide the operands of an instruction. | |||
2840 | /// Do as if this instruction was not using any of its operands. | |||
2841 | class OperandsHider : public TypePromotionAction { | |||
2842 | /// The list of original operands. | |||
2843 | SmallVector<Value *, 4> OriginalValues; | |||
2844 | ||||
2845 | public: | |||
2846 | /// Remove \p Inst from the uses of the operands of \p Inst. | |||
2847 | OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { | |||
2848 | LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: OperandsHider: " << *Inst << "\n"; } } while (false); | |||
2849 | unsigned NumOpnds = Inst->getNumOperands(); | |||
2850 | OriginalValues.reserve(NumOpnds); | |||
2851 | for (unsigned It = 0; It < NumOpnds; ++It) { | |||
2852 | // Save the current operand. | |||
2853 | Value *Val = Inst->getOperand(It); | |||
2854 | OriginalValues.push_back(Val); | |||
2855 | // Set a dummy one. | |||
2856 | // We could use OperandSetter here, but that would imply an overhead | |||
2857 | // that we are not willing to pay. | |||
2858 | Inst->setOperand(It, UndefValue::get(Val->getType())); | |||
2859 | } | |||
2860 | } | |||
2861 | ||||
2862 | /// Restore the original list of uses. | |||
2863 | void undo() override { | |||
2864 | LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: OperandsHider: " << *Inst << "\n"; } } while (false); | |||
2865 | for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) | |||
2866 | Inst->setOperand(It, OriginalValues[It]); | |||
2867 | } | |||
2868 | }; | |||
2869 | ||||
2870 | /// Build a truncate instruction. | |||
2871 | class TruncBuilder : public TypePromotionAction { | |||
2872 | Value *Val; | |||
2873 | ||||
2874 | public: | |||
2875 | /// Build a truncate instruction of \p Opnd producing a \p Ty | |||
2876 | /// result. | |||
2877 | /// trunc Opnd to Ty. | |||
2878 | TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { | |||
2879 | IRBuilder<> Builder(Opnd); | |||
2880 | Builder.SetCurrentDebugLocation(DebugLoc()); | |||
2881 | Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); | |||
2882 | LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: TruncBuilder: " << *Val << "\n"; } } while (false); | |||
2883 | } | |||
2884 | ||||
2885 | /// Get the built value. | |||
2886 | Value *getBuiltValue() { return Val; } | |||
2887 | ||||
2888 | /// Remove the built instruction. | |||
2889 | void undo() override { | |||
2890 | LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: TruncBuilder: " << *Val << "\n"; } } while (false); | |||
2891 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2892 | IVal->eraseFromParent(); | |||
2893 | } | |||
2894 | }; | |||
2895 | ||||
2896 | /// Build a sign extension instruction. | |||
2897 | class SExtBuilder : public TypePromotionAction { | |||
2898 | Value *Val; | |||
2899 | ||||
2900 | public: | |||
2901 | /// Build a sign extension instruction of \p Opnd producing a \p Ty | |||
2902 | /// result. | |||
2903 | /// sext Opnd to Ty. | |||
2904 | SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2905 | : TypePromotionAction(InsertPt) { | |||
2906 | IRBuilder<> Builder(InsertPt); | |||
2907 | Val = Builder.CreateSExt(Opnd, Ty, "promoted"); | |||
2908 | LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: SExtBuilder: " << *Val << "\n"; } } while (false); | |||
2909 | } | |||
2910 | ||||
2911 | /// Get the built value. | |||
2912 | Value *getBuiltValue() { return Val; } | |||
2913 | ||||
2914 | /// Remove the built instruction. | |||
2915 | void undo() override { | |||
2916 | LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: SExtBuilder: " << *Val << "\n"; } } while (false); | |||
2917 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2918 | IVal->eraseFromParent(); | |||
2919 | } | |||
2920 | }; | |||
2921 | ||||
2922 | /// Build a zero extension instruction. | |||
2923 | class ZExtBuilder : public TypePromotionAction { | |||
2924 | Value *Val; | |||
2925 | ||||
2926 | public: | |||
2927 | /// Build a zero extension instruction of \p Opnd producing a \p Ty | |||
2928 | /// result. | |||
2929 | /// zext Opnd to Ty. | |||
2930 | ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2931 | : TypePromotionAction(InsertPt) { | |||
2932 | IRBuilder<> Builder(InsertPt); | |||
2933 | Builder.SetCurrentDebugLocation(DebugLoc()); | |||
2934 | Val = Builder.CreateZExt(Opnd, Ty, "promoted"); | |||
2935 | LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: ZExtBuilder: " << *Val << "\n"; } } while (false); | |||
2936 | } | |||
2937 | ||||
2938 | /// Get the built value. | |||
2939 | Value *getBuiltValue() { return Val; } | |||
2940 | ||||
2941 | /// Remove the built instruction. | |||
2942 | void undo() override { | |||
2943 | LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"; } } while (false); | |||
2944 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2945 | IVal->eraseFromParent(); | |||
2946 | } | |||
2947 | }; | |||
2948 | ||||
2949 | /// Mutate an instruction to another type. | |||
2950 | class TypeMutator : public TypePromotionAction { | |||
2951 | /// Record the original type. | |||
2952 | Type *OrigTy; | |||
2953 | ||||
2954 | public: | |||
2955 | /// Mutate the type of \p Inst into \p NewTy. | |||
2956 | TypeMutator(Instruction *Inst, Type *NewTy) | |||
2957 | : TypePromotionAction(Inst), OrigTy(Inst->getType()) { | |||
2958 | LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTydo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy << "\n"; } } while (false) | |||
2959 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy << "\n"; } } while (false); | |||
2960 | Inst->mutateType(NewTy); | |||
2961 | } | |||
2962 | ||||
2963 | /// Mutate the instruction back to its original type. | |||
2964 | void undo() override { | |||
2965 | LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTydo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy << "\n"; } } while (false) | |||
2966 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy << "\n"; } } while (false); | |||
2967 | Inst->mutateType(OrigTy); | |||
2968 | } | |||
2969 | }; | |||
2970 | ||||
2971 | /// Replace the uses of an instruction by another instruction. | |||
2972 | class UsesReplacer : public TypePromotionAction { | |||
2973 | /// Helper structure to keep track of the replaced uses. | |||
2974 | struct InstructionAndIdx { | |||
2975 | /// The instruction using the replaced instruction. | |||
2976 | Instruction *Inst; | |||
2977 | ||||
2978 | /// The operand index at which the replaced instruction is used by Inst. | |||
2979 | unsigned Idx; | |||
2980 | ||||
2981 | InstructionAndIdx(Instruction *Inst, unsigned Idx) | |||
2982 | : Inst(Inst), Idx(Idx) {} | |||
2983 | }; | |||
2984 | ||||
2985 | /// Keep track of the original uses (pair Instruction, Index). | |||
2986 | SmallVector<InstructionAndIdx, 4> OriginalUses; | |||
2987 | /// Keep track of the debug users. | |||
2988 | SmallVector<DbgValueInst *, 1> DbgValues; | |||
2989 | ||||
2990 | /// Keep track of the new value so that we can undo it by replacing | |||
2991 | /// instances of the new value with the original value. | |||
2992 | Value *New; | |||
2993 | ||||
2994 | using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; | |||
2995 | ||||
2996 | public: | |||
2997 | /// Replace all the use of \p Inst by \p New. | |||
2998 | UsesReplacer(Instruction *Inst, Value *New) | |||
2999 | : TypePromotionAction(Inst), New(New) { | |||
3000 | LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *Newdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New << "\n"; } } while (false) | |||
3001 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New << "\n"; } } while (false); | |||
3002 | // Record the original uses. | |||
3003 | for (Use &U : Inst->uses()) { | |||
3004 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
3005 | OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); | |||
3006 | } | |||
3007 | // Record the debug uses separately. They are not in the instruction's | |||
3008 | // use list, but they are replaced by RAUW. | |||
3009 | findDbgValues(DbgValues, Inst); | |||
3010 | ||||
3011 | // Now, we can replace the uses. | |||
3012 | Inst->replaceAllUsesWith(New); | |||
3013 | } | |||
3014 | ||||
3015 | /// Reassign the original uses of Inst to Inst. | |||
3016 | void undo() override { | |||
3017 | LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"; } } while (false); | |||
3018 | for (InstructionAndIdx &Use : OriginalUses) | |||
3019 | Use.Inst->setOperand(Use.Idx, Inst); | |||
3020 | // RAUW has replaced all original uses with references to the new value, | |||
3021 | // including the debug uses. Since we are undoing the replacements, | |||
3022 | // the original debug uses must also be reinstated to maintain the | |||
3023 | // correctness and utility of debug value instructions. | |||
3024 | for (auto *DVI : DbgValues) | |||
3025 | DVI->replaceVariableLocationOp(New, Inst); | |||
3026 | } | |||
3027 | }; | |||
3028 | ||||
3029 | /// Remove an instruction from the IR. | |||
3030 | class InstructionRemover : public TypePromotionAction { | |||
3031 | /// Original position of the instruction. | |||
3032 | InsertionHandler Inserter; | |||
3033 | ||||
3034 | /// Helper structure to hide all the link to the instruction. In other | |||
3035 | /// words, this helps to do as if the instruction was removed. | |||
3036 | OperandsHider Hider; | |||
3037 | ||||
3038 | /// Keep track of the uses replaced, if any. | |||
3039 | UsesReplacer *Replacer = nullptr; | |||
3040 | ||||
3041 | /// Keep track of instructions removed. | |||
3042 | SetOfInstrs &RemovedInsts; | |||
3043 | ||||
3044 | public: | |||
3045 | /// Remove all references to \p Inst and optionally replace all its | |||
3046 | /// uses with New. | |||
3047 | /// \p RemovedInsts Keep track of the instructions removed by this Action. | |||
3048 | /// \pre If !Inst->use_empty(), then New != nullptr | |||
3049 | InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, | |||
3050 | Value *New = nullptr) | |||
3051 | : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), | |||
3052 | RemovedInsts(RemovedInsts) { | |||
3053 | if (New) | |||
3054 | Replacer = new UsesReplacer(Inst, New); | |||
3055 | LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: InstructionRemover: " << *Inst << "\n"; } } while (false); | |||
3056 | RemovedInsts.insert(Inst); | |||
3057 | /// The instructions removed here will be freed after completing | |||
3058 | /// optimizeBlock() for all blocks as we need to keep track of the | |||
3059 | /// removed instructions during promotion. | |||
3060 | Inst->removeFromParent(); | |||
3061 | } | |||
3062 | ||||
3063 | ~InstructionRemover() override { delete Replacer; } | |||
3064 | ||||
3065 | /// Resurrect the instruction and reassign it to the proper uses if a | |||
3066 | /// new value was provided when building this action. | |||
3067 | void undo() override { | |||
3068 | LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"; } } while (false); | |||
3069 | Inserter.insert(Inst); | |||
3070 | if (Replacer) | |||
3071 | Replacer->undo(); | |||
3072 | Hider.undo(); | |||
3073 | RemovedInsts.erase(Inst); | |||
3074 | } | |||
3075 | }; | |||
3076 | ||||
3077 | public: | |||
3078 | /// Restoration point. | |||
3079 | /// The restoration point is a pointer to an action instead of an iterator | |||
3080 | /// because the iterator may be invalidated but not the pointer. | |||
3081 | using ConstRestorationPt = const TypePromotionAction *; | |||
3082 | ||||
3083 | TypePromotionTransaction(SetOfInstrs &RemovedInsts) | |||
3084 | : RemovedInsts(RemovedInsts) {} | |||
3085 | ||||
3086 | /// Commit every change made in this transaction. Return true if any change | |||
3087 | /// happened. | |||
3088 | bool commit(); | |||
3089 | ||||
3090 | /// Undo all the changes made after the given point. | |||
3091 | void rollback(ConstRestorationPt Point); | |||
3092 | ||||
3093 | /// Get the current restoration point. | |||
3094 | ConstRestorationPt getRestorationPoint() const; | |||
3095 | ||||
3096 | /// \name API for IR modification with state keeping to support rollback. | |||
3097 | /// @{ | |||
3098 | /// Same as Instruction::setOperand. | |||
3099 | void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); | |||
3100 | ||||
3101 | /// Same as Instruction::eraseFromParent. | |||
3102 | void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); | |||
3103 | ||||
3104 | /// Same as Value::replaceAllUsesWith. | |||
3105 | void replaceAllUsesWith(Instruction *Inst, Value *New); | |||
3106 | ||||
3107 | /// Same as Value::mutateType. | |||
3108 | void mutateType(Instruction *Inst, Type *NewTy); | |||
3109 | ||||
3110 | /// Same as IRBuilder::createTrunc. | |||
3111 | Value *createTrunc(Instruction *Opnd, Type *Ty); | |||
3112 | ||||
3113 | /// Same as IRBuilder::createSExt. | |||
3114 | Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
3115 | ||||
3116 | /// Same as IRBuilder::createZExt. | |||
3117 | Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
3118 | ||||
3119 | /// Same as Instruction::moveBefore. | |||
3120 | void moveBefore(Instruction *Inst, Instruction *Before); | |||
3121 | /// @} | |||
3122 | ||||
3123 | private: | |||
3124 | /// The ordered list of actions made so far. | |||
3125 | SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; | |||
3126 | ||||
3127 | using CommitPt = | |||
3128 | SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; | |||
3129 | ||||
3130 | SetOfInstrs &RemovedInsts; | |||
3131 | }; | |||
3132 | ||||
3133 | } // end anonymous namespace | |||
3134 | ||||
3135 | void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, | |||
3136 | Value *NewVal) { | |||
3137 | Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>( | |||
3138 | Inst, Idx, NewVal)); | |||
3139 | } | |||
3140 | ||||
3141 | void TypePromotionTransaction::eraseInstruction(Instruction *Inst, | |||
3142 | Value *NewVal) { | |||
3143 | Actions.push_back( | |||
3144 | std::make_unique<TypePromotionTransaction::InstructionRemover>( | |||
3145 | Inst, RemovedInsts, NewVal)); | |||
3146 | } | |||
3147 | ||||
3148 | void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, | |||
3149 | Value *New) { | |||
3150 | Actions.push_back( | |||
3151 | std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); | |||
3152 | } | |||
3153 | ||||
3154 | void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { | |||
3155 | Actions.push_back( | |||
3156 | std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); | |||
3157 | } | |||
3158 | ||||
3159 | Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) { | |||
3160 | std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); | |||
3161 | Value *Val = Ptr->getBuiltValue(); | |||
3162 | Actions.push_back(std::move(Ptr)); | |||
3163 | return Val; | |||
3164 | } | |||
3165 | ||||
3166 | Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd, | |||
3167 | Type *Ty) { | |||
3168 | std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); | |||
3169 | Value *Val = Ptr->getBuiltValue(); | |||
3170 | Actions.push_back(std::move(Ptr)); | |||
3171 | return Val; | |||
3172 | } | |||
3173 | ||||
3174 | Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd, | |||
3175 | Type *Ty) { | |||
3176 | std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); | |||
3177 | Value *Val = Ptr->getBuiltValue(); | |||
3178 | Actions.push_back(std::move(Ptr)); | |||
3179 | return Val; | |||
3180 | } | |||
3181 | ||||
3182 | void TypePromotionTransaction::moveBefore(Instruction *Inst, | |||
3183 | Instruction *Before) { | |||
3184 | Actions.push_back( | |||
3185 | std::make_unique<TypePromotionTransaction::InstructionMoveBefore>( | |||
3186 | Inst, Before)); | |||
3187 | } | |||
3188 | ||||
3189 | TypePromotionTransaction::ConstRestorationPt | |||
3190 | TypePromotionTransaction::getRestorationPoint() const { | |||
3191 | return !Actions.empty() ? Actions.back().get() : nullptr; | |||
3192 | } | |||
3193 | ||||
3194 | bool TypePromotionTransaction::commit() { | |||
3195 | for (std::unique_ptr<TypePromotionAction> &Action : Actions) | |||
3196 | Action->commit(); | |||
3197 | bool Modified = !Actions.empty(); | |||
3198 | Actions.clear(); | |||
3199 | return Modified; | |||
3200 | } | |||
3201 | ||||
3202 | void TypePromotionTransaction::rollback( | |||
3203 | TypePromotionTransaction::ConstRestorationPt Point) { | |||
3204 | while (!Actions.empty() && Point != Actions.back().get()) { | |||
3205 | std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); | |||
3206 | Curr->undo(); | |||
3207 | } | |||
3208 | } | |||
3209 | ||||
3210 | namespace { | |||
3211 | ||||
3212 | /// A helper class for matching addressing modes. | |||
3213 | /// | |||
3214 | /// This encapsulates the logic for matching the target-legal addressing modes. | |||
3215 | class AddressingModeMatcher { | |||
3216 | SmallVectorImpl<Instruction *> &AddrModeInsts; | |||
3217 | const TargetLowering &TLI; | |||
3218 | const TargetRegisterInfo &TRI; | |||
3219 | const DataLayout &DL; | |||
3220 | const LoopInfo &LI; | |||
3221 | const std::function<const DominatorTree &()> getDTFn; | |||
3222 | ||||
3223 | /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and | |||
3224 | /// the memory instruction that we're computing this address for. | |||
3225 | Type *AccessTy; | |||
3226 | unsigned AddrSpace; | |||
3227 | Instruction *MemoryInst; | |||
3228 | ||||
3229 | /// This is the addressing mode that we're building up. This is | |||
3230 | /// part of the return value of this addressing mode matching stuff. | |||
3231 | ExtAddrMode &AddrMode; | |||
3232 | ||||
3233 | /// The instructions inserted by other CodeGenPrepare optimizations. | |||
3234 | const SetOfInstrs &InsertedInsts; | |||
3235 | ||||
3236 | /// A map from the instructions to their type before promotion. | |||
3237 | InstrToOrigTy &PromotedInsts; | |||
3238 | ||||
3239 | /// The ongoing transaction where every action should be registered. | |||
3240 | TypePromotionTransaction &TPT; | |||
3241 | ||||
3242 | // A GEP whose offset is too large to be folded into the addressing mode. | |||
3243 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; | |||
3244 | ||||
3245 | /// This is set to true when we should not do profitability checks. | |||
3246 | /// When true, IsProfitableToFoldIntoAddressingMode always returns true. | |||
3247 | bool IgnoreProfitability; | |||
3248 | ||||
3249 | /// True if we are optimizing for size. | |||
3250 | bool OptSize = false; | |||
3251 | ||||
3252 | ProfileSummaryInfo *PSI; | |||
3253 | BlockFrequencyInfo *BFI; | |||
3254 | ||||
3255 | AddressingModeMatcher( | |||
3256 | SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, | |||
3257 | const TargetRegisterInfo &TRI, const LoopInfo &LI, | |||
3258 | const std::function<const DominatorTree &()> getDTFn, Type *AT, | |||
3259 | unsigned AS, Instruction *MI, ExtAddrMode &AM, | |||
3260 | const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, | |||
3261 | TypePromotionTransaction &TPT, | |||
3262 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, | |||
3263 | bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) | |||
3264 | : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), | |||
3265 | DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn), | |||
3266 | AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM), | |||
3267 | InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT), | |||
3268 | LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) { | |||
3269 | IgnoreProfitability = false; | |||
3270 | } | |||
3271 | ||||
3272 | public: | |||
3273 | /// Find the maximal addressing mode that a load/store of V can fold, | |||
3274 | /// given an access type of AccessTy. This returns a list of involved | |||
3275 | /// instructions in AddrModeInsts. | |||
3276 | /// \p InsertedInsts The instructions inserted by other CodeGenPrepare | |||
3277 | /// optimizations. | |||
3278 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3279 | /// \p TPT The ongoing transaction where every action should be registered. | |||
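/// A call, as a sketch (argument order only; the actual values come from the
/// caller):
/// @code
/// ExtAddrMode AM = AddressingModeMatcher::Match(
///     Addr, AccessTy, AS, MemoryInst, AddrModeInsts, TLI, LI, getDTFn,
///     TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize,
///     PSI, BFI);
/// @endcode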
3280 | static ExtAddrMode | |||
3281 | Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, | |||
3282 | SmallVectorImpl<Instruction *> &AddrModeInsts, | |||
3283 | const TargetLowering &TLI, const LoopInfo &LI, | |||
3284 | const std::function<const DominatorTree &()> getDTFn, | |||
3285 | const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts, | |||
3286 | InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, | |||
3287 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP, | |||
3288 | bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { | |||
3289 | ExtAddrMode Result; | |||
3290 | ||||
3291 | bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn, | |||
3292 | AccessTy, AS, MemoryInst, Result, | |||
3293 | InsertedInsts, PromotedInsts, TPT, | |||
3294 | LargeOffsetGEP, OptSize, PSI, BFI) | |||
3295 | .matchAddr(V, 0); | |||
3296 | (void)Success; | |||
3297 | assert(Success && "Couldn't select *anything*?")(static_cast <bool> (Success && "Couldn't select *anything*?" ) ? void (0) : __assert_fail ("Success && \"Couldn't select *anything*?\"" , "llvm/lib/CodeGen/CodeGenPrepare.cpp", 3297, __extension__ __PRETTY_FUNCTION__ )); | |||
3298 | return Result; | |||
3299 | } | |||
3300 | ||||
3301 | private: | |||
3302 | bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); | |||
3303 | bool matchAddr(Value *Addr, unsigned Depth); | |||
3304 | bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, | |||
3305 | bool *MovedAway = nullptr); | |||
3306 | bool isProfitableToFoldIntoAddressingMode(Instruction *I, | |||
3307 | ExtAddrMode &AMBefore, | |||
3308 | ExtAddrMode &AMAfter); | |||
3309 | bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); | |||
3310 | bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, | |||
3311 | Value *PromotedOperand) const; | |||
3312 | }; | |||
3313 | ||||
3314 | class PhiNodeSet; | |||
3315 | ||||
3316 | /// An iterator for PhiNodeSet. | |||
3317 | class PhiNodeSetIterator { | |||
3318 | PhiNodeSet *const Set; | |||
3319 | size_t CurrentIndex = 0; | |||
3320 | ||||
3321 | public: | |||
3322 | /// The constructor. Start should point to either a valid element, or be equal | |||
3323 | /// to the size of the underlying SmallVector of the PhiNodeSet. | |||
3324 | PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start); | |||
3325 | PHINode *operator*() const; | |||
3326 | PhiNodeSetIterator &operator++(); | |||
3327 | bool operator==(const PhiNodeSetIterator &RHS) const; | |||
3328 | bool operator!=(const PhiNodeSetIterator &RHS) const; | |||
3329 | }; | |||
3330 | ||||
3331 | /// Keeps a set of PHINodes. | |||
3332 | /// | |||
3333 | /// This is a minimal set implementation for a specific use case: | |||
3334 | /// It is very fast when there are very few elements, but also provides good | |||
3335 | /// performance when there are many. It is similar to SmallPtrSet, but also | |||
3336 | /// provides iteration by insertion order, which is deterministic and stable | |||
3337 | /// across runs. It is also similar to SmallSetVector, but provides removing | |||
3338 | /// elements in O(1) time. This is achieved by not actually removing the element | |||
3339 | /// from the underlying vector, so it comes at the cost of using more memory, | |||
3340 | /// but that is fine, since PhiNodeSets are used as short-lived objects. | |||
3341 | class PhiNodeSet { | |||
3342 | friend class PhiNodeSetIterator; | |||
3343 | ||||
3344 | using MapType = SmallDenseMap<PHINode *, size_t, 32>; | |||
3345 | using iterator = PhiNodeSetIterator; | |||
3346 | ||||
3347 | /// Keeps the elements in the order of their insertion in the underlying | |||
3348 | /// vector. To achieve constant time removal, it never deletes any element. | |||
3349 | SmallVector<PHINode *, 32> NodeList; | |||
3350 | ||||
3351 | /// Keeps the elements in the underlying set implementation. This (and not the | |||
3352 | /// NodeList defined above) is the source of truth on whether an element | |||
3353 | /// is actually in the collection. | |||
3354 | MapType NodeMap; | |||
3355 | ||||
3356 | /// Points to the first valid (not deleted) element when the set is not empty | |||
3357 | /// and the value is not zero. Equals the size of the underlying vector | |||
3358 | /// when the set is empty. When the value is 0, as in the beginning, the | |||
3359 | /// first element may or may not be valid. | |||
3360 | size_t FirstValidElement = 0; | |||
3361 | ||||
3362 | public: | |||
3363 | /// Inserts a new element to the collection. | |||
3364 | /// \returns true if the element is actually added, i.e. was not in the | |||
3365 | /// collection before the operation. | |||
3366 | bool insert(PHINode *Ptr) { | |||
3367 | if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) { | |||
3368 | NodeList.push_back(Ptr); | |||
3369 | return true; | |||
3370 | } | |||
3371 | return false; | |||
3372 | } | |||
3373 | ||||
3374 | /// Removes the element from the collection. | |||
3375 | /// \returns whether the element is actually removed, i.e. was in the | |||
3376 | /// collection before the operation. | |||
3377 | bool erase(PHINode *Ptr) { | |||
3378 | if (NodeMap.erase(Ptr)) { | |||
3379 | SkipRemovedElements(FirstValidElement); | |||
3380 | return true; | |||
3381 | } | |||
3382 | return false; | |||
3383 | } | |||
3384 | ||||
3385 | /// Removes all elements and clears the collection. | |||
3386 | void clear() { | |||
3387 | NodeMap.clear(); | |||
3388 | NodeList.clear(); | |||
3389 | FirstValidElement = 0; | |||
3390 | } | |||
3391 | ||||
3392 | /// \returns an iterator that will iterate the elements in the order of | |||
3393 | /// insertion. | |||
3394 | iterator begin() { | |||
3395 | if (FirstValidElement == 0) | |||
3396 | SkipRemovedElements(FirstValidElement); | |||
3397 | return PhiNodeSetIterator(this, FirstValidElement); | |||
3398 | } | |||
3399 | ||||
3400 | /// \returns an iterator that points to the end of the collection. | |||
3401 | iterator end() { return PhiNodeSetIterator(this, NodeList.size()); } | |||
3402 | ||||
3403 | /// Returns the number of elements in the collection. | |||
3404 | size_t size() const { return NodeMap.size(); } | |||
3405 | ||||
3406 | /// \returns 1 if the given element is in the collection, and 0 otherwise. | |||
3407 | size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); } | |||
3408 | ||||
3409 | private: | |||
3410 | /// Updates the CurrentIndex so that it will point to a valid element. | |||
3411 | /// | |||
3412 | /// If the element of NodeList at CurrentIndex is valid, it does not | |||
3413 | /// change it. If there are no more valid elements, it updates CurrentIndex | |||
3414 | /// to point to the end of the NodeList. | |||
3415 | void SkipRemovedElements(size_t &CurrentIndex) { | |||
3416 | while (CurrentIndex < NodeList.size()) { | |||
3417 | auto it = NodeMap.find(NodeList[CurrentIndex]); | |||
3418 | // If the element has been deleted and added again later, NodeMap will | |||
3419 | // point to a different index, so CurrentIndex will still be invalid. | |||
3420 | if (it != NodeMap.end() && it->second == CurrentIndex) | |||
3421 | break; | |||
3422 | ++CurrentIndex; | |||
3423 | } | |||
3424 | } | |||
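// Note (illustrative): if a node is erased and later re-inserted, NodeMap
// records the index of its new slot at the tail of NodeList, so the stale
// earlier slot fails the `it->second == CurrentIndex` check above and is
// skipped during iteration.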
3425 | }; | |||
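// A minimal usage sketch of PhiNodeSet; `Set`, `A` and `B` below are
// hypothetical values, shown only to illustrate the semantics:
//
//   PhiNodeSet Set;
//   Set.insert(A);          // true: A was not present before
//   Set.insert(A);          // false: A is already in the set
//   Set.insert(B);
//   Set.erase(A);           // true: O(1); A stays in NodeList as a tombstone
//   for (PHINode *P : Set)  // visits only B, in insertion order
//     P->dump();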
3426 | ||||
3427 | PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start) | |||
3428 | : Set(Set), CurrentIndex(Start) {} | |||
3429 | ||||
3430 | PHINode *PhiNodeSetIterator::operator*() const { | |||
3431 | assert(CurrentIndex < Set->NodeList.size() && | |||
3432 | "PhiNodeSet access out of range"); | |||
3433 | return Set->NodeList[CurrentIndex]; | |||
3434 | } | |||
3435 | ||||
3436 | PhiNodeSetIterator &PhiNodeSetIterator::operator++() { | |||
3437 | assert(CurrentIndex < Set->NodeList.size() && | |||
3438 | "PhiNodeSet access out of range"); | |||
3439 | ++CurrentIndex; | |||
3440 | Set->SkipRemovedElements(CurrentIndex); | |||
3441 | return *this; | |||
3442 | } | |||
3443 | ||||
3444 | bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const { | |||
3445 | return CurrentIndex == RHS.CurrentIndex; | |||
3446 | } | |||
3447 | ||||
3448 | bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const { | |||
3449 | return !((*this) == RHS); | |||
3450 | } | |||
3451 | ||||
3452 | /// Keeps track of the simplification of Phi nodes. | |||
3453 | /// Accepts the set of all phi nodes and erases a phi node from this set | |||
3454 | /// once it has been simplified. | |||
3455 | class SimplificationTracker { | |||
3456 | DenseMap<Value *, Value *> Storage; | |||
3457 | const SimplifyQuery &SQ; | |||
3458 | // Tracks newly created Phi nodes. The elements are iterated by insertion | |||
3459 | // order. | |||
3460 | PhiNodeSet AllPhiNodes; | |||
3461 | // Tracks newly created Select nodes. | |||
3462 | SmallPtrSet<SelectInst *, 32> AllSelectNodes; | |||
3463 | ||||
3464 | public: | |||
3465 | SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {} | |||
3466 | ||||
3467 | Value *Get(Value *V) { | |||
3468 | do { | |||
3469 | auto SV = Storage.find(V); | |||
3470 | if (SV == Storage.end()) | |||
3471 | return V; | |||
3472 | V = SV->second; | |||
3473 | } while (true); | |||
3474 | } | |||
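// For example, after Put(A, B) and Put(B, C), Get(A) follows the chain
// A -> B -> C and returns C, the most recent replacement (A, B and C being
// hypothetical values).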
3475 | ||||
3476 | Value *Simplify(Value *Val) { | |||
3477 | SmallVector<Value *, 32> WorkList; | |||
3478 | SmallPtrSet<Value *, 32> Visited; | |||
3479 | WorkList.push_back(Val); | |||
3480 | while (!WorkList.empty()) { | |||
3481 | auto *P = WorkList.pop_back_val(); | |||
3482 | if (!Visited.insert(P).second) | |||
3483 | continue; | |||
3484 | if (auto *PI = dyn_cast<Instruction>(P)) | |||
3485 | if (Value *V = simplifyInstruction(PI, SQ)) { | |||
3486 | for (auto *U : PI->users()) | |||
3487 | WorkList.push_back(cast<Value>(U)); | |||
3488 | Put(PI, V); | |||
3489 | PI->replaceAllUsesWith(V); | |||
3490 | if (auto *PHI = dyn_cast<PHINode>(PI)) | |||
3491 | AllPhiNodes.erase(PHI); | |||
3492 | if (auto *Select = dyn_cast<SelectInst>(PI)) | |||
3493 | AllSelectNodes.erase(Select); | |||
3494 | PI->eraseFromParent(); | |||
3495 | } | |||
3496 | } | |||
3497 | return Get(Val); | |||
3498 | } | |||
3499 | ||||
3500 | void Put(Value *From, Value *To) { Storage.insert({From, To}); } | |||
3501 | ||||
3502 | void ReplacePhi(PHINode *From, PHINode *To) { | |||
3503 | Value *OldReplacement = Get(From); | |||
3504 | while (OldReplacement != From) { | |||
3505 | From = To; | |||
3506 | To = dyn_cast<PHINode>(OldReplacement); | |||
3507 | OldReplacement = Get(From); | |||
3508 | } | |||
3509 | assert(To && Get(To) == To && "Replacement PHI node is already replaced."); | |||
3510 | Put(From, To); | |||
3511 | From->replaceAllUsesWith(To); | |||
3512 | AllPhiNodes.erase(From); | |||
3513 | From->eraseFromParent(); | |||
3514 | } | |||
3515 | ||||
3516 | PhiNodeSet &newPhiNodes() { return AllPhiNodes; } | |||
3517 | ||||
3518 | void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } | |||
3519 | ||||
3520 | void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } | |||
3521 | ||||
3522 | unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } | |||
3523 | ||||
3524 | unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } | |||
3525 | ||||
3526 | void destroyNewNodes(Type *CommonType) { | |||
3527 | // For safe erasing, replace the uses with dummy value first. | |||
3528 | auto *Dummy = PoisonValue::get(CommonType); | |||
3529 | for (auto *I : AllPhiNodes) { | |||
3530 | I->replaceAllUsesWith(Dummy); | |||
3531 | I->eraseFromParent(); | |||
3532 | } | |||
3533 | AllPhiNodes.clear(); | |||
3534 | for (auto *I : AllSelectNodes) { | |||
3535 | I->replaceAllUsesWith(Dummy); | |||
3536 | I->eraseFromParent(); | |||
3537 | } | |||
3538 | AllSelectNodes.clear(); | |||
3539 | } | |||
3540 | }; | |||
3541 | ||||
3542 | /// A helper class for combining addressing modes. | |||
3543 | class AddressingModeCombiner { | |||
3544 | typedef DenseMap<Value *, Value *> FoldAddrToValueMapping; | |||
3545 | typedef std::pair<PHINode *, PHINode *> PHIPair; | |||
3546 | ||||
3547 | private: | |||
3548 | /// The addressing modes we've collected. | |||
3549 | SmallVector<ExtAddrMode, 16> AddrModes; | |||
3550 | ||||
3551 | /// The field in which the AddrModes differ, when we have more than one. | |||
3552 | ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; | |||
3553 | ||||
3554 | /// Are the AddrModes that we have all just equal to their original values? | |||
3555 | bool AllAddrModesTrivial = true; | |||
3556 | ||||
3557 | /// Common Type for all different fields in addressing modes. | |||
3558 | Type *CommonType = nullptr; | |||
3559 | ||||
3560 | /// SimplifyQuery for simplifyInstruction utility. | |||
3561 | const SimplifyQuery &SQ; | |||
3562 | ||||
3563 | /// Original Address. | |||
3564 | Value *Original; | |||
3565 | ||||
3566 | /// Common value among addresses | |||
3567 | Value *CommonValue = nullptr; | |||
3568 | ||||
3569 | public: | |||
3570 | AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue) | |||
3571 | : SQ(_SQ), Original(OriginalValue) {} | |||
3572 | ||||
3573 | ~AddressingModeCombiner() { eraseCommonValueIfDead(); } | |||
3574 | ||||
3575 | /// Get the combined AddrMode | |||
3576 | const ExtAddrMode &getAddrMode() const { return AddrModes[0]; } | |||
3577 | ||||
3578 | /// Add a new AddrMode if it's compatible with the AddrModes we already | |||
3579 | /// have. | |||
3580 | /// \return True iff we succeeded in doing so. | |||
3581 | bool addNewAddrMode(ExtAddrMode &NewAddrMode) { | |||
3582 | // Take note of whether we have any non-trivial AddrModes, as we need to | |||
3583 | // detect when all AddrModes are trivial: in that case we would introduce a | |||
3584 | // phi or select which just duplicates what's already there. | |||
3585 | AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); | |||
3586 | ||||
3587 | // If this is the first addrmode then everything is fine. | |||
3588 | if (AddrModes.empty()) { | |||
3589 | AddrModes.emplace_back(NewAddrMode); | |||
3590 | return true; | |||
3591 | } | |||
3592 | ||||
3593 | // Figure out how different this is from the other address modes, which we | |||
3594 | // can do just by comparing against the first one given that we only care | |||
3595 | // about the cumulative difference. | |||
3596 | ExtAddrMode::FieldName ThisDifferentField = | |||
3597 | AddrModes[0].compare(NewAddrMode); | |||
3598 | if (DifferentField == ExtAddrMode::NoField) | |||
3599 | DifferentField = ThisDifferentField; | |||
3600 | else if (DifferentField != ThisDifferentField) | |||
3601 | DifferentField = ExtAddrMode::MultipleFields; | |||
3602 | ||||
3603 | // If NewAddrMode differs in more than one dimension we cannot handle it. | |||
3604 | bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; | |||
3605 | ||||
3606 | // If Scale Field is different then we reject. | |||
3607 | CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; | |||
3608 | ||||
3609 | // We must also reject the case where the base offset differs and the | |||
3610 | // scaled register is not null: we cannot handle it, because the merge | |||
3611 | // of the different offsets would have to be used as the ScaleReg. | |||
3612 | CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || | |||
3613 | !NewAddrMode.ScaledReg); | |||
3614 | ||||
3615 | // We must also reject the case where the GV differs and a BaseReg is | |||
3616 | // installed, because we want to use the base register as the merge of the GV values. | |||
3617 | CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || | |||
3618 | !NewAddrMode.HasBaseReg); | |||
3619 | ||||
3620 | // Even if NewAddrMode is the same, we still need to collect it, because | |||
3621 | // the original value is different; later we will need all the original | |||
3622 | // values as anchors when finding the common Phi node. | |||
3623 | if (CanHandle) | |||
3624 | AddrModes.emplace_back(NewAddrMode); | |||
3625 | else | |||
3626 | AddrModes.clear(); | |||
3627 | ||||
3628 | return CanHandle; | |||
3629 | } | |||
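// Illustration of the rules above, with hypothetical addresses: two modes
// that differ only in BaseReg (say [%b1 + 40] and [%b2 + 40]) are kept, since
// %b1/%b2 can later be merged through a Phi; modes that differ in Scale, or
// in BaseOffs while a ScaledReg is present, clear the list and bail out.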
3630 | ||||
3631 | /// Combine the addressing modes we've collected into a single | |||
3632 | /// addressing mode. | |||
3633 | /// \return True iff we successfully combined them or we only had one so | |||
3634 | /// didn't need to combine them anyway. | |||
3635 | bool combineAddrModes() { | |||
3636 | // If we have no AddrModes then they can't be combined. | |||
3637 | if (AddrModes.size() == 0) | |||
3638 | return false; | |||
3639 | ||||
3640 | // A single AddrMode can trivially be combined. | |||
3641 | if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) | |||
3642 | return true; | |||
3643 | ||||
3644 | // If the AddrModes we collected are all just equal to the value they are | |||
3645 | // derived from then combining them wouldn't do anything useful. | |||
3646 | if (AllAddrModesTrivial) | |||
3647 | return false; | |||
3648 | ||||
3649 | if (!addrModeCombiningAllowed()) | |||
3650 | return false; | |||
3651 | ||||
3652 | // Build a map from <original value, basic block where we saw it> to | |||
3653 | // the value of the base register. | |||
3654 | // Bail out if there is no common type. | |||
3655 | FoldAddrToValueMapping Map; | |||
3656 | if (!initializeMap(Map)) | |||
3657 | return false; | |||
3658 | ||||
3659 | CommonValue = findCommon(Map); | |||
3660 | if (CommonValue) | |||
3661 | AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); | |||
3662 | return CommonValue != nullptr; | |||
3663 | } | |||
3664 | ||||
3665 | private: | |||
3666 | /// `CommonValue` may be a placeholder inserted by us. | |||
3667 | /// If the placeholder is not used, we should remove this dead instruction. | |||
3668 | void eraseCommonValueIfDead() { | |||
3669 | if (CommonValue && CommonValue->getNumUses() == 0) | |||
3670 | if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue)) | |||
3671 | CommonInst->eraseFromParent(); | |||
3672 | } | |||
3673 | ||||
3674 | /// Initialize Map with anchor values. For each address seen, | |||
3675 | /// we record the value of the differing field in that address. | |||
3676 | /// At the same time we find a common type for the differing fields, which we | |||
3677 | /// will use to create new Phi/Select nodes. Keep it in the CommonType field. | |||
3678 | /// Return false if no common type is found. | |||
3679 | bool initializeMap(FoldAddrToValueMapping &Map) { | |||
3680 | // Keep track of keys where the value is null. We will need to replace it | |||
3681 | // with constant null when we know the common type. | |||
3682 | SmallVector<Value *, 2> NullValue; | |||
3683 | Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); | |||
3684 | for (auto &AM : AddrModes) { | |||
3685 | Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); | |||
3686 | if (DV) { | |||
3687 | auto *Type = DV->getType(); | |||
3688 | if (CommonType && CommonType != Type) | |||
3689 | return false; | |||
3690 | CommonType = Type; | |||
3691 | Map[AM.OriginalValue] = DV; | |||
3692 | } else { | |||
3693 | NullValue.push_back(AM.OriginalValue); | |||
3694 | } | |||
3695 | } | |||
3696 | assert(CommonType && "At least one non-null value must be!"); | |||
3697 | for (auto *V : NullValue) | |||
3698 | Map[V] = Constant::getNullValue(CommonType); | |||
3699 | return true; | |||
3700 | } | |||
3701 | ||||
3702 | /// We have a mapping between value A and value B, where B was a field in | |||
3703 | /// the addressing mode represented by A. We also have an original value C | |||
3704 | /// representing the address we start with. Traversing from C through phis | |||
3705 | /// and selects, we ended up with the A's in the map. This utility function | |||
3706 | /// tries to find a value V that plays the role of the field for C, such that | |||
3707 | /// traversing through phi nodes and selects from V ends up at the | |||
3708 | /// corresponding B values in the map. It will create new Phis/Selects if needed. | |||
3709 | // The simple example looks as follows: | |||
3710 | // BB1: | |||
3711 | // p1 = b1 + 40 | |||
3712 | // br cond BB2, BB3 | |||
3713 | // BB2: | |||
3714 | // p2 = b2 + 40 | |||
3715 | // br BB3 | |||
3716 | // BB3: | |||
3717 | // p = phi [p1, BB1], [p2, BB2] | |||
3718 | // v = load p | |||
3719 | // Map is | |||
3720 | // p1 -> b1 | |||
3721 | // p2 -> b2 | |||
3722 | // Request is | |||
3723 | // p -> ? | |||
3724 | // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3. | |||
3725 | Value *findCommon(FoldAddrToValueMapping &Map) { | |||
3726 | // Tracks the simplification of newly created phi nodes. The reason we use | |||
3727 | // this mapping is that we will add newly created Phi nodes to AddrToBase. | |||
3728 | // Simplification of Phi nodes is recursive, so some Phi node may | |||
3729 | // be simplified after we have added it to AddrToBase. In reality this | |||
3730 | // simplification is possible only if the original phis/selects have not | |||
3731 | // been simplified yet. | |||
3732 | // Using this mapping we can find the current value in AddrToBase. | |||
3733 | SimplificationTracker ST(SQ); | |||
3734 | ||||
3735 | // First step: DFS to create PHI nodes for all intermediate blocks. | |||
3736 | // Also fill the traversal order for the second step. | |||
3737 | SmallVector<Value *, 32> TraverseOrder; | |||
3738 | InsertPlaceholders(Map, TraverseOrder, ST); | |||
3739 | ||||
3740 | // Second step: fill the new nodes with merged values and simplify if possible. | |||
3741 | FillPlaceholders(Map, TraverseOrder, ST); | |||
3742 | ||||
3743 | if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { | |||
3744 | ST.destroyNewNodes(CommonType); | |||
3745 | return nullptr; | |||
3746 | } | |||
3747 | ||||
3748 | // Now we'd like to match the new Phi nodes to existing ones. | |||
3749 | unsigned PhiNotMatchedCount = 0; | |||
3750 | if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { | |||
3751 | ST.destroyNewNodes(CommonType); | |||
3752 | return nullptr; | |||
3753 | } | |||
3754 | ||||
3755 | auto *Result = ST.Get(Map.find(Original)->second); | |||
3756 | if (Result) { | |||
3757 | NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; | |||
3758 | NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); | |||
3759 | } | |||
3760 | return Result; | |||
3761 | } | |||
3762 | ||||
3763 | /// Try to match PHI node to Candidate. | |||
3764 | /// Matcher tracks the matched Phi nodes. | |||
3765 | bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, | |||
3766 | SmallSetVector<PHIPair, 8> &Matcher, | |||
3767 | PhiNodeSet &PhiNodesToMatch) { | |||
3768 | SmallVector<PHIPair, 8> WorkList; | |||
3769 | Matcher.insert({PHI, Candidate}); | |||
3770 | SmallSet<PHINode *, 8> MatchedPHIs; | |||
3771 | MatchedPHIs.insert(PHI); | |||
3772 | WorkList.push_back({PHI, Candidate}); | |||
3773 | SmallSet<PHIPair, 8> Visited; | |||
3774 | while (!WorkList.empty()) { | |||
3775 | auto Item = WorkList.pop_back_val(); | |||
3776 | if (!Visited.insert(Item).second) | |||
3777 | continue; | |||
3778 | // We iterate over all incoming values of the Phi to compare them. | |||
3779 | // If the values differ, both of them are Phis, the first one is a | |||
3780 | // Phi we added (subject to matching), and both of them are in the same | |||
3781 | // basic block, then we can match our pair if those values match. So we | |||
3782 | // state that these values match and add the pair to the work list to verify that. | |||
3783 | for (auto *B : Item.first->blocks()) { | |||
3784 | Value *FirstValue = Item.first->getIncomingValueForBlock(B); | |||
3785 | Value *SecondValue = Item.second->getIncomingValueForBlock(B); | |||
3786 | if (FirstValue == SecondValue) | |||
3787 | continue; | |||
3788 | ||||
3789 | PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); | |||
3790 | PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); | |||
3791 | ||||
3792 | // If one of them is not a Phi, or | |||
3793 | // the first one is not a Phi node from the set we'd like to match, or | |||
3794 | // the Phi nodes are from different basic blocks, then | |||
3795 | // we will not be able to match. | |||
3796 | if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || | |||
3797 | FirstPhi->getParent() != SecondPhi->getParent()) | |||
3798 | return false; | |||
3799 | ||||
3800 | // If we already matched them then continue. | |||
3801 | if (Matcher.count({FirstPhi, SecondPhi})) | |||
3802 | continue; | |||
3803 | // So the values are different and do not match. So we need them to | |||
3804 | // match. (But we register no more than one match per PHI node, so that | |||
3805 | // we won't later try to replace them twice.) | |||
3806 | if (MatchedPHIs.insert(FirstPhi).second) | |||
3807 | Matcher.insert({FirstPhi, SecondPhi}); | |||
3808 | // But we must check it. | |||
3809 | WorkList.push_back({FirstPhi, SecondPhi}); | |||
3810 | } | |||
3811 | } | |||
3812 | return true; | |||
3813 | } | |||
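// Sketch of a match, on hypothetical IR: the newly created
//   %sunk_phi = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
// trivially matches an existing
//   %old = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
// since all incoming values are equal. If an incoming pair differs and both
// values are Phis (the first one ours), the pair is recorded in Matcher and
// queued on the WorkList to be verified in turn.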
3814 | ||||
3815 | /// For the given set of PHI nodes (in the SimplificationTracker) try | |||
3816 | /// to find their equivalents. | |||
3817 | /// Returns false if this matching fails and creation of new Phi is disabled. | |||
3818 | bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, | |||
3819 | unsigned &PhiNotMatchedCount) { | |||
3820 | // Matched and PhiNodesToMatch iterate their elements in a deterministic | |||
3821 | // order, so the replacements (ReplacePhi) are also done in a deterministic | |||
3822 | // order. | |||
3823 | SmallSetVector<PHIPair, 8> Matched; | |||
3824 | SmallPtrSet<PHINode *, 8> WillNotMatch; | |||
3825 | PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes(); | |||
3826 | while (PhiNodesToMatch.size()) { | |||
3827 | PHINode *PHI = *PhiNodesToMatch.begin(); | |||
3828 | ||||
3829 | // Record ourselves: if no Phi node in the basic block matches, we will not match. | |||
3830 | WillNotMatch.clear(); | |||
3831 | WillNotMatch.insert(PHI); | |||
3832 | ||||
3833 | // Traverse all Phis until we find an equivalent one or fail to do so. | |||
3834 | bool IsMatched = false; | |||
3835 | for (auto &P : PHI->getParent()->phis()) { | |||
3836 | // Skip new Phi nodes. | |||
3837 | if (PhiNodesToMatch.count(&P)) | |||
3838 | continue; | |||
3839 | if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) | |||
3840 | break; | |||
3841 | // If it does not match, collect all Phi nodes from the matcher; | |||
3842 | // if we end up with no match, then all these Phi nodes will not match | |||
3843 | // later either. | |||
3844 | for (auto M : Matched) | |||
3845 | WillNotMatch.insert(M.first); | |||
3846 | Matched.clear(); | |||
3847 | } | |||
3848 | if (IsMatched) { | |||
3849 | // Replace all matched values and erase them. | |||
3850 | for (auto MV : Matched) | |||
3851 | ST.ReplacePhi(MV.first, MV.second); | |||
3852 | Matched.clear(); | |||
3853 | continue; | |||
3854 | } | |||
3855 | // If we are not allowed to create new nodes then bail out. | |||
3856 | if (!AllowNewPhiNodes) | |||
3857 | return false; | |||
3858 | // Just remove all seen values in matcher. They will not match anything. | |||
3859 | PhiNotMatchedCount += WillNotMatch.size(); | |||
3860 | for (auto *P : WillNotMatch) | |||
3861 | PhiNodesToMatch.erase(P); | |||
3862 | } | |||
3863 | return true; | |||
3864 | } | |||
3865 | /// Fill the placeholders with values from predecessors and simplify them. | |||
3866 | void FillPlaceholders(FoldAddrToValueMapping &Map, | |||
3867 | SmallVectorImpl<Value *> &TraverseOrder, | |||
3868 | SimplificationTracker &ST) { | |||
3869 | while (!TraverseOrder.empty()) { | |||
3870 | Value *Current = TraverseOrder.pop_back_val(); | |||
3871 | assert(Map.contains(Current) && "No node to fill!!!"); | |||
3872 | Value *V = Map[Current]; | |||
3873 | ||||
3874 | if (SelectInst *Select = dyn_cast<SelectInst>(V)) { | |||
3875 | // CurrentValue must also be a Select. | |||
3876 | auto *CurrentSelect = cast<SelectInst>(Current); | |||
3877 | auto *TrueValue = CurrentSelect->getTrueValue(); | |||
3878 | assert(Map.contains(TrueValue) && "No True Value!"); | |||
3879 | Select->setTrueValue(ST.Get(Map[TrueValue])); | |||
3880 | auto *FalseValue = CurrentSelect->getFalseValue(); | |||
3881 | assert(Map.contains(FalseValue) && "No False Value!"); | |||
3882 | Select->setFalseValue(ST.Get(Map[FalseValue])); | |||
3883 | } else { | |||
3884 | // Must be a Phi node then. | |||
3885 | auto *PHI = cast<PHINode>(V); | |||
3886 | // Fill the Phi node with values from predecessors. | |||
3887 | for (auto *B : predecessors(PHI->getParent())) { | |||
3888 | Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B); | |||
3889 | assert(Map.contains(PV) && "No predecessor Value!"); | |||
3890 | PHI->addIncoming(ST.Get(Map[PV]), B); | |||
3891 | } | |||
3892 | } | |||
3893 | Map[Current] = ST.Simplify(V); | |||
3894 | } | |||
3895 | } | |||
3896 | ||||
3897 | /// Starting from the original value, recursively iterates over the def-use | |||
3898 | /// chain up to the known ending values represented in the map. For each | |||
3899 | /// traversed phi/select, inserts a placeholder Phi or Select. | |||
3900 | /// Reports all newly created Phi/Select nodes by adding them to the set. | |||
3901 | /// Also reports the order in which the values have been traversed. | |||
3902 | void InsertPlaceholders(FoldAddrToValueMapping &Map, | |||
3903 | SmallVectorImpl<Value *> &TraverseOrder, | |||
3904 | SimplificationTracker &ST) { | |||
3905 | SmallVector<Value *, 32> Worklist; | |||
3906 | assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) && | |||
3907 | "Address must be a Phi or Select node"); | |||
3908 | auto *Dummy = PoisonValue::get(CommonType); | |||
3909 | Worklist.push_back(Original); | |||
3910 | while (!Worklist.empty()) { | |||
3911 | Value *Current = Worklist.pop_back_val(); | |||
3912 | // if it is already visited or it is an ending value then skip it. | |||
3913 | if (Map.contains(Current)) | |||
3914 | continue; | |||
3915 | TraverseOrder.push_back(Current); | |||
3916 | ||||
3917 | // CurrentValue must be a Phi node or select. All others must be covered | |||
3918 | // by anchors. | |||
3919 | if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) { | |||
3920 | // Is it OK to get metadata from OrigSelect?! | |||
3921 | // Create a Select placeholder with dummy value. | |||
3922 | SelectInst *Select = SelectInst::Create( | |||
3923 | CurrentSelect->getCondition(), Dummy, Dummy, | |||
3924 | CurrentSelect->getName(), CurrentSelect, CurrentSelect); | |||
3925 | Map[Current] = Select; | |||
3926 | ST.insertNewSelect(Select); | |||
3927 | // We are interested in True and False values. | |||
3928 | Worklist.push_back(CurrentSelect->getTrueValue()); | |||
3929 | Worklist.push_back(CurrentSelect->getFalseValue()); | |||
3930 | } else { | |||
3931 | // It must be a Phi node then. | |||
3932 | PHINode *CurrentPhi = cast<PHINode>(Current); | |||
3933 | unsigned PredCount = CurrentPhi->getNumIncomingValues(); | |||
3934 | PHINode *PHI = | |||
3935 | PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi); | |||
3936 | Map[Current] = PHI; | |||
3937 | ST.insertNewPhi(PHI); | |||
3938 | append_range(Worklist, CurrentPhi->incoming_values()); | |||
3939 | } | |||
3940 | } | |||
3941 | } | |||
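// Tying this back to the example before findCommon: for
//   p = phi [p1, BB1], [p2, BB2]
// InsertPlaceholders creates an empty "sunk_phi" PHI of CommonType next to p
// and maps p -> sunk_phi; FillPlaceholders then adds the incoming values b1
// and b2 from the anchors already in the map. Select placeholders start out
// with the poison Dummy in both arms instead.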
3942 | ||||
3943 | bool addrModeCombiningAllowed() { | |||
3944 | if (DisableComplexAddrModes) | |||
3945 | return false; | |||
3946 | switch (DifferentField) { | |||
3947 | default: | |||
3948 | return false; | |||
3949 | case ExtAddrMode::BaseRegField: | |||
3950 | return AddrSinkCombineBaseReg; | |||
3951 | case ExtAddrMode::BaseGVField: | |||
3952 | return AddrSinkCombineBaseGV; | |||
3953 | case ExtAddrMode::BaseOffsField: | |||
3954 | return AddrSinkCombineBaseOffs; | |||
3955 | case ExtAddrMode::ScaledRegField: | |||
3956 | return AddrSinkCombineScaledReg; | |||
3957 | } | |||
3958 | } | |||
3959 | }; | |||
3960 | } // end anonymous namespace | |||
3961 | ||||
3962 | /// Try adding ScaleReg*Scale to the current addressing mode. | |||
3963 | /// Return true and update AddrMode if this addr mode is legal for the target, | |||
3964 | /// false if not. | |||
3965 | bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, | |||
3966 | unsigned Depth) { | |||
3967 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing | |||
3968 | // mode. Just process that directly. | |||
3969 | if (Scale == 1) | |||
3970 | return matchAddr(ScaleReg, Depth); | |||
3971 | ||||
3972 | // If the scale is 0, it takes nothing to add this. | |||
3973 | if (Scale == 0) | |||
3974 | return true; | |||
3975 | ||||
3976 | // If we already have a scale of this value, we can add to it, otherwise, we | |||
3977 | // need an available scale field. | |||
3978 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) | |||
3979 | return false; | |||
3980 | ||||
3981 | ExtAddrMode TestAddrMode = AddrMode; | |||
3982 | ||||
3983 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like | |||
3984 | // [A+B + A*7] -> [B+A*8]. | |||
3985 | TestAddrMode.Scale += Scale; | |||
3986 | TestAddrMode.ScaledReg = ScaleReg; | |||
3987 | ||||
3988 | // If the new address isn't legal, bail out. | |||
3989 | if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) | |||
3990 | return false; | |||
3991 | ||||
3992 | // It was legal, so commit it. | |||
3993 | AddrMode = TestAddrMode; | |||
3994 | ||||
3995 | // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now | |||
3996 | // to see if ScaleReg is actually X+C. If so, we can turn this into adding | |||
3997 | // X*Scale + C*Scale to the addr mode. If we found an available IV increment, | |||
3998 | // do not go any further: we can reuse it and cannot eliminate it. | |||
3999 | ConstantInt *CI = nullptr; | |||
4000 | Value *AddLHS = nullptr; | |||
4001 | if (isa<Instruction>(ScaleReg) && // not a constant expr. | |||
4002 | match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) && | |||
4003 | !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) { | |||
4004 | TestAddrMode.InBounds = false; | |||
4005 | TestAddrMode.ScaledReg = AddLHS; | |||
4006 | TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale; | |||
4007 | ||||
4008 | // If this addressing mode is legal, commit it and remember that we folded | |||
4009 | // this instruction. | |||
4010 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { | |||
4011 | AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); | |||
4012 | AddrMode = TestAddrMode; | |||
4013 | return true; | |||
4014 | } | |||
4015 | // Restore status quo. | |||
4016 | TestAddrMode = AddrMode; | |||
4017 | } | |||
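// For instance (hypothetical values, assuming nothing was folded into the
// mode yet): with Scale == 4 and ScaleReg == (%x + 3), the block above
// retries with ScaledReg = %x and BaseOffs increased by 3 * 4 == 12,
// i.e. [%x * 4 + 12].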
4018 | ||||
4019 | // If this is an add recurrence with a constant step, return the increment | |||
4020 | // instruction and the canonicalized step. | |||
4021 | auto GetConstantStep = | |||
4022 | [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> { | |||
4023 | auto *PN = dyn_cast<PHINode>(V); | |||
4024 | if (!PN) | |||
4025 | return std::nullopt; | |||
4026 | auto IVInc = getIVIncrement(PN, &LI); | |||
4027 | if (!IVInc) | |||
4028 | return std::nullopt; | |||
4029 | // TODO: The result of the intrinsics above is two's complement. However, when | |||
4030 | // the IV inc is expressed as add or sub, iv.next is potentially a poison value. | |||
4031 | // If it has nuw or nsw flags, we need to make sure that these flags are | |||
4032 | // inferrable at the point of the memory instruction. Otherwise we are replacing | |||
4033 | // a well-defined two's complement computation with poison. Currently, to avoid | |||
4034 | // the potentially complex analysis needed to prove this, we reject such cases. | |||
4035 | if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first)) | |||
4036 | if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap()) | |||
4037 | return std::nullopt; | |||
4038 | if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second)) | |||
4039 | return std::make_pair(IVInc->first, ConstantStep->getValue()); | |||
4040 | return std::nullopt; | |||
4041 | }; | |||
4042 | ||||
4043 | // Try to account for the following special case: | |||
4044 | // 1. ScaleReg is an induction variable; | |||
4045 | // 2. We use it with a non-zero offset; | |||
4046 | // 3. The IV's increment is available at the point of the memory instruction. | |||
4047 | // | |||
4048 | // In this case, we may reuse the IV increment instead of the IV Phi to | |||
4049 | // achieve the following advantages: | |||
4050 | // 1. If the IV step matches the offset, we will have no need for the offset; | |||
4051 | // 2. Even if they don't match, we will reduce the overlap of the live | |||
4052 | // ranges of the IV and the IV increment, which will potentially lead to | |||
4053 | // better register assignment. | |||
4054 | if (AddrMode.BaseOffs) { | |||
4055 | if (auto IVStep = GetConstantStep(ScaleReg)) { | |||
4056 | Instruction *IVInc = IVStep->first; | |||
4057 | // The following assert is important to ensure a lack of infinite loops. | |||
4058 | // This transform is (intentionally) the inverse of the one just above. | |||
4059 | // If they don't agree on the definition of an increment, we'd alternate | |||
4060 | // back and forth indefinitely. | |||
4061 | assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep"); | |||
4062 | APInt Step = IVStep->second; | |||
4063 | APInt Offset = Step * AddrMode.Scale; | |||
4064 | if (Offset.isSignedIntN(64)) { | |||
4065 | TestAddrMode.InBounds = false; | |||
4066 | TestAddrMode.ScaledReg = IVInc; | |||
4067 | TestAddrMode.BaseOffs -= Offset.getLimitedValue(); | |||
4068 | // If this addressing mode is legal, commit it. | |||
4069 | // (Note that we defer the (expensive) domtree base legality check | |||
4070 | // to the very last possible point.) | |||
4071 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) && | |||
4072 | getDTFn().dominates(IVInc, MemoryInst)) { | |||
4073 | AddrModeInsts.push_back(cast<Instruction>(IVInc)); | |||
4074 | AddrMode = TestAddrMode; | |||
4075 | return true; | |||
4076 | } | |||
4077 | // Restore status quo. | |||
4078 | TestAddrMode = AddrMode; | |||
4079 | } | |||
4080 | } | |||
4081 | } | |||
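// Sketch of the special case above, on hypothetical IR:
//   %iv      = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
//   %iv.next = add i64 %iv, 4
// An access at [%iv * 2 + 8] can be rewritten as [%iv.next * 2 + 0], removing
// the offset and shrinking the overlap of the live ranges of %iv and %iv.next.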
4082 | ||||
4083 | // Otherwise, just return what we have. | |||
4084 | return true; | |||
4085 | } | |||
4086 | ||||
4087 | /// This is a little filter, which returns true if an addressing computation | |||
4088 | /// involving I might be folded into a load/store accessing it. | |||
4089 | /// This doesn't need to be perfect, but needs to accept at least | |||
4090 | /// the set of instructions that MatchOperationAddr can. | |||
4091 | static bool MightBeFoldableInst(Instruction *I) { | |||
4092 | switch (I->getOpcode()) { | |||
4093 | case Instruction::BitCast: | |||
4094 | case Instruction::AddrSpaceCast: | |||
4095 | // Don't touch identity bitcasts. | |||
4096 | if (I->getType() == I->getOperand(0)->getType()) | |||
4097 | return false; | |||
4098 | return I->getType()->isIntOrPtrTy(); | |||
4099 | case Instruction::PtrToInt: | |||
4100 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
4101 | return true; | |||
4102 | case Instruction::IntToPtr: | |||
4103 | // We know the input is intptr_t, so this is foldable. | |||
4104 | return true; | |||
4105 | case Instruction::Add: | |||
4106 | return true; | |||
4107 | case Instruction::Mul: | |||
4108 | case Instruction::Shl: | |||
4109 | // Can only handle X*C and X << C. | |||
4110 | return isa<ConstantInt>(I->getOperand(1)); | |||
4111 | case Instruction::GetElementPtr: | |||
4112 | return true; | |||
4113 | default: | |||
4114 | return false; | |||
4115 | } | |||
4116 | } | |||
4117 | ||||
4118 | /// Check whether or not \p Val is a legal instruction for \p TLI. | |||
4119 | /// \note \p Val is assumed to be the product of some type promotion. | |||
4120 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed | |||
4121 | /// to be legal, as the non-promoted value would have had the same state. | |||
4122 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, | |||
4123 | const DataLayout &DL, Value *Val) { | |||
4124 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); | |||
4125 | if (!PromotedInst) | |||
4126 | return false; | |||
4127 | int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); | |||
4128 | // If the ISDOpcode is undefined, it was undefined before the promotion. | |||
4129 | if (!ISDOpcode) | |||
4130 | return true; | |||
4131 | // Otherwise, check if the promoted instruction is legal or not. | |||
4132 | return TLI.isOperationLegalOrCustom( | |||
4133 | ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); | |||
4134 | } | |||
4135 | ||||
4136 | namespace { | |||
4137 | ||||
4138 | /// Helper class to perform type promotion. | |||
4139 | class TypePromotionHelper { | |||
4140 | /// Utility function to add a promoted instruction \p ExtOpnd to | |||
4141 | /// \p PromotedInsts and record the type of extension we have seen. | |||
4142 | static void addPromotedInst(InstrToOrigTy &PromotedInsts, | |||
4143 | Instruction *ExtOpnd, bool IsSExt) { | |||
4144 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | |||
4145 | InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); | |||
4146 | if (It != PromotedInsts.end()) { | |||
4147 | // If the new extension is the same as the original, the information in | |||
4148 | // PromotedInsts[ExtOpnd] is still correct. | |||
4149 | if (It->second.getInt() == ExtTy) | |||
4150 | return; | |||
4151 | ||||
4152 | // Now that the new extension differs from the old extension, we make | |||
4153 | // the type information invalid by setting the extension type to | |||
4154 | // BothExtension. | |||
4155 | ExtTy = BothExtension; | |||
4156 | } | |||
4157 | PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); | |||
4158 | } | |||
4159 | ||||
4160 | /// Utility function to query the original type of instruction \p Opnd | |||
4161 | /// with a matched extension type. If the extension doesn't match, we | |||
4162 | /// cannot use the information we had on the original type. | |||
4163 | /// BothExtension doesn't match any extension type. | |||
4164 | static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, | |||
4165 | Instruction *Opnd, bool IsSExt) { | |||
4166 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | |||
4167 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); | |||
4168 | if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) | |||
4169 | return It->second.getPointer(); | |||
4170 | return nullptr; | |||
4171 | } | |||
4172 | ||||
4173 | /// Utility function to check whether or not a sign or zero extension | |||
4174 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by | |||
4175 | /// either using the operands of \p Inst or promoting \p Inst. | |||
4176 | /// The type of the extension is defined by \p IsSExt. | |||
4177 | /// In other words, check if: | |||
4178 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. | |||
4179 | /// #1 Promotion applies: | |||
4180 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). | |||
4181 | /// #2 Operand reuses: | |||
4182 | /// ext opnd1 to ConsideredExtType. | |||
4183 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4184 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, | |||
4185 | const InstrToOrigTy &PromotedInsts, bool IsSExt); | |||
4186 | ||||
4187 | /// Utility function to determine if \p OpIdx should be promoted when | |||
4188 | /// promoting \p Inst. | |||
4189 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { | |||
4190 | return !(isa<SelectInst>(Inst) && OpIdx == 0); | |||
4191 | } | |||
4192 | ||||
4193 | /// Utility function to promote the operand of \p Ext when this | |||
4194 | /// operand is a promotable trunc or sext or zext. | |||
4195 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4196 | /// \p CreatedInstsCost[out] contains the cost of all instructions | |||
4197 | /// created to promote the operand of Ext. | |||
4198 | /// Newly added extensions are inserted in \p Exts. | |||
4199 | /// Newly added truncates are inserted in \p Truncs. | |||
4200 | /// Should never be called directly. | |||
4201 | /// \return The promoted value which is used instead of Ext. | |||
4202 | static Value *promoteOperandForTruncAndAnyExt( | |||
4203 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4204 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4205 | SmallVectorImpl<Instruction *> *Exts, | |||
4206 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); | |||
4207 | ||||
4208 | /// Utility function to promote the operand of \p Ext when this | |||
4209 | /// operand is promotable and is not a supported trunc or sext. | |||
4210 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4211 | /// \p CreatedInstsCost[out] contains the cost of all the instructions | |||
4212 | /// created to promote the operand of Ext. | |||
4213 | /// Newly added extensions are inserted in \p Exts. | |||
4214 | /// Newly added truncates are inserted in \p Truncs. | |||
4215 | /// Should never be called directly. | |||
4216 | /// \return The promoted value which is used instead of Ext. | |||
4217 | static Value *promoteOperandForOther(Instruction *Ext, | |||
4218 | TypePromotionTransaction &TPT, | |||
4219 | InstrToOrigTy &PromotedInsts, | |||
4220 | unsigned &CreatedInstsCost, | |||
4221 | SmallVectorImpl<Instruction *> *Exts, | |||
4222 | SmallVectorImpl<Instruction *> *Truncs, | |||
4223 | const TargetLowering &TLI, bool IsSExt); | |||
4224 | ||||
4225 | /// \see promoteOperandForOther. | |||
4226 | static Value *signExtendOperandForOther( | |||
4227 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4228 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4229 | SmallVectorImpl<Instruction *> *Exts, | |||
4230 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
4231 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
4232 | Exts, Truncs, TLI, true); | |||
4233 | } | |||
4234 | ||||
4235 | /// \see promoteOperandForOther. | |||
4236 | static Value *zeroExtendOperandForOther( | |||
4237 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4238 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4239 | SmallVectorImpl<Instruction *> *Exts, | |||
4240 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
4241 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
4242 | Exts, Truncs, TLI, false); | |||
4243 | } | |||
4244 | ||||
4245 | public: | |||
4246 | /// Type for the utility function that promotes the operand of Ext. | |||
4247 | using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, | |||
4248 | InstrToOrigTy &PromotedInsts, | |||
4249 | unsigned &CreatedInstsCost, | |||
4250 | SmallVectorImpl<Instruction *> *Exts, | |||
4251 | SmallVectorImpl<Instruction *> *Truncs, | |||
4252 | const TargetLowering &TLI); | |||
4253 | ||||
4254 | /// Given a sign/zero extend instruction \p Ext, return the appropriate | |||
4255 | /// action to promote the operand of \p Ext instead of using Ext. | |||
4256 | /// \return NULL if no promotable action is possible with the current | |||
4257 | /// sign extension. | |||
4258 | /// \p InsertedInsts keeps track of all the instructions inserted by the | |||
4259 | /// other CodeGenPrepare optimizations. This information is important | |||
4260 | /// because we do not want to promote these instructions as CodeGenPrepare | |||
4261 | /// will reinsert them later. Thus creating an infinite loop: create/remove. | |||
4262 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
4263 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
4264 | const TargetLowering &TLI, | |||
4265 | const InstrToOrigTy &PromotedInsts); | |||
4266 | }; | |||
4267 | ||||
4268 | } // end anonymous namespace | |||
4269 | ||||
4270 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, | |||
4271 | Type *ConsideredExtType, | |||
4272 | const InstrToOrigTy &PromotedInsts, | |||
4273 | bool IsSExt) { | |||
4274 | // The promotion helper does not know how to deal with vector types yet. | |||
4275 | // To be able to fix that, we would need to fix the places where we | |||
4276 | // statically extend, e.g., constants and such. | |||
4277 | if (Inst->getType()->isVectorTy()) | |||
4278 | return false; | |||
4279 | ||||
4280 | // We can always get through zext. | |||
4281 | if (isa<ZExtInst>(Inst)) | |||
4282 | return true; | |||
4283 | ||||
4284 | // sext(sext) is ok too. | |||
4285 | if (IsSExt && isa<SExtInst>(Inst)) | |||
4286 | return true; | |||
4287 | ||||
4288 | // We can get through a binary operator if it is legal. In other words, the | |||
4289 | // binary operator must have a nuw or nsw flag. | |||
4290 | if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst)) | |||
4291 | if (isa<OverflowingBinaryOperator>(BinOp) && | |||
4292 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || | |||
4293 | (IsSExt && BinOp->hasNoSignedWrap()))) | |||
4294 | return true; | |||
4295 | ||||
4296 | // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)) | |||
4297 | if ((Inst->getOpcode() == Instruction::And || | |||
4298 | Inst->getOpcode() == Instruction::Or)) | |||
4299 | return true; | |||
4300 | ||||
4301 | // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) | |||
4302 | if (Inst->getOpcode() == Instruction::Xor) { | |||
4303 | // Make sure it is not a NOT. | |||
4304 | if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1))) | |||
4305 | if (!Cst->getValue().isAllOnes()) | |||
4306 | return true; | |||
4307 | } | |||
4308 | ||||
4309 | // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst)) | |||
4310 | // It may change a poisoned value into a regular value, like | |||
4311 | // zext i32 (lshr i8 %val, 12) --> lshr i32 (zext i8 %val), 12 | |||
4312 | // (poisoned value) (regular value) | |||
4313 | // It should be OK since undef covers any valid value. | |||
4314 | if (Inst->getOpcode() == Instruction::LShr && !IsSExt) | |||
4315 | return true; | |||
4316 | ||||
4317 | // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) | |||
4318 | // It may change a poisoned value into a regular value, like | |||
4319 | // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 | |||
4320 | // (poisoned value) (regular value) | |||
4321 | // It should be OK since undef covers any valid value. | |||
4322 | if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { | |||
4323 | const auto *ExtInst = cast<const Instruction>(*Inst->user_begin()); | |||
4324 | if (ExtInst->hasOneUse()) { | |||
4325 | const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin()); | |||
4326 | if (AndInst && AndInst->getOpcode() == Instruction::And) { | |||
4327 | const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); | |||
4328 | if (Cst && | |||
4329 | Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) | |||
4330 | return true; | |||
4331 | } | |||
4332 | } | |||
4333 | } | |||
4334 | ||||
4335 | // Check if we can do the following simplification. | |||
4336 | // ext(trunc(opnd)) --> ext(opnd) | |||
4337 | if (!isa<TruncInst>(Inst)) | |||
4338 | return false; | |||
4339 | ||||
4340 | Value *OpndVal = Inst->getOperand(0); | |||
4341 | // Check if we can use this operand in the extension. | |||
4342 | // If the type is larger than the result type of the extension, we cannot. | |||
4343 | if (!OpndVal->getType()->isIntegerTy() || | |||
4344 | OpndVal->getType()->getIntegerBitWidth() > | |||
4345 | ConsideredExtType->getIntegerBitWidth()) | |||
4346 | return false; | |||
4347 | ||||
4348 | // If the operand of the truncate is not an instruction, we will not have | |||
4349 | // any information on the dropped bits. | |||
4350 | // (Actually we could for constant but it is not worth the extra logic). | |||
4351 | Instruction *Opnd = dyn_cast<Instruction>(OpndVal); | |||
4352 | if (!Opnd) | |||
4353 | return false; | |||
4354 | ||||
4355 | // Check if the source of the type is narrow enough. | |||
4356 | // I.e., check that trunc just drops extended bits of the same kind of | |||
4357 | // the extension. | |||
4358 | // #1 get the type of the operand and check the kind of the extended bits. | |||
4359 | const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); | |||
4360 | if (OpndType) | |||
4361 | ; | |||
4362 | else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) | |||
4363 | OpndType = Opnd->getOperand(0)->getType(); | |||
4364 | else | |||
4365 | return false; | |||
4366 | ||||
4367 | // #2 check that the truncate just drops extended bits. | |||
4368 | return Inst->getType()->getIntegerBitWidth() >= | |||
4369 | OpndType->getIntegerBitWidth(); | |||
4370 | } | |||
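// Example of the final trunc rule, on hypothetical IR: for
//   %t = trunc i32 %x to i16
//   %e = sext i16 %t to i64
// if %x is known to be sign extended from i16 (either via getOrigType or
// because it is itself a sext), the trunc only drops sign-extension bits, so
// the sext can safely look through it.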
4371 | ||||
4372 | TypePromotionHelper::Action TypePromotionHelper::getAction( | |||
4373 | Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
4374 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { | |||
4375 | assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | |||
4376 | "Unexpected instruction type"); | |||
4377 | Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); | |||
4378 | Type *ExtTy = Ext->getType(); | |||
4379 | bool IsSExt = isa<SExtInst>(Ext); | |||
4380 | // If the operand of the extension is not an instruction, we cannot | |||
4381 | // get through. | |||
4382 | // If it is, check whether we can get through. | |||
4383 | if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) | |||
4384 | return nullptr; | |||
4385 | ||||
4386 | // Do not promote if the operand has been added by codegenprepare. | |||
4387 | // Otherwise, it means we are undoing an optimization that is likely to be | |||
4388 | // redone, thus causing a potential infinite loop. | |||
4389 | if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) | |||
4390 | return nullptr; | |||
4391 | ||||
4392 | // SExt or Trunc instructions. | |||
4393 | // Return the related handler. | |||
4394 | if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || | |||
4395 | isa<ZExtInst>(ExtOpnd)) | |||
4396 | return promoteOperandForTruncAndAnyExt; | |||
4397 | ||||
4398 | // Regular instruction. | |||
4399 | // Abort early if we will have to insert non-free instructions. | |||
4400 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) | |||
4401 | return nullptr; | |||
4402 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; | |||
4403 | } | |||
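// Dispatch example, on hypothetical IR: for
//   %z = zext i8 %a to i16
//   %s = sext i16 %z to i32
// the operand %z is itself an ext, so promoteOperandForTruncAndAnyExt is
// returned; for a sext whose operand is a promotable nsw add, the
// signExtendOperandForOther handler is returned instead.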
4404 | ||||
4405 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( | |||
4406 | Instruction *SExt, TypePromotionTransaction &TPT, | |||
4407 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4408 | SmallVectorImpl<Instruction *> *Exts, | |||
4409 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
4410 | // By construction, the operand of SExt is an instruction. Otherwise we cannot | |||
4411 | // get through it and this method should not be called. | |||
4412 | Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); | |||
4413 | Value *ExtVal = SExt; | |||
4414 | bool HasMergedNonFreeExt = false; | |||
4415 | if (isa<ZExtInst>(SExtOpnd)) { | |||
4416 | // Replace s|zext(zext(opnd)) | |||
4417 | // => zext(opnd). | |||
4418 | HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); | |||
4419 | Value *ZExt = | |||
4420 | TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); | |||
4421 | TPT.replaceAllUsesWith(SExt, ZExt); | |||
4422 | TPT.eraseInstruction(SExt); | |||
4423 | ExtVal = ZExt; | |||
4424 | } else { | |||
4425 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) | |||
4426 | // => z|sext(opnd). | |||
4427 | TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); | |||
4428 | } | |||
4429 | CreatedInstsCost = 0; | |||
4430 | ||||
4431 | // Remove dead code. | |||
4432 | if (SExtOpnd->use_empty()) | |||
4433 | TPT.eraseInstruction(SExtOpnd); | |||
4434 | ||||
4435 | // Check if the extension is still needed. | |||
4436 | Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); | |||
4437 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { | |||
4438 | if (ExtInst) { | |||
4439 | if (Exts) | |||
4440 | Exts->push_back(ExtInst); | |||
4441 | CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; | |||
4442 | } | |||
4443 | return ExtVal; | |||
4444 | } | |||
4445 | ||||
4446 | // At this point we have: ext ty opnd to ty, i.e. a no-op extension. | |||
4447 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. | |||
4448 | Value *NextVal = ExtInst->getOperand(0); | |||
4449 | TPT.eraseInstruction(ExtInst, NextVal); | |||
4450 | return NextVal; | |||
4451 | } | |||
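// Editor's note: a sketch of the rewrite performed above (hypothetical IR):
//   before:  %z = zext i8 %x to i16
//            %s = sext i16 %z to i32
//   after:   %z2 = zext i8 %x to i32    ; s|zext(zext(x)) => zext(x)
// If the resulting extension is a no-op (same source and destination type),
// it is erased and its operand is returned directly.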
4452 | ||||
4453 | Value *TypePromotionHelper::promoteOperandForOther( | |||
4454 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
4455 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
4456 | SmallVectorImpl<Instruction *> *Exts, | |||
4457 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, | |||
4458 | bool IsSExt) { | |||
4459 | // By construction, the operand of Ext is an instruction. Otherwise we cannot | |||
4460 | // get through it and this method should not be called. | |||
4461 | Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); | |||
4462 | CreatedInstsCost = 0; | |||
4463 | if (!ExtOpnd->hasOneUse()) { | |||
4464 | // ExtOpnd will be promoted. | |||
4465 | // All its uses except Ext will need to use a truncated value of the | |||
4466 | // promoted version. | |||
4467 | // Create the truncate now. | |||
4468 | Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); | |||
4469 | if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { | |||
4470 | // Insert it just after the definition. | |||
4471 | ITrunc->moveAfter(ExtOpnd); | |||
4472 | if (Truncs) | |||
4473 | Truncs->push_back(ITrunc); | |||
4474 | } | |||
4475 | ||||
4476 | TPT.replaceAllUsesWith(ExtOpnd, Trunc); | |||
4477 | // Restore the operand of Ext (which has been replaced by the previous call | |||
4478 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. | |||
4479 | TPT.setOperand(Ext, 0, ExtOpnd); | |||
4480 | } | |||
4481 | ||||
4482 | // Get through the Instruction: | |||
4483 | // 1. Update its type. | |||
4484 | // 2. Replace the uses of Ext by Inst. | |||
4485 | // 3. Extend each operand that needs to be extended. | |||
4486 | ||||
4487 | // Remember the original type of the instruction before promotion. | |||
4488 | // This is useful to know that the high bits are sign extended bits. | |||
4489 | addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); | |||
4490 | // Step #1. | |||
4491 | TPT.mutateType(ExtOpnd, Ext->getType()); | |||
4492 | // Step #2. | |||
4493 | TPT.replaceAllUsesWith(Ext, ExtOpnd); | |||
4494 | // Step #3. | |||
4495 | Instruction *ExtForOpnd = Ext; | |||
4496 | ||||
4497 | LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); | |||
4498 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; | |||
4499 | ++OpIdx) { | |||
4500 | LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); | |||
4501 | if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || | |||
4502 | !shouldExtOperand(ExtOpnd, OpIdx)) { | |||
4503 | LLVM_DEBUG(dbgs() << "No need to propagate\n"); | |||
4504 | continue; | |||
4505 | } | |||
4506 | // Check if we can statically extend the operand. | |||
4507 | Value *Opnd = ExtOpnd->getOperand(OpIdx); | |||
4508 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { | |||
4509 | LLVM_DEBUG(dbgs() << "Statically extend\n"); | |||
4510 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); | |||
4511 | APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) | |||
4512 | : Cst->getValue().zext(BitWidth); | |||
4513 | TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); | |||
4514 | continue; | |||
4515 | } | |||
4516 | // UndefValues are typed, so we have to statically extend them. | |||
4517 | if (isa<UndefValue>(Opnd)) { | |||
4518 | LLVM_DEBUG(dbgs() << "Statically extend\n"); | |||
4519 | TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); | |||
4520 | continue; | |||
4521 | } | |||
4522 | ||||
4523 | // Otherwise we have to explicitly extend the operand. | |||
4524 | // Check if Ext was already reused to extend an operand. | |||
4525 | if (!ExtForOpnd) { | |||
4526 | // If so, create a new extension. | |||
4527 | LLVM_DEBUG(dbgs() << "More operands to ext\n"); | |||
4528 | Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) | |||
4529 | : TPT.createZExt(Ext, Opnd, Ext->getType()); | |||
4530 | if (!isa<Instruction>(ValForExtOpnd)) { | |||
4531 | TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); | |||
4532 | continue; | |||
4533 | } | |||
4534 | ExtForOpnd = cast<Instruction>(ValForExtOpnd); | |||
4535 | } | |||
4536 | if (Exts) | |||
4537 | Exts->push_back(ExtForOpnd); | |||
4538 | TPT.setOperand(ExtForOpnd, 0, Opnd); | |||
4539 | ||||
4540 | // Move the extension right before the promoted instruction. | |||
4541 | TPT.moveBefore(ExtForOpnd, ExtOpnd); | |||
4542 | TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); | |||
4543 | CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); | |||
4544 | // If more extensions are required, new instructions will have to be created. | |||
4545 | ExtForOpnd = nullptr; | |||
4546 | } | |||
4547 | if (ExtForOpnd == Ext) { | |||
4548 | LLVM_DEBUG(dbgs() << "Extension is useless now\n"); | |||
4549 | TPT.eraseInstruction(Ext); | |||
4550 | } | |||
4551 | return ExtOpnd; | |||
4552 | } | |||
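// Editor's note: the "statically extend" step above follows ordinary two's
// complement widening. A self-contained C++ sketch of the semantics (plain
// integers standing in for APInt; the values are hypothetical):
//
//   #include <cstdint>
//   #include <cstdio>
//   int main() {
//     int8_t c = -2;                        // i8 constant operand
//     int32_t s = c;                        // sext: high bits copy the sign bit
//     uint32_t z = static_cast<uint8_t>(c); // zext: high bits are zero
//     std::printf("%d %u\n", s, z);         // prints: -2 254
//   }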
4553 | ||||
4554 | /// Check whether or not promoting an instruction to a wider type is profitable. | |||
4555 | /// \p NewCost gives the cost of extension instructions created by the | |||
4556 | /// promotion. | |||
4557 | /// \p OldCost gives the cost of extension instructions before the promotion | |||
4558 | /// plus the number of instructions that have been | |||
4559 | /// matched in the addressing mode thanks to the promotion. | |||
4560 | /// \p PromotedOperand is the value that has been promoted. | |||
4561 | /// \return True if the promotion is profitable, false otherwise. | |||
4562 | bool AddressingModeMatcher::isPromotionProfitable( | |||
4563 | unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { | |||
4564 | LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost | |||
4565 |                   << '\n'); | |||
4566 | // If the cost of the new extensions is greater than the cost of the | |||
4567 | // old extension plus what we folded, | |||
4568 | // the promotion is not profitable. | |||
4569 | if (NewCost > OldCost) | |||
4570 | return false; | |||
4571 | if (NewCost < OldCost) | |||
4572 | return true; | |||
4573 | // The promotion is neutral but it may help folding the sign extension in | |||
4574 | // loads for instance. | |||
4575 | // Check that we did not create an illegal instruction. | |||
4576 | return isPromotedInstructionLegal(TLI, DL, PromotedOperand); | |||
4577 | } | |||
4578 | ||||
4579 | /// Given an instruction or constant expr, see if we can fold the operation | |||
4580 | /// into the addressing mode. If so, update the addressing mode and return | |||
4581 | /// true, otherwise return false without modifying AddrMode. | |||
4582 | /// If \p MovedAway is not NULL, it contains the information of whether or | |||
4583 | /// not AddrInst has to be folded into the addressing mode on success. | |||
4584 | /// If \p MovedAway == true, \p AddrInst will not be part of the addressing | |||
4585 | /// mode because it has been moved away. | |||
4586 | /// Thus AddrInst must not be added in the matched instructions. | |||
4587 | /// This state can happen when AddrInst is a sext, since it may be moved away. | |||
4588 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must | |||
4589 | /// not be referenced anymore. | |||
4590 | bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, | |||
4591 | unsigned Depth, | |||
4592 | bool *MovedAway) { | |||
4593 | // Avoid exponential behavior on extremely deep expression trees. | |||
4594 | if (Depth >= 5) | |||
4595 | return false; | |||
4596 | ||||
4597 | // By default, all matched instructions stay in place. | |||
4598 | if (MovedAway) | |||
4599 | *MovedAway = false; | |||
4600 | ||||
4601 | switch (Opcode) { | |||
4602 | case Instruction::PtrToInt: | |||
4603 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
4604 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4605 | case Instruction::IntToPtr: { | |||
4606 | auto AS = AddrInst->getType()->getPointerAddressSpace(); | |||
4607 | auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); | |||
4608 | // This inttoptr is a no-op if the integer type is pointer sized. | |||
4609 | if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) | |||
4610 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4611 | return false; | |||
4612 | } | |||
4613 | case Instruction::BitCast: | |||
4614 | // BitCast is always a noop, and we can handle it as long as it is | |||
4615 | // int->int or pointer->pointer (we don't want int<->fp or something). | |||
4616 | if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && | |||
4617 | // Don't touch identity bitcasts. These were probably put here by LSR, | |||
4618 | // and we don't want to mess around with them. Assume it knows what it | |||
4619 | // is doing. | |||
4620 | AddrInst->getOperand(0)->getType() != AddrInst->getType()) | |||
4621 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4622 | return false; | |||
4623 | case Instruction::AddrSpaceCast: { | |||
4624 | unsigned SrcAS = | |||
4625 | AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); | |||
4626 | unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); | |||
4627 | if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS)) | |||
4628 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
4629 | return false; | |||
4630 | } | |||
4631 | case Instruction::Add: { | |||
4632 | // Check to see if we can merge in one operand, then the other. If so, we | |||
4633 | // win. | |||
4634 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4635 | unsigned OldSize = AddrModeInsts.size(); | |||
4636 | // Start a transaction at this point. | |||
4637 | // The LHS may match but not the RHS. | |||
4638 | // Therefore, we need a higher level restoration point to undo partially | |||
4639 | // matched operation. | |||
4640 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4641 | TPT.getRestorationPoint(); | |||
4642 | ||||
4643 | // Try to match an integer constant second to increase its chance of ending | |||
4644 | // up in `BaseOffs`, i.e. decrease its chance of ending up in `BaseReg`. | |||
4645 | int First = 0, Second = 1; | |||
4646 | if (isa<ConstantInt>(AddrInst->getOperand(First)) | |||
4647 | && !isa<ConstantInt>(AddrInst->getOperand(Second))) | |||
4648 | std::swap(First, Second); | |||
4649 | AddrMode.InBounds = false; | |||
4650 | if (matchAddr(AddrInst->getOperand(First), Depth + 1) && | |||
4651 | matchAddr(AddrInst->getOperand(Second), Depth + 1)) | |||
4652 | return true; | |||
4653 | ||||
4654 | // Restore the old addr mode info. | |||
4655 | AddrMode = BackupAddrMode; | |||
4656 | AddrModeInsts.resize(OldSize); | |||
4657 | TPT.rollback(LastKnownGood); | |||
4658 | ||||
4659 | // Otherwise this was over-aggressive. Try merging operands in the opposite | |||
4660 | // order. | |||
4661 | if (matchAddr(AddrInst->getOperand(Second), Depth + 1) && | |||
4662 | matchAddr(AddrInst->getOperand(First), Depth + 1)) | |||
4663 | return true; | |||
4664 | ||||
4665 | // Otherwise we definitely can't merge the ADD in. | |||
4666 | AddrMode = BackupAddrMode; | |||
4667 | AddrModeInsts.resize(OldSize); | |||
4668 | TPT.rollback(LastKnownGood); | |||
4669 | break; | |||
4670 | } | |||
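// Editor's note: why both operand orders are tried (hypothetical IR):
//   %s = mul i64 %i, 8
//   %a = add i64 %s, %b
// Matching %s first may greedily claim it as the base register; after the
// rollback, matching %b first leaves %s free to match as ScaledReg with
// Scale = 8.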
4671 | // case Instruction::Or: | |||
4672 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. | |||
4673 | // break; | |||
4674 | case Instruction::Mul: | |||
4675 | case Instruction::Shl: { | |||
4676 | // Can only handle X*C and X << C. | |||
4677 | AddrMode.InBounds = false; | |||
4678 | ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); | |||
4679 | if (!RHS || RHS->getBitWidth() > 64) | |||
4680 | return false; | |||
4681 | int64_t Scale = Opcode == Instruction::Shl | |||
4682 | ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1) | |||
4683 | : RHS->getSExtValue(); | |||
4684 | ||||
4685 | return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); | |||
4686 | } | |||
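// Editor's note: a minimal self-contained C++ check of the shift-to-scale
// conversion above ("X << C" addresses like "X * (1 << C)"); the values are
// hypothetical:
//
//   #include <cassert>
//   #include <cstdint>
//   int main() {
//     int64_t X = 7;
//     unsigned C = 3;
//     assert((X << C) == X * (1LL << C)); // a "<< 3" becomes Scale = 8
//   }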
4687 | case Instruction::GetElementPtr: { | |||
4688 | // Scan the GEP. We check whether it contains constant offsets and at most | |||
4689 | // one variable offset. | |||
4690 | int VariableOperand = -1; | |||
4691 | unsigned VariableScale = 0; | |||
4692 | ||||
4693 | int64_t ConstantOffset = 0; | |||
4694 | gep_type_iterator GTI = gep_type_begin(AddrInst); | |||
4695 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { | |||
4696 | if (StructType *STy = GTI.getStructTypeOrNull()) { | |||
4697 | const StructLayout *SL = DL.getStructLayout(STy); | |||
4698 | unsigned Idx = | |||
4699 | cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); | |||
4700 | ConstantOffset += SL->getElementOffset(Idx); | |||
4701 | } else { | |||
4702 | TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType()); | |||
4703 | if (TS.isNonZero()) { | |||
4704 | // The optimisations below currently only work for fixed offsets. | |||
4705 | if (TS.isScalable()) | |||
4706 | return false; | |||
4707 | int64_t TypeSize = TS.getFixedValue(); | |||
4708 | if (ConstantInt *CI = | |||
4709 | dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { | |||
4710 | const APInt &CVal = CI->getValue(); | |||
4711 | if (CVal.getSignificantBits() <= 64) { | |||
4712 | ConstantOffset += CVal.getSExtValue() * TypeSize; | |||
4713 | continue; | |||
4714 | } | |||
4715 | } | |||
4716 | // We only allow one variable index at the moment. | |||
4717 | if (VariableOperand != -1) | |||
4718 | return false; | |||
4719 | ||||
4720 | // Remember the variable index. | |||
4721 | VariableOperand = i; | |||
4722 | VariableScale = TypeSize; | |||
4723 | } | |||
4724 | } | |||
4725 | } | |||
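// Editor's note: for all-constant indices the loop above folds the whole GEP
// into one byte offset, much like field access in C++. A hypothetical analogue:
//
//   #include <cstddef>
//   struct S { int a; double b[4]; };
//   // A GEP such as "gep %S, i64 0, i32 1, i64 2" folds to the constant
//   // byte offset offsetof(S, b) + 2 * sizeof(double); no variable index.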
4726 | ||||
4727 | // A common case is for the GEP to only do a constant offset. In this case, | |||
4728 | // just add it to the disp field and check validity. | |||
4729 | if (VariableOperand == -1) { | |||
4730 | AddrMode.BaseOffs += ConstantOffset; | |||
4731 | if (matchAddr(AddrInst->getOperand(0), Depth + 1)) { | |||
4732 | if (!cast<GEPOperator>(AddrInst)->isInBounds()) | |||
4733 | AddrMode.InBounds = false; | |||
4734 | return true; | |||
4735 | } | |||
4736 | AddrMode.BaseOffs -= ConstantOffset; | |||
4737 | ||||
4738 | if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && | |||
4739 | TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && | |||
4740 | ConstantOffset > 0) { | |||
4741 | // Record GEPs with non-zero offsets as candidates for splitting in | |||
4742 | // the event that the offset cannot fit into the r+i addressing mode. | |||
4743 | // Simple and common case that only one GEP is used in calculating the | |||
4744 | // address for the memory access. | |||
4745 | Value *Base = AddrInst->getOperand(0); | |||
4746 | auto *BaseI = dyn_cast<Instruction>(Base); | |||
4747 | auto *GEP = cast<GetElementPtrInst>(AddrInst); | |||
4748 | if (isa<Argument>(Base) || isa<GlobalValue>(Base) || | |||
4749 | (BaseI && !isa<CastInst>(BaseI) && | |||
4750 | !isa<GetElementPtrInst>(BaseI))) { | |||
4751 | // Make sure the parent block allows inserting non-PHI instructions | |||
4752 | // before the terminator. | |||
4753 | BasicBlock *Parent = BaseI ? BaseI->getParent() | |||
4754 | : &GEP->getFunction()->getEntryBlock(); | |||
4755 | if (!Parent->getTerminator()->isEHPad()) | |||
4756 | LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); | |||
4757 | } | |||
4758 | } | |||
4759 | ||||
4760 | return false; | |||
4761 | } | |||
4762 | ||||
4763 | // Save the valid addressing mode in case we can't match. | |||
4764 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4765 | unsigned OldSize = AddrModeInsts.size(); | |||
4766 | ||||
4767 | // See if the scale and offset amount is valid for this target. | |||
4768 | AddrMode.BaseOffs += ConstantOffset; | |||
4769 | if (!cast<GEPOperator>(AddrInst)->isInBounds()) | |||
4770 | AddrMode.InBounds = false; | |||
4771 | ||||
4772 | // Match the base operand of the GEP. | |||
4773 | if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) { | |||
4774 | // If it couldn't be matched, just stuff the value in a register. | |||
4775 | if (AddrMode.HasBaseReg) { | |||
4776 | AddrMode = BackupAddrMode; | |||
4777 | AddrModeInsts.resize(OldSize); | |||
4778 | return false; | |||
4779 | } | |||
4780 | AddrMode.HasBaseReg = true; | |||
4781 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
4782 | } | |||
4783 | ||||
4784 | // Match the remaining variable portion of the GEP. | |||
4785 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, | |||
4786 | Depth)) { | |||
4787 | // If it couldn't be matched, try stuffing the base into a register | |||
4788 | // instead of matching it, and retrying the match of the scale. | |||
4789 | AddrMode = BackupAddrMode; | |||
4790 | AddrModeInsts.resize(OldSize); | |||
4791 | if (AddrMode.HasBaseReg) | |||
4792 | return false; | |||
4793 | AddrMode.HasBaseReg = true; | |||
4794 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
4795 | AddrMode.BaseOffs += ConstantOffset; | |||
4796 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), | |||
4797 | VariableScale, Depth)) { | |||
4798 | // If even that didn't work, bail. | |||
4799 | AddrMode = BackupAddrMode; | |||
4800 | AddrModeInsts.resize(OldSize); | |||
4801 | return false; | |||
4802 | } | |||
4803 | } | |||
4804 | ||||
4805 | return true; | |||
4806 | } | |||
4807 | case Instruction::SExt: | |||
4808 | case Instruction::ZExt: { | |||
4809 | Instruction *Ext = dyn_cast<Instruction>(AddrInst); | |||
4810 | if (!Ext) | |||
4811 | return false; | |||
4812 | ||||
4813 | // Try to move this ext out of the way of the addressing mode. | |||
4814 | // Ask for a method for doing so. | |||
4815 | TypePromotionHelper::Action TPH = | |||
4816 | TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); | |||
4817 | if (!TPH) | |||
4818 | return false; | |||
4819 | ||||
4820 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4821 | TPT.getRestorationPoint(); | |||
4822 | unsigned CreatedInstsCost = 0; | |||
4823 | unsigned ExtCost = !TLI.isExtFree(Ext); | |||
4824 | Value *PromotedOperand = | |||
4825 | TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); | |||
4826 | // SExt has been moved away. | |||
4827 | // Thus either it will be rematched later in the recursive calls or it is | |||
4828 | // gone. Anyway, we must not fold it into the addressing mode at this point. | |||
4829 | // E.g., | |||
4830 | // op = add opnd, 1 | |||
4831 | // idx = ext op | |||
4832 | // addr = gep base, idx | |||
4833 | // is now: | |||
4834 | // promotedOpnd = ext opnd <- no match here | |||
4835 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) | |||
4836 | // addr = gep base, op <- match | |||
4837 | if (MovedAway) | |||
4838 | *MovedAway = true; | |||
4839 | ||||
4840 | assert(PromotedOperand && | |||
4841 |        "TypePromotionHelper should have filtered out those cases"); | |||
4842 | ||||
4843 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4844 | unsigned OldSize = AddrModeInsts.size(); | |||
4845 | ||||
4846 | if (!matchAddr(PromotedOperand, Depth) || | |||
4847 | // The total of the new cost is equal to the cost of the created | |||
4848 | // instructions. | |||
4849 | // The total of the old cost is equal to the cost of the extension plus | |||
4850 | // what we have saved in the addressing mode. | |||
4851 | !isPromotionProfitable(CreatedInstsCost, | |||
4852 | ExtCost + (AddrModeInsts.size() - OldSize), | |||
4853 | PromotedOperand)) { | |||
4854 | AddrMode = BackupAddrMode; | |||
4855 | AddrModeInsts.resize(OldSize); | |||
4856 | LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); | |||
4857 | TPT.rollback(LastKnownGood); | |||
4858 | return false; | |||
4859 | } | |||
4860 | return true; | |||
4861 | } | |||
4862 | } | |||
4863 | return false; | |||
4864 | } | |||
4865 | ||||
4866 | /// If we can, try to add the value of 'Addr' into the current addressing mode. | |||
4867 | /// If Addr can't be added to AddrMode this returns false and leaves AddrMode | |||
4868 | /// unmodified. This assumes that Addr is either a pointer type or intptr_t | |||
4869 | /// for the target. | |||
4870 | /// | |||
4871 | bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { | |||
4872 | // Start a transaction at this point that we will rollback if the matching | |||
4873 | // fails. | |||
4874 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4875 | TPT.getRestorationPoint(); | |||
4876 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { | |||
4877 | if (CI->getValue().isSignedIntN(64)) { | |||
4878 | // Fold in immediates if legal for the target. | |||
4879 | AddrMode.BaseOffs += CI->getSExtValue(); | |||
4880 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4881 | return true; | |||
4882 | AddrMode.BaseOffs -= CI->getSExtValue(); | |||
4883 | } | |||
4884 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { | |||
4885 | // If this is a global variable, try to fold it into the addressing mode. | |||
4886 | if (!AddrMode.BaseGV) { | |||
4887 | AddrMode.BaseGV = GV; | |||
4888 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4889 | return true; | |||
4890 | AddrMode.BaseGV = nullptr; | |||
4891 | } | |||
4892 | } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { | |||
4893 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4894 | unsigned OldSize = AddrModeInsts.size(); | |||
4895 | ||||
4896 | // Check to see if it is possible to fold this operation. | |||
4897 | bool MovedAway = false; | |||
4898 | if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { | |||
4899 | // This instruction may have been moved away. If so, there is nothing | |||
4900 | // to check here. | |||
4901 | if (MovedAway) | |||
4902 | return true; | |||
4903 | // Okay, it's possible to fold this. Check to see if it is actually | |||
4904 | // *profitable* to do so. We use a simple cost model to avoid increasing | |||
4905 | // register pressure too much. | |||
4906 | if (I->hasOneUse() || | |||
4907 | isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { | |||
4908 | AddrModeInsts.push_back(I); | |||
4909 | return true; | |||
4910 | } | |||
4911 | ||||
4912 | // It isn't profitable to do this, roll back. | |||
4913 | AddrMode = BackupAddrMode; | |||
4914 | AddrModeInsts.resize(OldSize); | |||
4915 | TPT.rollback(LastKnownGood); | |||
4916 | } | |||
4917 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { | |||
4918 | if (matchOperationAddr(CE, CE->getOpcode(), Depth)) | |||
4919 | return true; | |||
4920 | TPT.rollback(LastKnownGood); | |||
4921 | } else if (isa<ConstantPointerNull>(Addr)) { | |||
4922 | // Null pointer gets folded without affecting the addressing mode. | |||
4923 | return true; | |||
4924 | } | |||
4925 | ||||
4926 | // Worst case, the target should support [reg] addressing modes. :) | |||
4927 | if (!AddrMode.HasBaseReg) { | |||
4928 | AddrMode.HasBaseReg = true; | |||
4929 | AddrMode.BaseReg = Addr; | |||
4930 | // Still check for legality in case the target supports [imm] but not [i+r]. | |||
4931 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4932 | return true; | |||
4933 | AddrMode.HasBaseReg = false; | |||
4934 | AddrMode.BaseReg = nullptr; | |||
4935 | } | |||
4936 | ||||
4937 | // If the base register is already taken, see if we can do [r+r]. | |||
4938 | if (AddrMode.Scale == 0) { | |||
4939 | AddrMode.Scale = 1; | |||
4940 | AddrMode.ScaledReg = Addr; | |||
4941 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4942 | return true; | |||
4943 | AddrMode.Scale = 0; | |||
4944 | AddrMode.ScaledReg = nullptr; | |||
4945 | } | |||
4946 | // Couldn't match. | |||
4947 | TPT.rollback(LastKnownGood); | |||
4948 | return false; | |||
4949 | } | |||
4950 | ||||
4951 | /// Check to see if all uses of OpVal by the specified inline asm call are due | |||
4952 | /// to memory operands. If so, return true, otherwise return false. | |||
4953 | static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, | |||
4954 | const TargetLowering &TLI, | |||
4955 | const TargetRegisterInfo &TRI) { | |||
4956 | const Function *F = CI->getFunction(); | |||
4957 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
4958 | TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI); | |||
4959 | ||||
4960 | for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) { | |||
4961 | // Compute the constraint code and ConstraintType to use. | |||
4962 | TLI.ComputeConstraintToUse(OpInfo, SDValue()); | |||
4963 | ||||
4964 | // If this asm operand is our Value*, and if it isn't an indirect memory | |||
4965 | // operand, we can't fold it! TODO: Also handle C_Address? | |||
4966 | if (OpInfo.CallOperandVal == OpVal && | |||
4967 | (OpInfo.ConstraintType != TargetLowering::C_Memory || | |||
4968 | !OpInfo.isIndirect)) | |||
4969 | return false; | |||
4970 | } | |||
4971 | ||||
4972 | return true; | |||
4973 | } | |||
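// Editor's note: a hypothetical GCC-style x86 example of the distinction
// checked above:
//
//   int v, *p = /* ... */;
//   asm("movl %1, %0" : "=r"(v) : "m"(*p));  // "m": indirect memory operand,
//                                            // folding the address is allowed
//   asm("movl (%1), %0" : "=r"(v) : "r"(p)); // "r": address forced into a
//                                            // register, folding is blocked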
4974 | ||||
4975 | /// Recursively walk all the uses of I until we find a memory use. | |||
4976 | /// If we find an obviously non-foldable instruction, return true. | |||
4977 | /// Add accessed addresses and types to MemoryUses. | |||
4978 | static bool FindAllMemoryUses( | |||
4979 | Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses, | |||
4980 | SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, | |||
4981 | const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI, | |||
4982 | BlockFrequencyInfo *BFI, unsigned &SeenInsts) { | |||
4983 | // If we already considered this instruction, we're done. | |||
4984 | if (!ConsideredInsts.insert(I).second) | |||
4985 | return false; | |||
4986 | ||||
4987 | // If this is an obviously unfoldable instruction, bail out. | |||
4988 | if (!MightBeFoldableInst(I)) | |||
4989 | return true; | |||
4990 | ||||
4991 | // Loop over all the uses, recursively processing them. | |||
4992 | for (Use &U : I->uses()) { | |||
4993 | // Conservatively return true if we're seeing a large number or a deep chain | |||
4994 | // of users. This avoids excessive compilation times in pathological cases. | |||
4995 | if (SeenInsts++ >= MaxAddressUsersToScan) | |||
4996 | return true; | |||
4997 | ||||
4998 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
4999 | if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { | |||
5000 | MemoryUses.push_back({&U, LI->getType()}); | |||
5001 | continue; | |||
5002 | } | |||
5003 | ||||
5004 | if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { | |||
5005 | if (U.getOperandNo() != StoreInst::getPointerOperandIndex()) | |||
5006 | return true; // Storing addr, not into addr. | |||
5007 | MemoryUses.push_back({&U, SI->getValueOperand()->getType()}); | |||
5008 | continue; | |||
5009 | } | |||
5010 | ||||
5011 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { | |||
5012 | if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex()) | |||
5013 | return true; // Storing addr, not into addr. | |||
5014 | MemoryUses.push_back({&U, RMW->getValOperand()->getType()}); | |||
5015 | continue; | |||
5016 | } | |||
5017 | ||||
5018 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { | |||
5019 | if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex()) | |||
5020 | return true; // Storing addr, not into addr. | |||
5021 | MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()}); | |||
5022 | continue; | |||
5023 | } | |||
5024 | ||||
5025 | if (CallInst *CI = dyn_cast<CallInst>(UserI)) { | |||
5026 | if (CI->hasFnAttr(Attribute::Cold)) { | |||
5027 | // If this is a cold call, we can sink the addressing calculation into | |||
5028 | // the cold path; see optimizeCallInst. | |||
5029 | bool OptForSize = | |||
5030 | OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI); | |||
5031 | if (!OptForSize) | |||
5032 | continue; | |||
5033 | } | |||
5034 | ||||
5035 | InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand()); | |||
5036 | if (!IA) | |||
5037 | return true; | |||
5038 | ||||
5039 | // If this is a memory operand, we're cool, otherwise bail out. | |||
5040 | if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) | |||
5041 | return true; | |||
5042 | continue; | |||
5043 | } | |||
5044 | ||||
5045 | if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, | |||
5046 | PSI, BFI, SeenInsts)) | |||
5047 | return true; | |||
5048 | } | |||
5049 | ||||
5050 | return false; | |||
5051 | } | |||
5052 | ||||
5053 | static bool FindAllMemoryUses( | |||
5054 | Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses, | |||
5055 | const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize, | |||
5056 | ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) { | |||
5057 | unsigned SeenInsts = 0; | |||
5058 | SmallPtrSet<Instruction *, 16> ConsideredInsts; | |||
5059 | return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize, | |||
5060 | PSI, BFI, SeenInsts); | |||
5061 | } | |||
5062 | ||||
5063 | ||||
5064 | /// Return true if Val is already known to be live at the use site that we're | |||
5065 | /// folding it into. If so, there is no cost to include it in the addressing | |||
5066 | /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the | |||
5067 | /// instruction already. | |||
5068 | bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val, | |||
5069 | Value *KnownLive1, | |||
5070 | Value *KnownLive2) { | |||
5071 | // If Val is either of the known-live values, we know it is live! | |||
5072 | if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) | |||
5073 | return true; | |||
5074 | ||||
5075 | // All values other than instructions and arguments (e.g. constants) are live. | |||
5076 | if (!isa<Instruction>(Val) && !isa<Argument>(Val)) | |||
5077 | return true; | |||
5078 | ||||
5079 | // If Val is a constant-sized alloca in the entry block, it is live; this is | |||
5080 | // true because it is just a reference to the stack/frame pointer, which is | |||
5081 | // live for the whole function. | |||
5082 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) | |||
5083 | if (AI->isStaticAlloca()) | |||
5084 | return true; | |||
5085 | ||||
5086 | // Check to see if this value is already used in the memory instruction's | |||
5087 | // block. If so, it's already live into the block at the very least, so we | |||
5088 | // can reasonably fold it. | |||
5089 | return Val->isUsedInBasicBlock(MemoryInst->getParent()); | |||
5090 | } | |||
5091 | ||||
5092 | /// It is possible for the addressing mode of the machine to fold the specified | |||
5093 | /// instruction into a load or store that ultimately uses it. | |||
5094 | /// However, the specified instruction has multiple uses. | |||
5095 | /// Given this, it may actually increase register pressure to fold it | |||
5096 | /// into the load. For example, consider this code: | |||
5097 | /// | |||
5098 | /// X = ... | |||
5099 | /// Y = X+1 | |||
5100 | /// use(Y) -> nonload/store | |||
5101 | /// Z = Y+1 | |||
5102 | /// load Z | |||
5103 | /// | |||
5104 | /// In this case, Y has multiple uses, and can be folded into the load of Z | |||
5105 | /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to | |||
5106 | /// be live at the use(Y) line. If we don't fold Y into load Z, we use one | |||
5107 | /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the | |||
5108 | /// number of computations either. | |||
5109 | /// | |||
5110 | /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If | |||
5111 | /// X was live across 'load Z' for other reasons, we actually *would* want to | |||
5112 | /// fold the addressing mode in the Z case. This would make Y die earlier. | |||
5113 | bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode( | |||
5114 | Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) { | |||
5115 | if (IgnoreProfitability) | |||
5116 | return true; | |||
5117 | ||||
5118 | // AMBefore is the addressing mode before this instruction was folded into it, | |||
5119 | // and AMAfter is the addressing mode after the instruction was folded. Get | |||
5120 | // the set of registers referenced by AMAfter and subtract out those | |||
5121 | // referenced by AMBefore: this is the set of values which folding in this | |||
5122 | // address extends the lifetime of. | |||
5123 | // | |||
5124 | // Note that there are only two potential values being referenced here, | |||
5125 | // BaseReg and ScaleReg (global addresses are always available, as are any | |||
5126 | // folded immediates). | |||
5127 | Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; | |||
5128 | ||||
5129 | // If the BaseReg or ScaledReg was referenced by the previous addrmode, their | |||
5130 | // lifetime wasn't extended by adding this instruction. | |||
5131 | if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
5132 | BaseReg = nullptr; | |||
5133 | if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
5134 | ScaledReg = nullptr; | |||
5135 | ||||
5136 | // If folding this instruction (and its subexprs) didn't extend any live | |||
5137 | // ranges, we're ok with it. | |||
5138 | if (!BaseReg && !ScaledReg) | |||
5139 | return true; | |||
5140 | ||||
5141 | // If all uses of this instruction can have the address mode sunk into them, | |||
5142 | // we can remove the addressing mode and effectively trade one live register | |||
5143 | // for another (at worst.) In this context, folding an addressing mode into | |||
5144 | // the use is just a particularly nice way of sinking it. | |||
5145 | SmallVector<std::pair<Use *, Type *>, 16> MemoryUses; | |||
5146 | if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI)) | |||
5147 | return false; // Has a non-memory, non-foldable use! | |||
5148 | ||||
5149 | // Now that we know that all uses of this instruction are part of a chain of | |||
5150 | // computation involving only operations that could theoretically be folded | |||
5151 | // into a memory use, loop over each of these memory operation uses and see | |||
5152 | // if they could *actually* fold the instruction. The assumption is that | |||
5153 | // addressing modes are cheap and that duplicating the computation involved | |||
5154 | // many times is worthwhile, even on a fastpath. For sinking candidates | |||
5155 | // (i.e. cold call sites), this serves as a way to prevent excessive code | |||
5156 | // growth since most architectures have some reasonable small and fast way to | |||
5157 | // compute an effective address (e.g., LEA on x86). | |||
5158 | SmallVector<Instruction *, 32> MatchedAddrModeInsts; | |||
5159 | for (const std::pair<Use *, Type *> &Pair : MemoryUses) { | |||
5160 | Value *Address = Pair.first->get(); | |||
5161 | Instruction *UserI = cast<Instruction>(Pair.first->getUser()); | |||
5162 | Type *AddressAccessTy = Pair.second; | |||
5163 | unsigned AS = Address->getType()->getPointerAddressSpace(); | |||
5164 | ||||
5165 | // Do a match against the root of this address, ignoring profitability. This | |||
5166 | // will tell us if the addressing mode for the memory operation will | |||
5167 | // *actually* cover the shared instruction. | |||
5168 | ExtAddrMode Result; | |||
5169 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | |||
5170 | 0); | |||
5171 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5172 | TPT.getRestorationPoint(); | |||
5173 | AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn, | |||
5174 | AddressAccessTy, AS, UserI, Result, | |||
5175 | InsertedInsts, PromotedInsts, TPT, | |||
5176 | LargeOffsetGEP, OptSize, PSI, BFI); | |||
5177 | Matcher.IgnoreProfitability = true; | |||
5178 | bool Success = Matcher.matchAddr(Address, 0); | |||
5179 | (void)Success; | |||
5180 | assert(Success && "Couldn't select *anything*?"); | |||
5181 | ||||
5182 | // The match was only to check profitability; the changes made are not | |||
5183 | // part of the original matcher. Therefore, they should be dropped, | |||
5184 | // otherwise the original matcher will not present the right state. | |||
5185 | TPT.rollback(LastKnownGood); | |||
5186 | ||||
5187 | // If the match didn't cover I, then it won't be shared by it. | |||
5188 | if (!is_contained(MatchedAddrModeInsts, I)) | |||
5189 | return false; | |||
5190 | ||||
5191 | MatchedAddrModeInsts.clear(); | |||
5192 | } | |||
5193 | ||||
5194 | return true; | |||
5195 | } | |||
5196 | ||||
5197 | /// Return true if the specified values are defined in a | |||
5198 | /// different basic block than BB. | |||
5199 | static bool IsNonLocalValue(Value *V, BasicBlock *BB) { | |||
5200 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
5201 | return I->getParent() != BB; | |||
5202 | return false; | |||
5203 | } | |||
5204 | ||||
5205 | /// Sink addressing mode computation immediately before MemoryInst if doing so | |||
5206 | /// can be done without increasing register pressure. The need for the | |||
5207 | /// register pressure constraint means this can end up being an all or nothing | |||
5208 | /// decision for all uses of the same addressing computation. | |||
5209 | /// | |||
5210 | /// Load and Store Instructions often have addressing modes that can do | |||
5211 | /// significant amounts of computation. As such, instruction selection will try | |||
5212 | /// to get the load or store to do as much computation as possible for the | |||
5213 | /// program. The problem is that isel can only see within a single block. As | |||
5214 | /// such, we sink as much legal addressing mode work into the block as possible. | |||
5215 | /// | |||
5216 | /// This method is used to optimize both load/store and inline asms with memory | |||
5217 | /// operands. It's also used to sink addressing computations feeding into cold | |||
5218 | /// call sites into their (cold) basic block. | |||
5219 | /// | |||
5220 | /// The motivation for handling sinking into cold blocks is that doing so can | |||
5221 | /// both enable other address mode sinking (by satisfying the register pressure | |||
5222 | /// constraint above), and reduce register pressure globally (by removing the | |||
5223 | /// addressing mode computation from the fast path entirely). | |||
5224 | bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, | |||
5225 | Type *AccessTy, unsigned AddrSpace) { | |||
5226 | Value *Repl = Addr; | |||
5227 | ||||
5228 | // Try to collapse single-value PHI nodes. This is necessary to undo | |||
5229 | // unprofitable PRE transformations. | |||
5230 | SmallVector<Value *, 8> worklist; | |||
5231 | SmallPtrSet<Value *, 16> Visited; | |||
5232 | worklist.push_back(Addr); | |||
5233 | ||||
5234 | // Use a worklist to iteratively look through PHI and select nodes, and | |||
5235 | // ensure that the addressing mode obtained from the non-PHI/select roots of | |||
5236 | // the graph are compatible. | |||
5237 | bool PhiOrSelectSeen = false; | |||
5238 | SmallVector<Instruction *, 16> AddrModeInsts; | |||
5239 | const SimplifyQuery SQ(*DL, TLInfo); | |||
5240 | AddressingModeCombiner AddrModes(SQ, Addr); | |||
5241 | TypePromotionTransaction TPT(RemovedInsts); | |||
5242 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5243 | TPT.getRestorationPoint(); | |||
5244 | while (!worklist.empty()) { | |||
5245 | Value *V = worklist.pop_back_val(); | |||
5246 | ||||
5247 | // We allow traversing cyclic Phi nodes. | |||
5248 | // In case of success, after this loop we ensure that traversing through | |||
5249 | // Phi nodes always ends up computing an address of the form | |||
5250 | // BaseGV + Base + Scale * Index + Offset | |||
5251 | // where Scale and Offset are constants and BaseGV, Base and Index | |||
5252 | // are exactly the same Values in all cases. | |||
5253 | // It means that BaseGV, Scale and Offset dominate our memory instruction | |||
5254 | // and have the same value as they had in address computation represented | |||
5255 | // as Phi. So we can safely sink address computation to memory instruction. | |||
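// Editor's note: that canonical form, as a self-contained C++ sketch (field
// names mirror ExtAddrMode; the struct itself is hypothetical):
//
//   #include <cstdint>
//   struct AddrModeSketch {
//     int64_t BaseOffs = 0;                            // constant offset
//     int64_t Scale = 0;                               // constant scale
//     int64_t BaseGV = 0, BaseReg = 0, ScaledReg = 0;  // address components
//     int64_t effective() const {
//       return BaseGV + BaseReg + Scale * ScaledReg + BaseOffs;
//     }
//   };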
5256 | if (!Visited.insert(V).second) | |||
5257 | continue; | |||
5258 | ||||
5259 | // For a PHI node, push all of its incoming values. | |||
5260 | if (PHINode *P = dyn_cast<PHINode>(V)) { | |||
5261 | append_range(worklist, P->incoming_values()); | |||
5262 | PhiOrSelectSeen = true; | |||
5263 | continue; | |||
5264 | } | |||
5265 | // Similar for select. | |||
5266 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { | |||
5267 | worklist.push_back(SI->getFalseValue()); | |||
5268 | worklist.push_back(SI->getTrueValue()); | |||
5269 | PhiOrSelectSeen = true; | |||
5270 | continue; | |||
5271 | } | |||
5272 | ||||
5273 | // For non-PHIs, determine the addressing mode being computed. Note that | |||
5274 | // the result may differ depending on what other uses our candidate | |||
5275 | // addressing instructions might have. | |||
5276 | AddrModeInsts.clear(); | |||
5277 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | |||
5278 | 0); | |||
5279 | // Defer the query (and possible computation of) the dom tree to point of | |||
5280 | // actual use. It's expected that most address matches don't actually need | |||
5281 | // the domtree. | |||
5282 | auto getDTFn = [MemoryInst, this]() -> const DominatorTree & { | |||
5283 | Function *F = MemoryInst->getParent()->getParent(); | |||
5284 | return this->getDT(*F); | |||
5285 | }; | |||
5286 | ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( | |||
5287 | V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn, | |||
5288 | *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI, | |||
5289 | BFI.get()); | |||
5290 | ||||
5291 | GetElementPtrInst *GEP = LargeOffsetGEP.first; | |||
5292 | if (GEP && !NewGEPBases.count(GEP)) { | |||
5293 | // If splitting the underlying data structure can reduce the offset of a | |||
5294 | // GEP, collect the GEP. Skip the GEPs that are the new bases of | |||
5295 | // previously split data structures. | |||
5296 | LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); | |||
5297 | LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size())); | |||
5298 | } | |||
5299 | ||||
5300 | NewAddrMode.OriginalValue = V; | |||
5301 | if (!AddrModes.addNewAddrMode(NewAddrMode)) | |||
5302 | break; | |||
5303 | } | |||
5304 | ||||
5305 | // Try to combine the AddrModes we've collected. If we couldn't collect any, | |||
5306 | // or we have multiple but either couldn't combine them or combining them | |||
5307 | // wouldn't do anything useful, bail out now. | |||
5308 | if (!AddrModes.combineAddrModes()) { | |||
5309 | TPT.rollback(LastKnownGood); | |||
5310 | return false; | |||
5311 | } | |||
5312 | bool Modified = TPT.commit(); | |||
5313 | ||||
5314 | // Get the combined AddrMode (or the only AddrMode, if we only had one). | |||
5315 | ExtAddrMode AddrMode = AddrModes.getAddrMode(); | |||
5316 | ||||
5317 | // If all the instructions matched are already in this BB, don't do anything. | |||
5318 | // If we saw a Phi node then it is definitely not local, and if we saw a | |||
5319 | // select then we want to push the address calculation past it even if it's | |||
5320 | // already in this BB. | |||
5321 | if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { | |||
5322 | return IsNonLocalValue(V, MemoryInst->getParent()); | |||
5323 | })) { | |||
5324 | LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode | |||
5325 |                   << "\n"); | |||
5326 | return Modified; | |||
5327 | } | |||
5328 | ||||
5329 | // Insert this computation right after this user. Since our caller is | |||
5330 | // scanning from the top of the BB to the bottom, reuses of the expr are | |||
5331 | // guaranteed to happen later. | |||
5332 | IRBuilder<> Builder(MemoryInst); | |||
5333 | ||||
5334 | // Now that we've determined the addressing expression we want to use and | |||
5335 | // know that we have to sink it into this block, check to see if we have | |||
5336 | // already done this for some other load/store instr in this block. If so, reuse | |||
5337 | // the computation. Before attempting reuse, check if the address is valid | |||
5338 | // as it may have been erased. | |||
5339 | ||||
5340 | WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; | |||
5341 | ||||
5342 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; | |||
5343 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
5344 | if (SunkAddr) { | |||
5345 | LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode | |||
5346 |                   << " for " << *MemoryInst << "\n"); | |||
5347 | if (SunkAddr->getType() != Addr->getType()) { | |||
5348 | if (SunkAddr->getType()->getPointerAddressSpace() != | |||
5349 | Addr->getType()->getPointerAddressSpace() && | |||
5350 | !DL->isNonIntegralPointerType(Addr->getType())) { | |||
5351 | // There are two reasons the address spaces might not match: a no-op | |||
5352 | // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a | |||
5353 | // ptrtoint/inttoptr pair to ensure we match the original semantics. | |||
5354 | // TODO: allow bitcast between different address space pointers with the | |||
5355 | // same size. | |||
5356 | SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr"); | |||
5357 | SunkAddr = | |||
5358 | Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr"); | |||
5359 | } else | |||
5360 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
5361 | } | |||
5362 | } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() && | |||
5363 | SubtargetInfo->addrSinkUsingGEPs())) { | |||
5364 | // By default, we use the GEP-based method when AA is used later. This | |||
5365 | // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. | |||
5366 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | |||
5367 |                   << " for " << *MemoryInst << "\n"); | |||
5368 | Value *ResultPtr = nullptr, *ResultIndex = nullptr; | |||
5369 | ||||
5370 | // First, find the pointer. | |||
5371 | if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { | |||
5372 | ResultPtr = AddrMode.BaseReg; | |||
5373 | AddrMode.BaseReg = nullptr; | |||
5374 | } | |||
5375 | ||||
5376 | if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { | |||
5377 | // We can't add more than one pointer together, nor can we scale a | |||
5378 | // pointer (both of which seem meaningless). | |||
5379 | if (ResultPtr || AddrMode.Scale != 1) | |||
5380 | return Modified; | |||
5381 | ||||
5382 | ResultPtr = AddrMode.ScaledReg; | |||
5383 | AddrMode.Scale = 0; | |||
5384 | } | |||
5385 | ||||
5386 | // It is only safe to sign extend the BaseReg if we know that the math | |||
5387 | // required to create it did not overflow before we extend it. Since | |||
5388 | // the original IR value was tossed in favor of a constant back when | |||
5389 | // the AddrMode was created we need to bail out gracefully if widths | |||
5390 | // do not match instead of extending it. | |||
5391 | // | |||
5392 | // (See below for code to add the scale.) | |||
5393 | if (AddrMode.Scale) { | |||
5394 | Type *ScaledRegTy = AddrMode.ScaledReg->getType(); | |||
5395 | if (cast<IntegerType>(IntPtrTy)->getBitWidth() > | |||
5396 | cast<IntegerType>(ScaledRegTy)->getBitWidth()) | |||
5397 | return Modified; | |||
5398 | } | |||
5399 | ||||
5400 | if (AddrMode.BaseGV) { | |||
5401 | if (ResultPtr) | |||
5402 | return Modified; | |||
5403 | ||||
5404 | ResultPtr = AddrMode.BaseGV; | |||
5405 | } | |||
5406 | ||||
5407 | // If the real base value actually came from an inttoptr, then the matcher | |||
5408 | // will look through it and provide only the integer value. In that case, | |||
5409 | // use it here. | |||
5410 | if (!DL->isNonIntegralPointerType(Addr->getType())) { | |||
5411 | if (!ResultPtr && AddrMode.BaseReg) { | |||
5412 | ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), | |||
5413 | "sunkaddr"); | |||
5414 | AddrMode.BaseReg = nullptr; | |||
5415 | } else if (!ResultPtr && AddrMode.Scale == 1) { | |||
5416 | ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), | |||
5417 | "sunkaddr"); | |||
5418 | AddrMode.Scale = 0; | |||
5419 | } | |||
5420 | } | |||
5421 | ||||
5422 | if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale && | |||
5423 | !AddrMode.BaseOffs) { | |||
5424 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
5425 | } else if (!ResultPtr) { | |||
5426 | return Modified; | |||
5427 | } else { | |||
5428 | Type *I8PtrTy = | |||
5429 | Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); | |||
5430 | Type *I8Ty = Builder.getInt8Ty(); | |||
5431 | ||||
5432 | // Start with the base register. Do this first so that subsequent address | |||
5433 | // matching finds it last, which will prevent it from trying to match it | |||
5434 | // as the scaled value in case it happens to be a mul. That would be | |||
5435 | // problematic if we've sunk a different mul for the scale, because then | |||
5436 | // we'd end up sinking both muls. | |||
5437 | if (AddrMode.BaseReg) { | |||
5438 | Value *V = AddrMode.BaseReg; | |||
5439 | if (V->getType() != IntPtrTy) | |||
5440 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
5441 | ||||
5442 | ResultIndex = V; | |||
5443 | } | |||
5444 | ||||
5445 | // Add the scale value. | |||
5446 | if (AddrMode.Scale) { | |||
5447 | Value *V = AddrMode.ScaledReg; | |||
5448 | if (V->getType() == IntPtrTy) { | |||
5449 | // done. | |||
5450 | } else { | |||
5451 | assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
5452 |        cast<IntegerType>(V->getType())->getBitWidth() && | |||
5453 |        "We can't transform if ScaledReg is too narrow"); | |||
5454 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
5455 | } | |||
5456 | ||||
5457 | if (AddrMode.Scale != 1) | |||
5458 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
5459 | "sunkaddr"); | |||
5460 | if (ResultIndex) | |||
5461 | ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); | |||
5462 | else | |||
5463 | ResultIndex = V; | |||
5464 | } | |||
5465 | ||||
5466 | // Add in the Base Offset if present. | |||
5467 | if (AddrMode.BaseOffs) { | |||
5468 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
5469 | if (ResultIndex) { | |||
5470 | // We need to add this separately from the scale above to help with | |||
5471 | // SDAG consecutive load/store merging. | |||
5472 | if (ResultPtr->getType() != I8PtrTy) | |||
5473 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
5474 | ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, | |||
5475 | "sunkaddr", AddrMode.InBounds); | |||
5476 | } | |||
5477 | ||||
5478 | ResultIndex = V; | |||
5479 | } | |||
5480 | ||||
5481 | if (!ResultIndex) { | |||
5482 | SunkAddr = ResultPtr; | |||
5483 | } else { | |||
5484 | if (ResultPtr->getType() != I8PtrTy) | |||
5485 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
5486 | SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr", | |||
5487 | AddrMode.InBounds); | |||
5488 | } | |||
5489 | ||||
5490 | if (SunkAddr->getType() != Addr->getType()) { | |||
5491 | if (SunkAddr->getType()->getPointerAddressSpace() != | |||
5492 | Addr->getType()->getPointerAddressSpace() && | |||
5493 | !DL->isNonIntegralPointerType(Addr->getType())) { | |||
5494 | // There are two reasons the address spaces might not match: a no-op | |||
5495 | // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a | |||
5496 | // ptrtoint/inttoptr pair to ensure we match the original semantics. | |||
5497 | // TODO: allow bitcast between different address space pointers with | |||
5498 | // the same size. | |||
5499 | SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr"); | |||
5500 | SunkAddr = | |||
5501 | Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr"); | |||
5502 | } else | |||
5503 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
5504 | } | |||
5505 | } | |||
5506 | } else { | |||
5507 | // We'd require a ptrtoint/inttoptr down the line, which we can't do for | |||
5508 | // non-integral pointers, so in that case bail out now. | |||
5509 | Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; | |||
5510 | Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; | |||
5511 | PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); | |||
5512 | PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); | |||
5513 | if (DL->isNonIntegralPointerType(Addr->getType()) || | |||
5514 | (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || | |||
5515 | (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || | |||
5516 | (AddrMode.BaseGV && | |||
5517 | DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) | |||
5518 | return Modified; | |||
5519 | ||||
5520 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | |||
5521 | << " for " << *MemoryInst << "\n"); | |||
5522 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
5523 | Value *Result = nullptr; | |||
5524 | ||||
5525 | // Start with the base register. Do this first so that subsequent address | |||
5526 | // matching finds it last, which will prevent it from trying to match it | |||
5527 | // as the scaled value in case it happens to be a mul. That would be | |||
5528 | // problematic if we've sunk a different mul for the scale, because then | |||
5529 | // we'd end up sinking both muls. | |||
5530 | if (AddrMode.BaseReg) { | |||
5531 | Value *V = AddrMode.BaseReg; | |||
5532 | if (V->getType()->isPointerTy()) | |||
5533 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
5534 | if (V->getType() != IntPtrTy) | |||
5535 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
5536 | Result = V; | |||
5537 | } | |||
5538 | ||||
5539 | // Add the scale value. | |||
5540 | if (AddrMode.Scale) { | |||
5541 | Value *V = AddrMode.ScaledReg; | |||
5542 | if (V->getType() == IntPtrTy) { | |||
5543 | // done. | |||
5544 | } else if (V->getType()->isPointerTy()) { | |||
5545 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
5546 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
5547 | cast<IntegerType>(V->getType())->getBitWidth()) { | |||
5548 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
5549 | } else { | |||
5550 | // It is only safe to sign extend the BaseReg if we know that the math | |||
5551 | // required to create it did not overflow before we extend it. Since | |||
5552 | // the original IR value was tossed in favor of a constant back when | |||
5553 | // the AddrMode was created we need to bail out gracefully if widths | |||
5554 | // do not match instead of extending it. | |||
5555 | Instruction *I = dyn_cast_or_null<Instruction>(Result); | |||
5556 | if (I && (Result != AddrMode.BaseReg)) | |||
5557 | I->eraseFromParent(); | |||
5558 | return Modified; | |||
5559 | } | |||
5560 | if (AddrMode.Scale != 1) | |||
5561 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
5562 | "sunkaddr"); | |||
5563 | if (Result) | |||
5564 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
5565 | else | |||
5566 | Result = V; | |||
5567 | } | |||
5568 | ||||
5569 | // Add in the BaseGV if present. | |||
5570 | if (AddrMode.BaseGV) { | |||
5571 | Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); | |||
5572 | if (Result) | |||
5573 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
5574 | else | |||
5575 | Result = V; | |||
5576 | } | |||
5577 | ||||
5578 | // Add in the Base Offset if present. | |||
5579 | if (AddrMode.BaseOffs) { | |||
5580 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
5581 | if (Result) | |||
5582 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
5583 | else | |||
5584 | Result = V; | |||
5585 | } | |||
5586 | ||||
5587 | if (!Result) | |||
5588 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
5589 | else | |||
5590 | SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); | |||
5591 | } | |||
5592 | ||||
5593 | MemoryInst->replaceUsesOfWith(Repl, SunkAddr); | |||
5594 | // Store the newly computed address into the cache. In the case we reused a | |||
5595 | // value, this should be idempotent. | |||
5596 | SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); | |||
5597 | ||||
5598 | // If we have no uses, recursively delete the value and all dead instructions | |||
5599 | // using it. | |||
5600 | if (Repl->use_empty()) { | |||
5601 | resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() { | |||
5602 | RecursivelyDeleteTriviallyDeadInstructions( | |||
5603 | Repl, TLInfo, nullptr, | |||
5604 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | |||
5605 | }); | |||
5606 | } | |||
5607 | ++NumMemoryInsts; | |||
5608 | return true; | |||
5609 | } | |||
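// Editorial note, not in the original source: a minimal sketch of what the
// non-GEP fallback above emits. Assuming a hypothetical match with Scale == 4
// and BaseOffs == 16 (all names below are illustrative), the address is
// rebuilt as plain integer math using the "sunkaddr" prefix seen in the
// builder calls:
//
//   %sunkaddr = ptrtoint ptr %base to i64
//   %sunkaddr1 = mul i64 %idx, 4
//   %sunkaddr2 = add i64 %sunkaddr, %sunkaddr1
//   %sunkaddr3 = add i64 %sunkaddr2, 16
//   %sunkaddr4 = inttoptr i64 %sunkaddr3 to ptr
//   %v = load i32, ptr %sunkaddr4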
5610 | ||||
5611 | /// Rewrite GEP input to gather/scatter to enable SelectionDAGBuilder to find | |||
5612 | /// a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder can | |||
5613 | /// only handle a 2 operand GEP in the same basic block or a splat constant | |||
5614 | /// vector. The 2 operands to the GEP must have a scalar pointer and a vector | |||
5615 | /// index. | |||
5616 | /// | |||
5617 | /// If the existing GEP has a vector base pointer that is splat, we can look | |||
5618 | /// through the splat to find the scalar pointer. If we can't find a scalar | |||
5619 | /// pointer there's nothing we can do. | |||
5620 | /// | |||
5621 | /// If we have a GEP with more than 2 indices where the middle indices are all | |||
5622 | /// zeroes, we can replace it with 2 GEPs where the second has 2 operands. | |||
5623 | /// | |||
5624 | /// If the final index isn't a vector or is a splat, we can emit a scalar GEP | |||
5625 | /// followed by a GEP with an all zeroes vector index. This will enable | |||
5626 | /// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a | |||
5627 | /// zero index. | |||
5628 | bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst, | |||
5629 | Value *Ptr) { | |||
5630 | Value *NewAddr; | |||
5631 | ||||
5632 | if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) { | |||
5633 | // Don't optimize GEPs that don't have indices. | |||
5634 | if (!GEP->hasIndices()) | |||
5635 | return false; | |||
5636 | ||||
5637 | // If the GEP and the gather/scatter aren't in the same BB, don't optimize. | |||
5638 | // FIXME: We should support this by sinking the GEP. | |||
5639 | if (MemoryInst->getParent() != GEP->getParent()) | |||
5640 | return false; | |||
5641 | ||||
5642 | SmallVector<Value *, 2> Ops(GEP->operands()); | |||
5643 | ||||
5644 | bool RewriteGEP = false; | |||
5645 | ||||
5646 | if (Ops[0]->getType()->isVectorTy()) { | |||
5647 | Ops[0] = getSplatValue(Ops[0]); | |||
5648 | if (!Ops[0]) | |||
5649 | return false; | |||
5650 | RewriteGEP = true; | |||
5651 | } | |||
5652 | ||||
5653 | unsigned FinalIndex = Ops.size() - 1; | |||
5654 | ||||
5655 | // Ensure all but the last index is 0. | |||
5656 | // FIXME: This isn't strictly required. All that's required is that they are | |||
5657 | // all scalars or splats. | |||
5658 | for (unsigned i = 1; i < FinalIndex; ++i) { | |||
5659 | auto *C = dyn_cast<Constant>(Ops[i]); | |||
5660 | if (!C) | |||
5661 | return false; | |||
5662 | if (isa<VectorType>(C->getType())) | |||
5663 | C = C->getSplatValue(); | |||
5664 | auto *CI = dyn_cast_or_null<ConstantInt>(C); | |||
5665 | if (!CI || !CI->isZero()) | |||
5666 | return false; | |||
5667 | // Scalarize the index if needed. | |||
5668 | Ops[i] = CI; | |||
5669 | } | |||
5670 | ||||
5671 | // Try to scalarize the final index. | |||
5672 | if (Ops[FinalIndex]->getType()->isVectorTy()) { | |||
5673 | if (Value *V = getSplatValue(Ops[FinalIndex])) { | |||
5674 | auto *C = dyn_cast<ConstantInt>(V); | |||
5675 | // Don't scalarize all zeros vector. | |||
5676 | if (!C || !C->isZero()) { | |||
5677 | Ops[FinalIndex] = V; | |||
5678 | RewriteGEP = true; | |||
5679 | } | |||
5680 | } | |||
5681 | } | |||
5682 | ||||
5683 | // If we made any changes or we have extra operands, we need to generate | |||
5684 | // new instructions. | |||
5685 | if (!RewriteGEP && Ops.size() == 2) | |||
5686 | return false; | |||
5687 | ||||
5688 | auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); | |||
5689 | ||||
5690 | IRBuilder<> Builder(MemoryInst); | |||
5691 | ||||
5692 | Type *SourceTy = GEP->getSourceElementType(); | |||
5693 | Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType()); | |||
5694 | ||||
5695 | // If the final index isn't a vector, emit a scalar GEP containing all ops | |||
5696 | // and a vector GEP with all zeroes final index. | |||
5697 | if (!Ops[FinalIndex]->getType()->isVectorTy()) { | |||
5698 | NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front()); | |||
5699 | auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); | |||
5700 | auto *SecondTy = GetElementPtrInst::getIndexedType( | |||
5701 | SourceTy, ArrayRef(Ops).drop_front()); | |||
5702 | NewAddr = | |||
5703 | Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy)); | |||
5704 | } else { | |||
5705 | Value *Base = Ops[0]; | |||
5706 | Value *Index = Ops[FinalIndex]; | |||
5707 | ||||
5708 | // Create a scalar GEP if there are more than 2 operands. | |||
5709 | if (Ops.size() != 2) { | |||
5710 | // Replace the last index with 0. | |||
5711 | Ops[FinalIndex] = Constant::getNullValue(ScalarIndexTy); | |||
5712 | Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front()); | |||
5713 | SourceTy = GetElementPtrInst::getIndexedType( | |||
5714 | SourceTy, ArrayRef(Ops).drop_front()); | |||
5715 | } | |||
5716 | ||||
5717 | // Now create the GEP with scalar pointer and vector index. | |||
5718 | NewAddr = Builder.CreateGEP(SourceTy, Base, Index); | |||
5719 | } | |||
5720 | } else if (!isa<Constant>(Ptr)) { | |||
5721 | // Not a GEP; maybe it's a splat and we can create a GEP to enable | |||
5722 | // SelectionDAGBuilder to use it as a uniform base. | |||
5723 | Value *V = getSplatValue(Ptr); | |||
5724 | if (!V) | |||
5725 | return false; | |||
5726 | ||||
5727 | auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount(); | |||
5728 | ||||
5729 | IRBuilder<> Builder(MemoryInst); | |||
5730 | ||||
5731 | // Emit a vector GEP with a scalar pointer and all 0s vector index. | |||
5732 | Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType()); | |||
5733 | auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts); | |||
5734 | Type *ScalarTy; | |||
5735 | if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() == | |||
5736 | Intrinsic::masked_gather) { | |||
5737 | ScalarTy = MemoryInst->getType()->getScalarType(); | |||
5738 | } else { | |||
5739 | assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() == | |||
5740 | Intrinsic::masked_scatter); | |||
5741 | ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType(); | |||
5742 | } | |||
5743 | NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy)); | |||
5744 | } else { | |||
5745 | // Constant; SelectionDAGBuilder knows to check if it's a splat. | |||
5746 | return false; | |||
5747 | } | |||
5748 | ||||
5749 | MemoryInst->replaceUsesOfWith(Ptr, NewAddr); | |||
5750 | ||||
5751 | // If we have no uses, recursively delete the value and all dead instructions | |||
5752 | // using it. | |||
5753 | if (Ptr->use_empty()) | |||
5754 | RecursivelyDeleteTriviallyDeadInstructions( | |||
5755 | Ptr, TLInfo, nullptr, | |||
5756 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | |||
5757 | ||||
5758 | return true; | |||
5759 | } | |||
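// Editorial note, not in the original source: a sketch of the splat-pointer
// case above, with hypothetical names and types. A gather whose pointer
// operand is a splat,
//
//   %splat = shufflevector <4 x ptr> %ins, <4 x ptr> poison, <4 x i32> zeroinitializer
//   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %splat, ...)
//
// is rewritten to use a scalar base with an all-zero vector index,
//
//   %addr = getelementptr i32, ptr %p, <4 x i64> zeroinitializer
//
// which SelectionDAGBuilder can recognize as a uniform base plus zero index.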
5760 | ||||
5761 | /// If there are any memory operands, use OptimizeMemoryInst to sink their | |||
5762 | /// address computing into the block when possible / profitable. | |||
5763 | bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { | |||
5764 | bool MadeChange = false; | |||
5765 | ||||
5766 | const TargetRegisterInfo *TRI = | |||
5767 | TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); | |||
5768 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
5769 | TLI->ParseConstraints(*DL, TRI, *CS); | |||
5770 | unsigned ArgNo = 0; | |||
5771 | for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) { | |||
5772 | // Compute the constraint code and ConstraintType to use. | |||
5773 | TLI->ComputeConstraintToUse(OpInfo, SDValue()); | |||
5774 | ||||
5775 | // TODO: Also handle C_Address? | |||
5776 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && | |||
5777 | OpInfo.isIndirect) { | |||
5778 | Value *OpVal = CS->getArgOperand(ArgNo++); | |||
5779 | MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); | |||
5780 | } else if (OpInfo.Type == InlineAsm::isInput) | |||
5781 | ArgNo++; | |||
5782 | } | |||
5783 | ||||
5784 | return MadeChange; | |||
5785 | } | |||
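// Editorial note, not in the original source: a hedged example of the case
// handled above. An indirect "=*m" output operand, e.g. (asm string and
// names are hypothetical)
//
//   call void asm "str $1, $0", "=*m,r"(ptr elementtype(i32) %addr, i32 %v)
//
// is classified as C_Memory with isIndirect set, so %addr is fed through
// optimizeMemoryInst just like an ordinary load/store address.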
5786 | ||||
5787 | /// Check if all the uses of \p Val are equivalent (or free) zero or | |||
5788 | /// sign extensions. | |||
5789 | static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { | |||
5790 | assert(!Val->use_empty() && "Input must have at least one use"); | |||
5791 | const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); | |||
5792 | bool IsSExt = isa<SExtInst>(FirstUser); | |||
5793 | Type *ExtTy = FirstUser->getType(); | |||
5794 | for (const User *U : Val->users()) { | |||
5795 | const Instruction *UI = cast<Instruction>(U); | |||
5796 | if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) | |||
5797 | return false; | |||
5798 | Type *CurTy = UI->getType(); | |||
5799 | // Same input and output types: Same instruction after CSE. | |||
5800 | if (CurTy == ExtTy) | |||
5801 | continue; | |||
5802 | ||||
5803 | // If IsSExt is true, we are in this situation: | |||
5804 | // a = Val | |||
5805 | // b = sext ty1 a to ty2 | |||
5806 | // c = sext ty1 a to ty3 | |||
5807 | // Assuming ty2 is shorter than ty3, this could be turned into: | |||
5808 | // a = Val | |||
5809 | // b = sext ty1 a to ty2 | |||
5810 | // c = sext ty2 b to ty3 | |||
5811 | // However, the last sext is not free. | |||
5812 | if (IsSExt) | |||
5813 | return false; | |||
5814 | ||||
5815 | // This is a ZExt, maybe this is free to extend from one type to another. | |||
5816 | // In that case, we would not account for a different use. | |||
5817 | Type *NarrowTy; | |||
5818 | Type *LargeTy; | |||
5819 | if (ExtTy->getScalarType()->getIntegerBitWidth() > | |||
5820 | CurTy->getScalarType()->getIntegerBitWidth()) { | |||
5821 | NarrowTy = CurTy; | |||
5822 | LargeTy = ExtTy; | |||
5823 | } else { | |||
5824 | NarrowTy = ExtTy; | |||
5825 | LargeTy = CurTy; | |||
5826 | } | |||
5827 | ||||
5828 | if (!TLI.isZExtFree(NarrowTy, LargeTy)) | |||
5829 | return false; | |||
5830 | } | |||
5831 | // All uses are the same or can be derived from one another for free. | |||
5832 | return true; | |||
5833 | } | |||
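// Editorial note, not in the original source: for the zext branch above, a
// value with the two uses "zext i8 %a to i16" and "zext i8 %a to i32" still
// counts as having equivalent extension uses provided
// TLI.isZExtFree(i16, i32) holds, since the wider zext can then be recreated
// for free from the narrower one.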
5834 | ||||
5835 | /// Try to speculatively promote extensions in \p Exts and continue | |||
5836 | /// promoting through newly promoted operands recursively as far as doing so is | |||
5837 | /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. | |||
5838 | /// When some promotion happened, \p TPT contains the proper state to revert | |||
5839 | /// them. | |||
5840 | /// | |||
5841 | /// \return true if some promotion happened, false otherwise. | |||
5842 | bool CodeGenPrepare::tryToPromoteExts( | |||
5843 | TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, | |||
5844 | SmallVectorImpl<Instruction *> &ProfitablyMovedExts, | |||
5845 | unsigned CreatedInstsCost) { | |||
5846 | bool Promoted = false; | |||
5847 | ||||
5848 | // Iterate over all the extensions to try to promote them. | |||
5849 | for (auto *I : Exts) { | |||
5850 | // Early check if we directly have ext(load). | |||
5851 | if (isa<LoadInst>(I->getOperand(0))) { | |||
5852 | ProfitablyMovedExts.push_back(I); | |||
5853 | continue; | |||
5854 | } | |||
5855 | ||||
5856 | // Check whether or not we want to do any promotion. The reason we have | |||
5857 | // this check inside the for loop is to catch the case where an extension | |||
5858 | // is directly fed by a load because in such case the extension can be moved | |||
5859 | // up without any promotion on its operands. | |||
5860 | if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion) | |||
5861 | return false; | |||
5862 | ||||
5863 | // Get the action to perform the promotion. | |||
5864 | TypePromotionHelper::Action TPH = | |||
5865 | TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); | |||
5866 | // Check if we can promote. | |||
5867 | if (!TPH) { | |||
5868 | // Save the current extension as we cannot move up through its operand. | |||
5869 | ProfitablyMovedExts.push_back(I); | |||
5870 | continue; | |||
5871 | } | |||
5872 | ||||
5873 | // Save the current state. | |||
5874 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5875 | TPT.getRestorationPoint(); | |||
5876 | SmallVector<Instruction *, 4> NewExts; | |||
5877 | unsigned NewCreatedInstsCost = 0; | |||
5878 | unsigned ExtCost = !TLI->isExtFree(I); | |||
5879 | // Promote. | |||
5880 | Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, | |||
5881 | &NewExts, nullptr, *TLI); | |||
5882 | assert(PromotedVal && | |||
5883 | "TypePromotionHelper should have filtered out those cases"); | |||
5884 | ||||
5885 | // We can merge only one extension into a load. | |||
5886 | // Therefore, if we have more than 1 new extension we heuristically | |||
5887 | // cut this search path, because it means we degrade the code quality. | |||
5888 | // With exactly 2, the transformation is neutral, because we will merge | |||
5889 | // one extension but leave one. However, we optimistically keep going, | |||
5890 | // because the new extension may be removed too. | |||
5891 | long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; | |||
5892 | // FIXME: It would be possible to propagate a negative value instead of | |||
5893 | // conservatively ceiling it to 0. | |||
5894 | TotalCreatedInstsCost = | |||
5895 | std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); | |||
5896 | if (!StressExtLdPromotion && | |||
5897 | (TotalCreatedInstsCost > 1 || | |||
5898 | !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { | |||
5899 | // This promotion is not profitable, rollback to the previous state, and | |||
5900 | // save the current extension in ProfitablyMovedExts as the latest | |||
5901 | // speculative promotion turned out to be unprofitable. | |||
5902 | TPT.rollback(LastKnownGood); | |||
5903 | ProfitablyMovedExts.push_back(I); | |||
5904 | continue; | |||
5905 | } | |||
5906 | // Continue promoting NewExts as far as doing so is profitable. | |||
5907 | SmallVector<Instruction *, 2> NewlyMovedExts; | |||
5908 | (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); | |||
5909 | bool NewPromoted = false; | |||
5910 | for (auto *ExtInst : NewlyMovedExts) { | |||
5911 | Instruction *MovedExt = cast<Instruction>(ExtInst); | |||
5912 | Value *ExtOperand = MovedExt->getOperand(0); | |||
5913 | // If we have reached a load, we need this extra profitability check | |||
5914 | // as it could potentially be merged into an ext(load). | |||
5915 | if (isa<LoadInst>(ExtOperand) && | |||
5916 | !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || | |||
5917 | (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) | |||
5918 | continue; | |||
5919 | ||||
5920 | ProfitablyMovedExts.push_back(MovedExt); | |||
5921 | NewPromoted = true; | |||
5922 | } | |||
5923 | ||||
5924 | // If none of speculative promotions for NewExts is profitable, rollback | |||
5925 | // and save the current extension (I) as the last profitable extension. | |||
5926 | if (!NewPromoted) { | |||
5927 | TPT.rollback(LastKnownGood); | |||
5928 | ProfitablyMovedExts.push_back(I); | |||
5929 | continue; | |||
5930 | } | |||
5931 | // The promotion is profitable. | |||
5932 | Promoted = true; | |||
5933 | } | |||
5934 | return Promoted; | |||
5935 | } | |||
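// Editorial note, not in the original source: a worked instance of the cost
// bookkeeping above. If promoting through a single add creates one new ext
// (NewCreatedInstsCost == 1) while the original ext was not free
// (ExtCost == 1), TotalCreatedInstsCost is max(0, 1 - 1) == 0, so the
// promotion survives the "> 1" cutoff and the walk continues toward a
// possible ext(load).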
5936 | ||||
5937 | /// Merge redundant sexts when one dominates the other. | |||
5938 | bool CodeGenPrepare::mergeSExts(Function &F) { | |||
5939 | bool Changed = false; | |||
5940 | for (auto &Entry : ValToSExtendedUses) { | |||
5941 | SExts &Insts = Entry.second; | |||
5942 | SExts CurPts; | |||
5943 | for (Instruction *Inst : Insts) { | |||
5944 | if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || | |||
5945 | Inst->getOperand(0) != Entry.first) | |||
5946 | continue; | |||
5947 | bool inserted = false; | |||
5948 | for (auto &Pt : CurPts) { | |||
5949 | if (getDT(F).dominates(Inst, Pt)) { | |||
5950 | replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc); | |||
5951 | RemovedInsts.insert(Pt); | |||
5952 | Pt->removeFromParent(); | |||
5953 | Pt = Inst; | |||
5954 | inserted = true; | |||
5955 | Changed = true; | |||
5956 | break; | |||
5957 | } | |||
5958 | if (!getDT(F).dominates(Pt, Inst)) | |||
5959 | // Give up if we would need to merge in a common dominator, as | |||
5960 | // experiments show it is not profitable. | |||
5961 | continue; | |||
5962 | replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc); | |||
5963 | RemovedInsts.insert(Inst); | |||
5964 | Inst->removeFromParent(); | |||
5965 | inserted = true; | |||
5966 | Changed = true; | |||
5967 | break; | |||
5968 | } | |||
5969 | if (!inserted) | |||
5970 | CurPts.push_back(Inst); | |||
5971 | } | |||
5972 | } | |||
5973 | return Changed; | |||
5974 | } | |||
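// Editorial note, not in the original source: a sketch of the merge above
// with hypothetical blocks. Given two sexts of the same value,
//
//   BB0: %s1 = sext i32 %v to i64
//   BB1: %s2 = sext i32 %v to i64   ; BB0 dominates BB1
//
// the uses of %s2 are replaced with %s1 and %s2 is queued for removal; sexts
// with no dominance relation between them are kept as separate points.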
5975 | ||||
5976 | // Splitting large data structures so that the GEPs accessing them can have | |||
5977 | // smaller offsets so that they can be sunk to the same blocks as their users. | |||
5978 | // For example, a large struct starting from %base is split into two parts | |||
5979 | // where the second part starts from %new_base. | |||
5980 | // | |||
5981 | // Before: | |||
5982 | // BB0: | |||
5983 | // %base = | |||
5984 | // | |||
5985 | // BB1: | |||
5986 | // %gep0 = gep %base, off0 | |||
5987 | // %gep1 = gep %base, off1 | |||
5988 | // %gep2 = gep %base, off2 | |||
5989 | // | |||
5990 | // BB2: | |||
5991 | // %load1 = load %gep0 | |||
5992 | // %load2 = load %gep1 | |||
5993 | // %load3 = load %gep2 | |||
5994 | // | |||
5995 | // After: | |||
5996 | // BB0: | |||
5997 | // %base = | |||
5998 | // %new_base = gep %base, off0 | |||
5999 | // | |||
6000 | // BB1: | |||
6001 | // %new_gep0 = %new_base | |||
6002 | // %new_gep1 = gep %new_base, off1 - off0 | |||
6003 | // %new_gep2 = gep %new_base, off2 - off0 | |||
6004 | // | |||
6005 | // BB2: | |||
6006 | // %load1 = load i32, i32* %new_gep0 | |||
6007 | // %load2 = load i32, i32* %new_gep1 | |||
6008 | // %load3 = load i32, i32* %new_gep2 | |||
6009 | // | |||
6010 | // %new_gep1 and %new_gep2 can be sunk to BB2 now after the splitting because | |||
6011 | // their offsets are small enough to fit into the addressing mode. | |||
6012 | bool CodeGenPrepare::splitLargeGEPOffsets() { | |||
6013 | bool Changed = false; | |||
6014 | for (auto &Entry : LargeOffsetGEPMap) { | |||
6015 | Value *OldBase = Entry.first; | |||
6016 | SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> | |||
6017 | &LargeOffsetGEPs = Entry.second; | |||
6018 | auto compareGEPOffset = | |||
6019 | [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, | |||
6020 | const std::pair<GetElementPtrInst *, int64_t> &RHS) { | |||
6021 | if (LHS.first == RHS.first) | |||
6022 | return false; | |||
6023 | if (LHS.second != RHS.second) | |||
6024 | return LHS.second < RHS.second; | |||
6025 | return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; | |||
6026 | }; | |||
6027 | // Sorting all the GEPs of the same data structures based on the offsets. | |||
6028 | llvm::sort(LargeOffsetGEPs, compareGEPOffset); | |||
6029 | LargeOffsetGEPs.erase( | |||
6030 | std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), | |||
6031 | LargeOffsetGEPs.end()); | |||
6032 | // Skip if all the GEPs have the same offsets. | |||
6033 | if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) | |||
6034 | continue; | |||
6035 | GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; | |||
6036 | int64_t BaseOffset = LargeOffsetGEPs.begin()->second; | |||
6037 | Value *NewBaseGEP = nullptr; | |||
6038 | ||||
6039 | auto *LargeOffsetGEP = LargeOffsetGEPs.begin(); | |||
6040 | while (LargeOffsetGEP != LargeOffsetGEPs.end()) { | |||
6041 | GetElementPtrInst *GEP = LargeOffsetGEP->first; | |||
6042 | int64_t Offset = LargeOffsetGEP->second; | |||
6043 | if (Offset != BaseOffset) { | |||
6044 | TargetLowering::AddrMode AddrMode; | |||
6045 | AddrMode.HasBaseReg = true; | |||
6046 | AddrMode.BaseOffs = Offset - BaseOffset; | |||
6047 | // The result type of the GEP might not be the type of the memory | |||
6048 | // access. | |||
6049 | if (!TLI->isLegalAddressingMode(*DL, AddrMode, | |||
6050 | GEP->getResultElementType(), | |||
6051 | GEP->getAddressSpace())) { | |||
6052 | // We need to create a new base if the offset to the current base is | |||
6053 | // too large to fit into the addressing mode. So, a very large struct | |||
6054 | // may be split into several parts. | |||
6055 | BaseGEP = GEP; | |||
6056 | BaseOffset = Offset; | |||
6057 | NewBaseGEP = nullptr; | |||
6058 | } | |||
6059 | } | |||
6060 | ||||
6061 | // Generate a new GEP to replace the current one. | |||
6062 | LLVMContext &Ctx = GEP->getContext(); | |||
6063 | Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); | |||
6064 | Type *I8PtrTy = | |||
6065 | Type::getInt8PtrTy(Ctx, GEP->getType()->getPointerAddressSpace()); | |||
6066 | Type *I8Ty = Type::getInt8Ty(Ctx); | |||
6067 | ||||
6068 | if (!NewBaseGEP) { | |||
6069 | // Create a new base if we don't have one yet. Find the insertion | |||
6070 | // point for the new base first. | |||
6071 | BasicBlock::iterator NewBaseInsertPt; | |||
6072 | BasicBlock *NewBaseInsertBB; | |||
6073 | if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { | |||
6074 | // If the base of the struct is an instruction, the new base will be | |||
6075 | // inserted close to it. | |||
6076 | NewBaseInsertBB = BaseI->getParent(); | |||
6077 | if (isa<PHINode>(BaseI)) | |||
6078 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | |||
6079 | else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { | |||
6080 | NewBaseInsertBB = | |||
6081 | SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); | |||
6082 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | |||
6083 | } else | |||
6084 | NewBaseInsertPt = std::next(BaseI->getIterator()); | |||
6085 | } else { | |||
6086 | // If the current base is an argument or global value, the new base | |||
6087 | // will be inserted to the entry block. | |||
6088 | NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); | |||
6089 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | |||
6090 | } | |||
6091 | IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); | |||
6092 | // Create a new base. | |||
6093 | Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); | |||
6094 | NewBaseGEP = OldBase; | |||
6095 | if (NewBaseGEP->getType() != I8PtrTy) | |||
6096 | NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); | |||
6097 | NewBaseGEP = | |||
6098 | NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); | |||
6099 | NewGEPBases.insert(NewBaseGEP); | |||
6100 | } | |||
6101 | ||||
6102 | IRBuilder<> Builder(GEP); | |||
6103 | Value *NewGEP = NewBaseGEP; | |||
6104 | if (Offset == BaseOffset) { | |||
6105 | if (GEP->getType() != I8PtrTy) | |||
6106 | NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); | |||
6107 | } else { | |||
6108 | // Calculate the new offset for the new GEP. | |||
6109 | Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); | |||
6110 | NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); | |||
6111 | ||||
6112 | if (GEP->getType() != I8PtrTy) | |||
6113 | NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); | |||
6114 | } | |||
6115 | replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc); | |||
6116 | LargeOffsetGEPID.erase(GEP); | |||
6117 | LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); | |||
6118 | GEP->eraseFromParent(); | |||
6119 | Changed = true; | |||
6120 | } | |||
6121 | } | |||
6122 | return Changed; | |||
6123 | } | |||
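// Editorial note, not in the original source: a numeric sketch of the split
// above, with made-up offsets. If off0 == 4096 and off1 == 4100 are each too
// large for the target's addressing mode, the loop rebases on a new i8 GEP at
// offset 4096 and rewrites the two accesses as byte offsets 0 and 4 from it,
// which isLegalAddressingMode can then accept.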
6124 | ||||
6125 | bool CodeGenPrepare::optimizePhiType( | |||
6126 | PHINode *I, SmallPtrSetImpl<PHINode *> &Visited, | |||
6127 | SmallPtrSetImpl<Instruction *> &DeletedInstrs) { | |||
6128 | // We are looking for a collection of interconnected phi nodes that together | |||
6129 | // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts | |||
6130 | // are of the same type. Convert the whole set of nodes to the type of the | |||
6131 | // bitcast. | |||
6132 | Type *PhiTy = I->getType(); | |||
6133 | Type *ConvertTy = nullptr; | |||
6134 | if (Visited.count(I) || | |||
6135 | (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy())) | |||
6136 | return false; | |||
6137 | ||||
6138 | SmallVector<Instruction *, 4> Worklist; | |||
6139 | Worklist.push_back(cast<Instruction>(I)); | |||
6140 | SmallPtrSet<PHINode *, 4> PhiNodes; | |||
6141 | SmallPtrSet<ConstantData *, 4> Constants; | |||
6142 | PhiNodes.insert(I); | |||
6143 | Visited.insert(I); | |||
6144 | SmallPtrSet<Instruction *, 4> Defs; | |||
6145 | SmallPtrSet<Instruction *, 4> Uses; | |||
6146 | // This works by adding extra bitcasts between load/stores and removing | |||
6147 | // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi)) | |||
6148 | // we can get in the situation where we remove a bitcast in one iteration | |||
6149 | // just to add it again in the next. We need to ensure that at least one | |||
6150 | // bitcast we remove is anchored to something that will not change back. | |||
6151 | bool AnyAnchored = false; | |||
6152 | ||||
6153 | while (!Worklist.empty()) { | |||
6154 | Instruction *II = Worklist.pop_back_val(); | |||
6155 | ||||
6156 | if (auto *Phi = dyn_cast<PHINode>(II)) { | |||
6157 | // Handle Defs, which might also be PHI's | |||
6158 | for (Value *V : Phi->incoming_values()) { | |||
6159 | if (auto *OpPhi = dyn_cast<PHINode>(V)) { | |||
6160 | if (!PhiNodes.count(OpPhi)) { | |||
6161 | if (!Visited.insert(OpPhi).second) | |||
6162 | return false; | |||
6163 | PhiNodes.insert(OpPhi); | |||
6164 | Worklist.push_back(OpPhi); | |||
6165 | } | |||
6166 | } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) { | |||
6167 | if (!OpLoad->isSimple()) | |||
6168 | return false; | |||
6169 | if (Defs.insert(OpLoad).second) | |||
6170 | Worklist.push_back(OpLoad); | |||
6171 | } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) { | |||
6172 | if (Defs.insert(OpEx).second) | |||
6173 | Worklist.push_back(OpEx); | |||
6174 | } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { | |||
6175 | if (!ConvertTy) | |||
6176 | ConvertTy = OpBC->getOperand(0)->getType(); | |||
6177 | if (OpBC->getOperand(0)->getType() != ConvertTy) | |||
6178 | return false; | |||
6179 | if (Defs.insert(OpBC).second) { | |||
6180 | Worklist.push_back(OpBC); | |||
6181 | AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) && | |||
6182 | !isa<ExtractElementInst>(OpBC->getOperand(0)); | |||
6183 | } | |||
6184 | } else if (auto *OpC = dyn_cast<ConstantData>(V)) | |||
6185 | Constants.insert(OpC); | |||
6186 | else | |||
6187 | return false; | |||
6188 | } | |||
6189 | } | |||
6190 | ||||
6191 | // Handle uses which might also be phi's | |||
6192 | for (User *V : II->users()) { | |||
6193 | if (auto *OpPhi = dyn_cast<PHINode>(V)) { | |||
6194 | if (!PhiNodes.count(OpPhi)) { | |||
6195 | if (Visited.count(OpPhi)) | |||
6196 | return false; | |||
6197 | PhiNodes.insert(OpPhi); | |||
6198 | Visited.insert(OpPhi); | |||
6199 | Worklist.push_back(OpPhi); | |||
6200 | } | |||
6201 | } else if (auto *OpStore = dyn_cast<StoreInst>(V)) { | |||
6202 | if (!OpStore->isSimple() || OpStore->getOperand(0) != II) | |||
6203 | return false; | |||
6204 | Uses.insert(OpStore); | |||
6205 | } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) { | |||
6206 | if (!ConvertTy) | |||
6207 | ConvertTy = OpBC->getType(); | |||
6208 | if (OpBC->getType() != ConvertTy) | |||
6209 | return false; | |||
6210 | Uses.insert(OpBC); | |||
6211 | AnyAnchored |= | |||
6212 | any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); }); | |||
6213 | } else { | |||
6214 | return false; | |||
6215 | } | |||
6216 | } | |||
6217 | } | |||
6218 | ||||
6219 | if (!ConvertTy || !AnyAnchored || | |||
6220 | !TLI->shouldConvertPhiType(PhiTy, ConvertTy)) | |||
6221 | return false; | |||
6222 | ||||
6223 | LLVM_DEBUG(dbgs() << "Converting " << *I << "\n and connected nodes to " | |||
6224 | << *ConvertTy << "\n"); | |||
6225 | ||||
6226 | // Create all the new phi nodes of the new type, and bitcast any loads to the | |||
6227 | // correct type. | |||
6228 | ValueToValueMap ValMap; | |||
6229 | for (ConstantData *C : Constants) | |||
6230 | ValMap[C] = ConstantExpr::getCast(Instruction::BitCast, C, ConvertTy); | |||
6231 | for (Instruction *D : Defs) { | |||
6232 | if (isa<BitCastInst>(D)) { | |||
6233 | ValMap[D] = D->getOperand(0); | |||
6234 | DeletedInstrs.insert(D); | |||
6235 | } else { | |||
6236 | ValMap[D] = | |||
6237 | new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode()); | |||
6238 | } | |||
6239 | } | |||
6240 | for (PHINode *Phi : PhiNodes) | |||
6241 | ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(), | |||
6242 | Phi->getName() + ".tc", Phi); | |||
6243 | // Pipe together all the PhiNodes. | |||
6244 | for (PHINode *Phi : PhiNodes) { | |||
6245 | PHINode *NewPhi = cast<PHINode>(ValMap[Phi]); | |||
6246 | for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++) | |||
6247 | NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)], | |||
6248 | Phi->getIncomingBlock(i)); | |||
6249 | Visited.insert(NewPhi); | |||
6250 | } | |||
6251 | // And finally pipe up the stores and bitcasts | |||
6252 | for (Instruction *U : Uses) { | |||
6253 | if (isa<BitCastInst>(U)) { | |||
6254 | DeletedInstrs.insert(U); | |||
6255 | replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc); | |||
6256 | } else { | |||
6257 | U->setOperand(0, | |||
6258 | new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U)); | |||
6259 | } | |||
6260 | } | |||
6261 | ||||
6262 | // Save the removed phis to be deleted later. | |||
6263 | for (PHINode *Phi : PhiNodes) | |||
6264 | DeletedInstrs.insert(Phi); | |||
6265 | return true; | |||
6266 | } | |||
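// Editorial note, not in the original source: a sketch of the conversion
// above, assuming the target's shouldConvertPhiType(float, i32) returns true
// and all names are hypothetical (%b is defined analogously to %a):
//
//   %a = load float, ptr %pa
//   %p = phi float [ %a, %bb0 ], [ %b, %bb1 ]
//   %c = bitcast float %p to i32
//   %d = xor i32 %c, %m              ; non-store user anchors the rewrite
//
// becomes a phi of i32: "%a.bc = bitcast float %a to i32" is inserted after
// the load, the phi is recreated as "%p.tc = phi i32 ...", and the uses of
// the old bitcast %c are redirected to %p.tc.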
6267 | ||||
6268 | bool CodeGenPrepare::optimizePhiTypes(Function &F) { | |||
6269 | if (!OptimizePhiTypes) | |||
6270 | return false; | |||
6271 | ||||
6272 | bool Changed = false; | |||
6273 | SmallPtrSet<PHINode *, 4> Visited; | |||
6274 | SmallPtrSet<Instruction *, 4> DeletedInstrs; | |||
6275 | ||||
6276 | // Attempt to optimize all the phis in the function to the correct type. | |||
6277 | for (auto &BB : F) | |||
6278 | for (auto &Phi : BB.phis()) | |||
6279 | Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs); | |||
6280 | ||||
6281 | // Remove any old phi's that have been converted. | |||
6282 | for (auto *I : DeletedInstrs) { | |||
6283 | replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc); | |||
6284 | I->eraseFromParent(); | |||
6285 | } | |||
6286 | ||||
6287 | return Changed; | |||
6288 | } | |||
6289 | ||||
6290 | /// Return true, if an ext(load) can be formed from an extension in | |||
6291 | /// \p MovedExts. | |||
6292 | bool CodeGenPrepare::canFormExtLd( | |||
6293 | const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, | |||
6294 | Instruction *&Inst, bool HasPromoted) { | |||
6295 | for (auto *MovedExtInst : MovedExts) { | |||
6296 | if (isa<LoadInst>(MovedExtInst->getOperand(0))) { | |||
6297 | LI = cast<LoadInst>(MovedExtInst->getOperand(0)); | |||
6298 | Inst = MovedExtInst; | |||
6299 | break; | |||
6300 | } | |||
6301 | } | |||
6302 | if (!LI) | |||
6303 | return false; | |||
6304 | ||||
6305 | // If they're already in the same block, there's nothing to do. | |||
6306 | // Make the cheap checks first if we did not promote. | |||
6307 | // If we promoted, we need to check if it is indeed profitable. | |||
6308 | if (!HasPromoted && LI->getParent() == Inst->getParent()) | |||
6309 | return false; | |||
6310 | ||||
6311 | return TLI->isExtLoad(LI, Inst, *DL); | |||
6312 | } | |||
6313 | ||||
6314 | /// Move a zext or sext fed by a load into the same basic block as the load, | |||
6315 | /// unless conditions are unfavorable. This allows SelectionDAG to fold the | |||
6316 | /// extend into the load. | |||
6317 | /// | |||
6318 | /// E.g., | |||
6319 | /// \code | |||
6320 | /// %ld = load i32* %addr | |||
6321 | /// %add = add nuw i32 %ld, 4 | |||
6322 | /// %zext = zext i32 %add to i64 | |||
6323 | /// \endcode | |||
6324 | /// => | |||
6325 | /// \code | |||
6326 | /// %ld = load i32* %addr | |||
6327 | /// %zext = zext i32 %ld to i64 | |||
6328 | /// %add = add nuw i64 %zext, 4 | |||
6329 | /// \endcode | |||
6330 | /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which | |||
6331 | /// allows us to match zext(load i32*) to i64. | |||
6332 | /// | |||
6333 | /// Also, try to promote the computations used to obtain a sign extended | |||
6334 | /// value used in memory accesses. | |||
6335 | /// E.g., | |||
6336 | /// \code | |||
6337 | /// a = add nsw i32 b, 3 | |||
6338 | /// d = sext i32 a to i64 | |||
6339 | /// e = getelementptr ..., i64 d | |||
6340 | /// \endcode | |||
6341 | /// => | |||
6342 | /// \code | |||
6343 | /// f = sext i32 b to i64 | |||
6344 | /// a = add nsw i64 f, 3 | |||
6345 | /// e = getelementptr ..., i64 a | |||
6346 | /// \endcode | |||
6347 | /// | |||
6348 | /// \p Inst[in/out] the extension may be modified during the process if some | |||
6349 | /// promotions apply. | |||
6350 | bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { | |||
6351 | bool AllowPromotionWithoutCommonHeader = false; | |||
6352 | /// See if it is an interesting sext operation for the address type | |||
6353 | /// promotion before trying to promote it, e.g., the ones with the right | |||
6354 | /// type and used in memory accesses. | |||
6355 | bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( | |||
6356 | *Inst, AllowPromotionWithoutCommonHeader); | |||
6357 | TypePromotionTransaction TPT(RemovedInsts); | |||
6358 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
6359 | TPT.getRestorationPoint(); | |||
6360 | SmallVector<Instruction *, 1> Exts; | |||
6361 | SmallVector<Instruction *, 2> SpeculativelyMovedExts; | |||
6362 | Exts.push_back(Inst); | |||
6363 | ||||
6364 | bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); | |||
6365 | ||||
6366 | // Look for a load being extended. | |||
6367 | LoadInst *LI = nullptr; | |||
6368 | Instruction *ExtFedByLoad; | |||
6369 | ||||
6370 | // Try to promote a chain of computation if it allows to form an extended | |||
6371 | // load. | |||
6372 | if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { | |||
6373 | assert(LI && ExtFedByLoad && "Expect a valid load and extension"); | |||
6374 | TPT.commit(); | |||
6375 | // Move the extend into the same block as the load. | |||
6376 | ExtFedByLoad->moveAfter(LI); | |||
6377 | ++NumExtsMoved; | |||
6378 | Inst = ExtFedByLoad; | |||
6379 | return true; | |||
6380 | } | |||
6381 | ||||
6382 | // Continue promoting SExts if known as considerable depending on targets. | |||
6383 | if (ATPConsiderable && | |||
6384 | performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, | |||
6385 | HasPromoted, TPT, SpeculativelyMovedExts)) | |||
6386 | return true; | |||
6387 | ||||
6388 | TPT.rollback(LastKnownGood); | |||
6389 | return false; | |||
6390 | } | |||
6391 | ||||
6392 | // Perform address type promotion if doing so is profitable. | |||
6393 | // If AllowPromotionWithoutCommonHeader == false, we only promote when we can | |||
6394 | // find other sext instructions that sign extended the same initial value. | |||
6395 | // However, if AllowPromotionWithoutCommonHeader == true, we assume promoting | |||
6396 | // the extension is profitable on its own. | |||
6397 | bool CodeGenPrepare::performAddressTypePromotion( | |||
6398 | Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, | |||
6399 | bool HasPromoted, TypePromotionTransaction &TPT, | |||
6400 | SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { | |||
6401 | bool Promoted = false; | |||
6402 | SmallPtrSet<Instruction *, 1> UnhandledExts; | |||
6403 | bool AllSeenFirst = true; | |||
6404 | for (auto *I : SpeculativelyMovedExts) { | |||
6405 | Value *HeadOfChain = I->getOperand(0); | |||
6406 | DenseMap<Value *, Instruction *>::iterator AlreadySeen = | |||
6407 | SeenChainsForSExt.find(HeadOfChain); | |||
6408 | // If there is an unhandled SExt which has the same header, try to promote | |||
6409 | // it as well. | |||
6410 | if (AlreadySeen != SeenChainsForSExt.end()) { | |||
6411 | if (AlreadySeen->second != nullptr) | |||
6412 | UnhandledExts.insert(AlreadySeen->second); | |||
6413 | AllSeenFirst = false; | |||
6414 | } | |||
6415 | } | |||
6416 | ||||
6417 | if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && | |||
6418 | SpeculativelyMovedExts.size() == 1)) { | |||
6419 | TPT.commit(); | |||
6420 | if (HasPromoted) | |||
6421 | Promoted = true; | |||
6422 | for (auto *I : SpeculativelyMovedExts) { | |||
6423 | Value *HeadOfChain = I->getOperand(0); | |||
6424 | SeenChainsForSExt[HeadOfChain] = nullptr; | |||
6425 | ValToSExtendedUses[HeadOfChain].push_back(I); | |||
6426 | } | |||
6427 | // Update Inst as promotion happened. | |||
6428 | Inst = SpeculativelyMovedExts.pop_back_val(); | |||
6429 | } else { | |||
6430 | // This is the first chain visited from the header, keep the current chain | |||
6431 | // as unhandled. Defer to promote this until we encounter another SExt | |||
6432 | // chain derived from the same header. | |||
6433 | for (auto *I : SpeculativelyMovedExts) { | |||
6434 | Value *HeadOfChain = I->getOperand(0); | |||
6435 | SeenChainsForSExt[HeadOfChain] = Inst; | |||
6436 | } | |||
6437 | return false; | |||
6438 | } | |||
6439 | ||||
6440 | if (!AllSeenFirst && !UnhandledExts.empty()) | |||
6441 | for (auto *VisitedSExt : UnhandledExts) { | |||
6442 | if (RemovedInsts.count(VisitedSExt)) | |||
6443 | continue; | |||
6444 | TypePromotionTransaction TPT(RemovedInsts); | |||
6445 | SmallVector<Instruction *, 1> Exts; | |||
6446 | SmallVector<Instruction *, 2> Chains; | |||
6447 | Exts.push_back(VisitedSExt); | |||
6448 | bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); | |||
6449 | TPT.commit(); | |||
6450 | if (HasPromoted) | |||
6451 | Promoted = true; | |||
6452 | for (auto *I : Chains) { | |||
6453 | Value *HeadOfChain = I->getOperand(0); | |||
6454 | // Mark this as handled. | |||
6455 | SeenChainsForSExt[HeadOfChain] = nullptr; | |||
6456 | ValToSExtendedUses[HeadOfChain].push_back(I); | |||
6457 | } | |||
6458 | } | |||
6459 | return Promoted; | |||
6460 | } | |||
6461 | ||||
6462 | bool CodeGenPrepare::optimizeExtUses(Instruction *I) { | |||
6463 | BasicBlock *DefBB = I->getParent(); | |||
6464 | ||||
6465 | // If the result of a {s|z}ext and its source are both live out, rewrite all | |||
6466 | // other uses of the source with result of extension. | |||
6467 | Value *Src = I->getOperand(0); | |||
6468 | if (Src->hasOneUse()) | |||
6469 | return false; | |||
6470 | ||||
6471 | // Only do this xform if truncating is free. | |||
6472 | if (!TLI->isTruncateFree(I->getType(), Src->getType())) | |||
6473 | return false; | |||
6474 | ||||
6475 | // Only safe to perform the optimization if the source is also defined in | |||
6476 | // this block. | |||
6477 | if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) | |||
6478 | return false; | |||
6479 | ||||
6480 | bool DefIsLiveOut = false; | |||
6481 | for (User *U : I->users()) { | |||
6482 | Instruction *UI = cast<Instruction>(U); | |||
6483 | ||||
6484 | // Figure out which BB this ext is used in. | |||
6485 | BasicBlock *UserBB = UI->getParent(); | |||
6486 | if (UserBB == DefBB) | |||
6487 | continue; | |||
6488 | DefIsLiveOut = true; | |||
6489 | break; | |||
6490 | } | |||
6491 | if (!DefIsLiveOut) | |||
6492 | return false; | |||
6493 | ||||
6494 | // Make sure none of the uses are PHI nodes. | |||
6495 | for (User *U : Src->users()) { | |||
6496 | Instruction *UI = cast<Instruction>(U); | |||
6497 | BasicBlock *UserBB = UI->getParent(); | |||
6498 | if (UserBB == DefBB) | |||
6499 | continue; | |||
6500 | // Be conservative. We don't want this xform to end up introducing | |||
6501 | // reloads just before load / store instructions. | |||
6502 | if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) | |||
6503 | return false; | |||
6504 | } | |||
6505 | ||||
6506 | // InsertedTruncs - Only insert one trunc in each block once. | |||
6507 | DenseMap<BasicBlock *, Instruction *> InsertedTruncs; | |||
6508 | ||||
6509 | bool MadeChange = false; | |||
6510 | for (Use &U : Src->uses()) { | |||
6511 | Instruction *User = cast<Instruction>(U.getUser()); | |||
6512 | ||||
6513 | // Figure out which BB this ext is used in. | |||
6514 | BasicBlock *UserBB = User->getParent(); | |||
6515 | if (UserBB == DefBB) | |||
6516 | continue; | |||
6517 | ||||
6518 | // Both src and def are live in this block. Rewrite the use. | |||
6519 | Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; | |||
6520 | ||||
6521 | if (!InsertedTrunc) { | |||
6522 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
6523 | assert(InsertPt != UserBB->end()); | |||
6524 | InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); | |||
6525 | InsertedInsts.insert(InsertedTrunc); | |||
6526 | } | |||
6527 | ||||
6528 | // Replace a use of the {s|z}ext source with a use of the result. | |||
6529 | U = InsertedTrunc; | |||
6530 | ++NumExtUses; | |||
6531 | MadeChange = true; | |||
6532 | } | |||
6533 | ||||
6534 | return MadeChange; | |||
6535 | } | |||
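// Editorial note, not in the original source: a sketch of the rewrite above.
// If both %x and its zext are live into another block,
//
//   DefBB: %x = add i16 ...
//          %e = zext i16 %x to i32
//   UseBB: uses of both %x and %e
//
// the out-of-block uses of %x are rewritten to "trunc i32 %e to i16", so only
// %e stays live across the edge; this is done only when the target reports
// the truncate as free.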
6536 | ||||
6537 | // Find loads whose uses only use some of the loaded value's bits. Add an "and" | |||
6538 | // just after the load if the target can fold this into one extload instruction, | |||
6539 | // with the hope of eliminating some of the other later "and" instructions using | |||
6540 | // the loaded value. "and"s that are made trivially redundant by the insertion | |||
6541 | // of the new "and" are removed by this function, while others (e.g. those whose | |||
6542 | // path from the load goes through a phi) are left for isel to potentially | |||
6543 | // remove. | |||
6544 | // | |||
6545 | // For example: | |||
6546 | // | |||
6547 | // b0: | |||
6548 | // x = load i32 | |||
6549 | // ... | |||
6550 | // b1: | |||
6551 | // y = and x, 0xff | |||
6552 | // z = use y | |||
6553 | // | |||
6554 | // becomes: | |||
6555 | // | |||
6556 | // b0: | |||
6557 | // x = load i32 | |||
6558 | // x' = and x, 0xff | |||
6559 | // ... | |||
6560 | // b1: | |||
6561 | // z = use x' | |||
6562 | // | |||
6563 | // whereas: | |||
6564 | // | |||
6565 | // b0: | |||
6566 | // x1 = load i32 | |||
6567 | // ... | |||
6568 | // b1: | |||
6569 | // x2 = load i32 | |||
6570 | // ... | |||
6571 | // b2: | |||
6572 | // x = phi x1, x2 | |||
6573 | // y = and x, 0xff | |||
6574 | // | |||
6575 | // becomes (after a call to optimizeLoadExt for each load): | |||
6576 | // | |||
6577 | // b0: | |||
6578 | // x1 = load i32 | |||
6579 | // x1' = and x1, 0xff | |||
6580 | // ... | |||
6581 | // b1: | |||
6582 | // x2 = load i32 | |||
6583 | // x2' = and x2, 0xff | |||
6584 | // ... | |||
6585 | // b2: | |||
6586 | // x = phi x1', x2' | |||
6587 | // y = and x, 0xff | |||
6588 | bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { | |||
6589 | if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) | |||
6590 | return false; | |||
6591 | ||||
6592 | // Skip loads we've already transformed. | |||
6593 | if (Load->hasOneUse() && | |||
6594 | InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) | |||
6595 | return false; | |||
6596 | ||||
6597 | // Look at all uses of Load, looking through phis, to determine how many bits | |||
6598 | // of the loaded value are needed. | |||
6599 | SmallVector<Instruction *, 8> WorkList; | |||
6600 | SmallPtrSet<Instruction *, 16> Visited; | |||
6601 | SmallVector<Instruction *, 8> AndsToMaybeRemove; | |||
6602 | for (auto *U : Load->users()) | |||
6603 | WorkList.push_back(cast<Instruction>(U)); | |||
6604 | ||||
6605 | EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); | |||
6606 | unsigned BitWidth = LoadResultVT.getSizeInBits(); | |||
6607 | // If the BitWidth is 0, do not try to optimize the type | |||
6608 | if (BitWidth == 0) | |||
6609 | return false; | |||
6610 | ||||
6611 | APInt DemandBits(BitWidth, 0); | |||
6612 | APInt WidestAndBits(BitWidth, 0); | |||
6613 | ||||
6614 | while (!WorkList.empty()) { | |||
6615 | Instruction *I = WorkList.pop_back_val(); | |||
6616 | ||||
6617 | // Break use-def graph loops. | |||
6618 | if (!Visited.insert(I).second) | |||
6619 | continue; | |||
6620 | ||||
6621 | // For a PHI node, push all of its users. | |||
6622 | if (auto *Phi = dyn_cast<PHINode>(I)) { | |||
6623 | for (auto *U : Phi->users()) | |||
6624 | WorkList.push_back(cast<Instruction>(U)); | |||
6625 | continue; | |||
6626 | } | |||
6627 | ||||
6628 | switch (I->getOpcode()) { | |||
6629 | case Instruction::And: { | |||
6630 | auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); | |||
6631 | if (!AndC) | |||
6632 | return false; | |||
6633 | APInt AndBits = AndC->getValue(); | |||
6634 | DemandBits |= AndBits; | |||
6635 | // Keep track of the widest and mask we see. | |||
6636 | if (AndBits.ugt(WidestAndBits)) | |||
6637 | WidestAndBits = AndBits; | |||
6638 | if (AndBits == WidestAndBits && I->getOperand(0) == Load) | |||
6639 | AndsToMaybeRemove.push_back(I); | |||
6640 | break; | |||
6641 | } | |||
6642 | ||||
6643 | case Instruction::Shl: { | |||
6644 | auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); | |||
6645 | if (!ShlC) | |||
6646 | return false; | |||
6647 | uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); | |||
6648 | DemandBits.setLowBits(BitWidth - ShiftAmt); | |||
6649 | break; | |||
6650 | } | |||
6651 | ||||
6652 | case Instruction::Trunc: { | |||
6653 | EVT TruncVT = TLI->getValueType(*DL, I->getType()); | |||
6654 | unsigned TruncBitWidth = TruncVT.getSizeInBits(); | |||
6655 | DemandBits.setLowBits(TruncBitWidth); | |||
6656 | break; | |||
6657 | } | |||
6658 | ||||
6659 | default: | |||
6660 | return false; | |||
6661 | } | |||
6662 | } | |||
6663 | ||||
6664 | uint32_t ActiveBits = DemandBits.getActiveBits(); | |||
6665 | // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the | |||
6666 | // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, | |||
6667 | // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but | |||
6668 | // (and (load x) 1) is not matched as a single instruction, rather as a LDR | |||
6669 | // followed by an AND. | |||
6670 | // TODO: Look into removing this restriction by fixing backends to either | |||
6671 | // return false for isLoadExtLegal for i1 or have them select this pattern to | |||
6672 | // a single instruction. | |||
6673 | // | |||
6674 | // Also avoid hoisting if we didn't see any ands with the exact DemandBits | |||
6675 | // mask, since these are the only ands that will be removed by isel. | |||
6676 | if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || | |||
6677 | WidestAndBits != DemandBits) | |||
6678 | return false; | |||
6679 | ||||
6680 | LLVMContext &Ctx = Load->getType()->getContext(); | |||
6681 | Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); | |||
6682 | EVT TruncVT = TLI->getValueType(*DL, TruncTy); | |||
6683 | ||||
6684 | // Reject cases that won't be matched as extloads. | |||
6685 | if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || | |||
6686 | !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) | |||
6687 | return false; | |||
6688 | ||||
6689 | IRBuilder<> Builder(Load->getNextNode()); | |||
6690 | auto *NewAnd = cast<Instruction>( | |||
6691 | Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); | |||
6692 | // Mark this instruction as "inserted by CGP", so that other | |||
6693 | // optimizations don't touch it. | |||
6694 | InsertedInsts.insert(NewAnd); | |||
6695 | ||||
6696 | // Replace all uses of load with new and (except for the use of load in the | |||
6697 | // new and itself). | |||
6698 | replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc); | |||
6699 | NewAnd->setOperand(0, Load); | |||
6700 | ||||
6701 | // Remove any and instructions that are now redundant. | |||
6702 | for (auto *And : AndsToMaybeRemove) | |||
6703 | // Check that the and mask is the same as the one we decided to put on the | |||
6704 | // new and. | |||
6705 | if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { | |||
6706 | replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc); | |||
6707 | if (&*CurInstIterator == And) | |||
6708 | CurInstIterator = std::next(And->getIterator()); | |||
6709 | And->eraseFromParent(); | |||
6710 | ++NumAndUses; | |||
6711 | } | |||
6712 | ||||
6713 | ++NumAndsAdded; | |||
6714 | return true; | |||
6715 | } | |||
6716 | ||||
6717 | /// Check if V (an operand of a select instruction) is an expensive instruction | |||
6718 | /// that is only used once. | |||
6719 | static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { | |||
6720 | auto *I = dyn_cast<Instruction>(V); | |||
6721 | // If it's safe to speculatively execute, then it should not have side | |||
6722 | // effects; therefore, it's safe to sink and possibly *not* execute. | |||
6723 | return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && | |||
6724 | TTI->isExpensiveToSpeculativelyExecute(I); | |||
6725 | } | |||
6726 | ||||
6727 | /// Returns true if a SelectInst should be turned into an explicit branch. | |||
6728 | static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, | |||
6729 | const TargetLowering *TLI, | |||
6730 | SelectInst *SI) { | |||
6731 | // If even a predictable select is cheap, then a branch can't be cheaper. | |||
6732 | if (!TLI->isPredictableSelectExpensive()) | |||
6733 | return false; | |||
6734 | ||||
6735 | // FIXME: This should use the same heuristics as IfConversion to determine | |||
6736 | // whether a select is better represented as a branch. | |||
6737 | ||||
6738 | // If metadata tells us that the select condition is obviously predictable, | |||
6739 | // then we want to replace the select with a branch. | |||
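// For example (illustrative numbers; the actual threshold comes from TTI
// and is typically around 99%): with !prof weights {2000, 1},
// Max/Sum = 2000/2001 (~99.95%) exceeds the threshold, so we prefer a
// branch.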
6740 | uint64_t TrueWeight, FalseWeight; | |||
6741 | if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) { | |||
6742 | uint64_t Max = std::max(TrueWeight, FalseWeight); | |||
6743 | uint64_t Sum = TrueWeight + FalseWeight; | |||
6744 | if (Sum != 0) { | |||
6745 | auto Probability = BranchProbability::getBranchProbability(Max, Sum); | |||
6746 | if (Probability > TTI->getPredictableBranchThreshold()) | |||
6747 | return true; | |||
6748 | } | |||
6749 | } | |||
6750 | ||||
6751 | CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); | |||
6752 | ||||
6753 | // If a branch is predictable, an out-of-order CPU can avoid blocking on its | |||
6754 | // comparison condition. If the compare has more than one use, there's | |||
6755 | // probably another cmov or setcc around, so it's not worth emitting a branch. | |||
6756 | if (!Cmp || !Cmp->hasOneUse()) | |||
6757 | return false; | |||
6758 | ||||
6759 | // If either operand of the select is expensive and only needed on one side | |||
6760 | // of the select, we should form a branch. | |||
6761 | if (sinkSelectOperand(TTI, SI->getTrueValue()) || | |||
6762 | sinkSelectOperand(TTI, SI->getFalseValue())) | |||
6763 | return true; | |||
6764 | ||||
6765 | return false; | |||
6766 | } | |||
6767 | ||||
6768 | /// If \p isTrue is true, return the true value of \p SI, otherwise return | |||
6769 | /// the false value of \p SI. If the true/false value of \p SI is defined by any | |||
6770 | /// select instructions in \p Selects, look through the defining select | |||
6771 | /// instruction until the true/false value is not defined in \p Selects. | |||
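/// E.g. (an illustrative sketch), given
///   %s1 = select i1 %c, i32 %a, i32 %b
///   %s2 = select i1 %c, i32 %s1, i32 %d
/// with Selects = {%s1, %s2}, querying the true value of %s2 looks
/// through %s1 and returns %a.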
6772 | static Value * | |||
6773 | getTrueOrFalseValue(SelectInst *SI, bool isTrue, | |||
6774 | const SmallPtrSet<const Instruction *, 2> &Selects) { | |||
6775 | Value *V = nullptr; | |||
6776 | ||||
6777 | for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); | |||
6778 | DefSI = dyn_cast<SelectInst>(V)) { | |||
6779 | assert(DefSI->getCondition() == SI->getCondition() && | |||
6780 | "The condition of DefSI does not match with SI"); | |||
6781 | V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); | |||
6782 | } | |||
6783 | ||||
6784 | assert(V && "Failed to get select true/false value"); | |||
6785 | return V; | |||
6786 | } | |||
6787 | ||||
6788 | bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) { | |||
6789 | assert(Shift->isShift() && "Expected a shift"); | |||
6790 | ||||
6791 | // If this is (1) a vector shift, (2) shifts by scalars are cheaper than | |||
6792 | // general vector shifts, and (3) the shift amount is a select-of-splatted | |||
6793 | // values, hoist the shifts before the select: | |||
6794 | // shift Op0, (select Cond, TVal, FVal) --> | |||
6795 | // select Cond, (shift Op0, TVal), (shift Op0, FVal) | |||
6796 | // | |||
6797 | // This is inverting a generic IR transform when we know that the cost of a | |||
6798 | // general vector shift is more than the cost of 2 shift-by-scalars. | |||
6799 | // We can't do this effectively in SDAG because we may not be able to | |||
6800 | // determine if the select operands are splats from within a basic block. | |||
6801 | Type *Ty = Shift->getType(); | |||
6802 | if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) | |||
6803 | return false; | |||
6804 | Value *Cond, *TVal, *FVal; | |||
6805 | if (!match(Shift->getOperand(1), | |||
6806 | m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) | |||
6807 | return false; | |||
6808 | if (!isSplatValue(TVal) || !isSplatValue(FVal)) | |||
6809 | return false; | |||
6810 | ||||
6811 | IRBuilder<> Builder(Shift); | |||
6812 | BinaryOperator::BinaryOps Opcode = Shift->getOpcode(); | |||
6813 | Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal); | |||
6814 | Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal); | |||
6815 | Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); | |||
6816 | replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc); | |||
6817 | Shift->eraseFromParent(); | |||
6818 | return true; | |||
6819 | } | |||
6820 | ||||
6821 | bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) { | |||
6822 | Intrinsic::ID Opcode = Fsh->getIntrinsicID(); | |||
6823 | assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) && | |||
6824 | "Expected a funnel shift"); | |||
6825 | ||||
6826 | // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper | |||
6827 | // than general vector shifts, and (3) the shift amount is select-of-splatted | |||
6828 | // values, hoist the funnel shifts before the select: | |||
6829 | // fsh Op0, Op1, (select Cond, TVal, FVal) --> | |||
6830 | // select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal) | |||
6831 | // | |||
6832 | // This is inverting a generic IR transform when we know that the cost of a | |||
6833 | // general vector shift is more than the cost of 2 shift-by-scalars. | |||
6834 | // We can't do this effectively in SDAG because we may not be able to | |||
6835 | // determine if the select operands are splats from within a basic block. | |||
6836 | Type *Ty = Fsh->getType(); | |||
6837 | if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty)) | |||
6838 | return false; | |||
6839 | Value *Cond, *TVal, *FVal; | |||
6840 | if (!match(Fsh->getOperand(2), | |||
6841 | m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal))))) | |||
6842 | return false; | |||
6843 | if (!isSplatValue(TVal) || !isSplatValue(FVal)) | |||
6844 | return false; | |||
6845 | ||||
6846 | IRBuilder<> Builder(Fsh); | |||
6847 | Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1); | |||
6848 | Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal}); | |||
6849 | Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal}); | |||
6850 | Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal); | |||
6851 | replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc); | |||
6852 | Fsh->eraseFromParent(); | |||
6853 | return true; | |||
6854 | } | |||
6855 | ||||
6856 | /// If we have a SelectInst that will likely profit from branch prediction, | |||
6857 | /// turn it into a branch. | |||
6858 | bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { | |||
6859 | if (DisableSelectToBranch) | |||
6860 | return false; | |||
6861 | ||||
6862 | // If the SelectOptimize pass is enabled, selects have already been optimized. | |||
6863 | if (!getCGPassBuilderOption().DisableSelectOptimize) | |||
6864 | return false; | |||
6865 | ||||
6866 | // Find all consecutive select instructions that share the same condition. | |||
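// E.g. (illustrative), both of these share %c and are collected together:
//   %s1 = select i1 %c, i32 %a, i32 %b
//   %s2 = select i1 %c, i32 %x, i32 %y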
6867 | SmallVector<SelectInst *, 2> ASI; | |||
6868 | ASI.push_back(SI); | |||
6869 | for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); | |||
6870 | It != SI->getParent()->end(); ++It) { | |||
6871 | SelectInst *I = dyn_cast<SelectInst>(&*It); | |||
6872 | if (I && SI->getCondition() == I->getCondition()) { | |||
6873 | ASI.push_back(I); | |||
6874 | } else { | |||
6875 | break; | |||
6876 | } | |||
6877 | } | |||
6878 | ||||
6879 | SelectInst *LastSI = ASI.back(); | |||
6880 | // Increment the current iterator to skip the rest of the select instructions, | |||
6881 | // because they will either all be lowered to branches or none of them will be. | |||
6882 | CurInstIterator = std::next(LastSI->getIterator()); | |||
6883 | ||||
6884 | bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); | |||
6885 | ||||
6886 | // Can we convert the 'select' to CF? | |||
6887 | if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) | |||
6888 | return false; | |||
6889 | ||||
6890 | TargetLowering::SelectSupportKind SelectKind; | |||
6891 | if (SI->getType()->isVectorTy()) | |||
6892 | SelectKind = TargetLowering::ScalarCondVectorVal; | |||
6893 | else | |||
6894 | SelectKind = TargetLowering::ScalarValSelect; | |||
6895 | ||||
6896 | if (TLI->isSelectSupported(SelectKind) && | |||
6897 | (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize || | |||
6898 | llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))) | |||
6899 | return false; | |||
6900 | ||||
6901 | // The DominatorTree needs to be rebuilt by any consumers after this | |||
6902 | // transformation. We simply reset here rather than setting the ModifiedDT | |||
6903 | // flag to avoid restarting the function walk in runOnFunction for each | |||
6904 | // select optimized. | |||
6905 | DT.reset(); | |||
6906 | ||||
6907 | // Transform a sequence like this: | |||
6908 | // start: | |||
6909 | // %cmp = cmp uge i32 %a, %b | |||
6910 | // %sel = select i1 %cmp, i32 %c, i32 %d | |||
6911 | // | |||
6912 | // Into: | |||
6913 | // start: | |||
6914 | // %cmp = cmp uge i32 %a, %b | |||
6915 | // %cmp.frozen = freeze %cmp | |||
6916 | // br i1 %cmp.frozen, label %select.true, label %select.false | |||
6917 | // select.true: | |||
6918 | // br label %select.end | |||
6919 | // select.false: | |||
6920 | // br label %select.end | |||
6921 | // select.end: | |||
6922 | // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] | |||
6923 | // | |||
6924 | // %cmp should be frozen, otherwise it may introduce undefined behavior. | |||
6925 | // In addition, we may sink instructions that produce %c or %d from | |||
6926 | // the entry block into the destination(s) of the new branch. | |||
6927 | // If the true or false blocks do not contain a sunken instruction, that | |||
6928 | // block and its branch may be optimized away. In that case, one side of the | |||
6929 | // first branch will point directly to select.end, and the corresponding PHI | |||
6930 | // predecessor block will be the start block. | |||
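// E.g. (an illustrative sketch): for %sel = select i1 %cmp, float %fd,
// float %y, where %fd = fdiv float %a, %b has no other use and the target
// reports fdiv as expensive to speculate, %fd is moved into
// select.true.sink so it only executes when %cmp is true.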
6931 | ||||
6932 | // First, we split the block containing the select into 2 blocks. | |||
6933 | BasicBlock *StartBlock = SI->getParent(); | |||
6934 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); | |||
6935 | BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); | |||
6936 | if (IsHugeFunc) | |||
6937 | FreshBBs.insert(EndBlock); | |||
6938 | BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock).getFrequency()); | |||
6939 | ||||
6940 | // Delete the unconditional branch that was just created by the split. | |||
6941 | StartBlock->getTerminator()->eraseFromParent(); | |||
6942 | ||||
6943 | // These are the new basic blocks for the conditional branch. | |||
6944 | // At least one will become an actual new basic block. | |||
6945 | BasicBlock *TrueBlock = nullptr; | |||
6946 | BasicBlock *FalseBlock = nullptr; | |||
6947 | BranchInst *TrueBranch = nullptr; | |||
6948 | BranchInst *FalseBranch = nullptr; | |||
6949 | ||||
6950 | // Sink expensive instructions into the conditional blocks to avoid executing | |||
6951 | // them speculatively. | |||
6952 | for (SelectInst *SI : ASI) { | |||
6953 | if (sinkSelectOperand(TTI, SI->getTrueValue())) { | |||
6954 | if (TrueBlock == nullptr) { | |||
6955 | TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", | |||
6956 | EndBlock->getParent(), EndBlock); | |||
6957 | TrueBranch = BranchInst::Create(EndBlock, TrueBlock); | |||
6958 | if (IsHugeFunc) | |||
6959 | FreshBBs.insert(TrueBlock); | |||
6960 | TrueBranch->setDebugLoc(SI->getDebugLoc()); | |||
6961 | } | |||
6962 | auto *TrueInst = cast<Instruction>(SI->getTrueValue()); | |||
6963 | TrueInst->moveBefore(TrueBranch); | |||
6964 | } | |||
6965 | if (sinkSelectOperand(TTI, SI->getFalseValue())) { | |||
6966 | if (FalseBlock == nullptr) { | |||
6967 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", | |||
6968 | EndBlock->getParent(), EndBlock); | |||
6969 | if (IsHugeFunc) | |||
6970 | FreshBBs.insert(FalseBlock); | |||
6971 | FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | |||
6972 | FalseBranch->setDebugLoc(SI->getDebugLoc()); | |||
6973 | } | |||
6974 | auto *FalseInst = cast<Instruction>(SI->getFalseValue()); | |||
6975 | FalseInst->moveBefore(FalseBranch); | |||
6976 | } | |||
6977 | } | |||
6978 | ||||
6979 | // If there was nothing to sink, then arbitrarily choose the 'false' side | |||
6980 | // for a new input value to the PHI. | |||
6981 | if (TrueBlock == FalseBlock) { | |||
6982 | assert(TrueBlock == nullptr && | |||
6983 | "Unexpected basic block transform while optimizing select"); | |||
6984 | ||||
6985 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", | |||
6986 | EndBlock->getParent(), EndBlock); | |||
6987 | if (IsHugeFunc) | |||
6988 | FreshBBs.insert(FalseBlock); | |||
6989 | auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | |||
6990 | FalseBranch->setDebugLoc(SI->getDebugLoc()); | |||
6991 | } | |||
6992 | ||||
6993 | // Insert the real conditional branch based on the original condition. | |||
6994 | // If we did not create a new block for one of the 'true' or 'false' paths | |||
6995 | // of the condition, it means that side of the branch goes to the end block | |||
6996 | // directly and the path originates from the start block from the point of | |||
6997 | // view of the new PHI. | |||
6998 | BasicBlock *TT, *FT; | |||
6999 | if (TrueBlock == nullptr) { | |||
7000 | TT = EndBlock; | |||
7001 | FT = FalseBlock; | |||
7002 | TrueBlock = StartBlock; | |||
7003 | } else if (FalseBlock == nullptr) { | |||
7004 | TT = TrueBlock; | |||
7005 | FT = EndBlock; | |||
7006 | FalseBlock = StartBlock; | |||
7007 | } else { | |||
7008 | TT = TrueBlock; | |||
7009 | FT = FalseBlock; | |||
7010 | } | |||
7011 | IRBuilder<> IB(SI); | |||
7012 | auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen"); | |||
7013 | IB.CreateCondBr(CondFr, TT, FT, SI); | |||
7014 | ||||
7015 | SmallPtrSet<const Instruction *, 2> INS; | |||
7016 | INS.insert(ASI.begin(), ASI.end()); | |||
7017 | // Use a reverse iterator because a later select may use the value of an | |||
7018 | // earlier select, and we need to propagate the value through the earlier | |||
7019 | // select to get the PHI operand. | |||
7020 | for (SelectInst *SI : llvm::reverse(ASI)) { | |||
7021 | // The select itself is replaced with a PHI Node. | |||
7022 | PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); | |||
7023 | PN->takeName(SI); | |||
7024 | PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); | |||
7025 | PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); | |||
7026 | PN->setDebugLoc(SI->getDebugLoc()); | |||
7027 | ||||
7028 | replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc); | |||
7029 | SI->eraseFromParent(); | |||
7030 | INS.erase(SI); | |||
7031 | ++NumSelectsExpanded; | |||
7032 | } | |||
7033 | ||||
7034 | // Instruct OptimizeBlock to skip to the next block. | |||
7035 | CurInstIterator = StartBlock->end(); | |||
7036 | return true; | |||
7037 | } | |||
7038 | ||||
7039 | /// Some targets only accept certain types for splat inputs. For example, a VDUP | |||
7040 | /// in MVE takes a GPR (integer) register, and the instructions that incorporate | |||
7041 | /// a VDUP (such as a VADD qd, qm, rm) also require a GPR register. | |||
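/// E.g. (an illustrative sketch; the converted type comes from
/// TLI->shouldConvertSplatType and is target-specific):
///   %i = insertelement <4 x float> poison, float %f, i64 0
///   %s = shufflevector <4 x float> %i, <4 x float> poison, <4 x i32> zeroinitializer
/// is rebuilt as a bitcast of %f to i32, a splat of that i32, and a bitcast
/// back to <4 x float>, so the splat input can live in a GPR.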
7042 | bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { | |||
7043 | // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only | |||
7044 | if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), | |||
7045 | m_Undef(), m_ZeroMask()))) | |||
7046 | return false; | |||
7047 | Type *NewType = TLI->shouldConvertSplatType(SVI); | |||
7048 | if (!NewType) | |||
7049 | return false; | |||
7050 | ||||
7051 | auto *SVIVecType = cast<FixedVectorType>(SVI->getType()); | |||
7052 | assert(!NewType->isVectorTy() && "Expected a scalar type!"); | |||
7053 | assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() && | |||
7054 | "Expected a type of the same size!"); | |||
7055 | auto *NewVecType = | |||
7056 | FixedVectorType::get(NewType, SVIVecType->getNumElements()); | |||
7057 | ||||
7058 | // Create a bitcast (shuffle (insert (bitcast(..)))) | |||
7059 | IRBuilder<> Builder(SVI->getContext()); | |||
7060 | Builder.SetInsertPoint(SVI); | |||
7061 | Value *BC1 = Builder.CreateBitCast( | |||
7062 | cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType); | |||
7063 | Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1); | |||
7064 | Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType); | |||
7065 | ||||
7066 | replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc); | |||
7067 | RecursivelyDeleteTriviallyDeadInstructions( | |||
7068 | SVI, TLInfo, nullptr, | |||
7069 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | |||
7070 | ||||
7071 | // Also hoist the bitcast up to its operand if they are not in the same | |||
7072 | // block. | |||
7073 | if (auto *BCI = dyn_cast<Instruction>(BC1)) | |||
7074 | if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0))) | |||
7075 | if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) && | |||
7076 | !Op->isTerminator() && !Op->isEHPad()) | |||
7077 | BCI->moveAfter(Op); | |||
7078 | ||||
7079 | return true; | |||
7080 | } | |||
7081 | ||||
7082 | bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) { | |||
7083 | // If the operands of I can be folded into a target instruction together with | |||
7084 | // I, duplicate and sink them. | |||
7085 | SmallVector<Use *, 4> OpsToSink; | |||
7086 | if (!TLI->shouldSinkOperands(I, OpsToSink)) | |||
7087 | return false; | |||
7088 | ||||
7089 | // OpsToSink can contain multiple uses in a use chain (e.g. | |||
7090 | // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating | |||
7091 | // uses must come first, so we process the ops in reverse order so as to not | |||
7092 | // create invalid IR. | |||
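// E.g. (an illustrative sketch): with %u1 = shufflevector ... and
// %u2 = zext %u1 defined in another block and both reported as sinkable,
// %u2 is cloned first (reverse order), then %u1; the clone of %u2 is then
// rewired to use the clone of %u1 via the NewInstructions map below.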
7093 | BasicBlock *TargetBB = I->getParent(); | |||
7094 | bool Changed = false; | |||
7095 | SmallVector<Use *, 4> ToReplace; | |||
7096 | Instruction *InsertPoint = I; | |||
7097 | DenseMap<const Instruction *, unsigned long> InstOrdering; | |||
7098 | unsigned long InstNumber = 0; | |||
7099 | for (const auto &I : *TargetBB) | |||
7100 | InstOrdering[&I] = InstNumber++; | |||
7101 | ||||
7102 | for (Use *U : reverse(OpsToSink)) { | |||
7103 | auto *UI = cast<Instruction>(U->get()); | |||
7104 | if (isa<PHINode>(UI)) | |||
7105 | continue; | |||
7106 | if (UI->getParent() == TargetBB) { | |||
7107 | if (InstOrdering[UI] < InstOrdering[InsertPoint]) | |||
7108 | InsertPoint = UI; | |||
7109 | continue; | |||
7110 | } | |||
7111 | ToReplace.push_back(U); | |||
7112 | } | |||
7113 | ||||
7114 | SetVector<Instruction *> MaybeDead; | |||
7115 | DenseMap<Instruction *, Instruction *> NewInstructions; | |||
7116 | for (Use *U : ToReplace) { | |||
7117 | auto *UI = cast<Instruction>(U->get()); | |||
7118 | Instruction *NI = UI->clone(); | |||
7119 | ||||
7120 | if (IsHugeFunc) { | |||
7121 | // We have just cloned an instruction, so its operands' defs may now sink | |||
7122 | // to this BB. Put those defs' BBs into FreshBBs for later optimization. | |||
7123 | for (unsigned I = 0; I < NI->getNumOperands(); ++I) { | |||
7124 | auto *OpDef = dyn_cast<Instruction>(NI->getOperand(I)); | |||
7125 | if (!OpDef) | |||
7126 | continue; | |||
7127 | FreshBBs.insert(OpDef->getParent()); | |||
7128 | } | |||
7129 | } | |||
7130 | ||||
7131 | NewInstructions[UI] = NI; | |||
7132 | MaybeDead.insert(UI); | |||
7133 | LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n"); | |||
7134 | NI->insertBefore(InsertPoint); | |||
7135 | InsertPoint = NI; | |||
7136 | InsertedInsts.insert(NI); | |||
7137 | ||||
7138 | // Update the use for the new instruction, making sure that we update the | |||
7139 | // sunk instruction uses, if it is part of a chain that has already been | |||
7140 | // sunk. | |||
7141 | Instruction *OldI = cast<Instruction>(U->getUser()); | |||
7142 | if (NewInstructions.count(OldI)) | |||
7143 | NewInstructions[OldI]->setOperand(U->getOperandNo(), NI); | |||
7144 | else | |||
7145 | U->set(NI); | |||
7146 | Changed = true; | |||
7147 | } | |||
7148 | ||||
7149 | // Remove instructions that are dead after sinking. | |||
7150 | for (auto *I : MaybeDead) { | |||
7151 | if (!I->hasNUsesOrMore(1)) { | |||
7152 | LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n"); | |||
7153 | I->eraseFromParent(); | |||
7154 | } | |||
7155 | } | |||
7156 | ||||
7157 | return Changed; | |||
7158 | } | |||
7159 | ||||
7160 | bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) { | |||
7161 | Value *Cond = SI->getCondition(); | |||
7162 | Type *OldType = Cond->getType(); | |||
7163 | LLVMContext &Context = Cond->getContext(); | |||
7164 | EVT OldVT = TLI->getValueType(*DL, OldType); | |||
7165 | MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT); | |||
7166 | unsigned RegWidth = RegType.getSizeInBits(); | |||
7167 | ||||
7168 | if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) | |||
7169 | return false; | |||
7170 | ||||
7171 | // If the register width is greater than the type width, expand the condition | |||
7172 | // of the switch instruction and each case constant to the width of the | |||
7173 | // register. By widening the type of the switch condition, subsequent | |||
7174 | // comparisons (for case comparisons) will not need to be extended to the | |||
7175 | // preferred register width, so we will potentially eliminate N-1 extends, | |||
7176 | // where N is the number of cases in the switch. | |||
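// E.g. (an illustrative sketch, assuming a 64-bit preferred register type
// and a zext-preferring target):
//   switch i8 %x, label %def [ i8 1, label %a
//                              i8 2, label %b ]
// becomes
//   %x.w = zext i8 %x to i64
//   switch i64 %x.w, label %def [ i64 1, label %a
//                                 i64 2, label %b ]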
7177 | auto *NewType = Type::getIntNTy(Context, RegWidth); | |||
7178 | ||||
7179 | // Extend the switch condition and case constants using the target preferred | |||
7180 | // extend unless the switch condition is a function argument with an extend | |||
7181 | // attribute. In that case, we can avoid an unnecessary mask/extension by | |||
7182 | // matching the argument extension instead. | |||
7183 | Instruction::CastOps ExtType = Instruction::ZExt; | |||
7184 | // Some targets prefer SExt over ZExt. | |||
7185 | if (TLI->isSExtCheaperThanZExt(OldVT, RegType)) | |||
7186 | ExtType = Instruction::SExt; | |||
7187 | ||||
7188 | if (auto *Arg = dyn_cast<Argument>(Cond)) { | |||
7189 | if (Arg->hasSExtAttr()) | |||
7190 | ExtType = Instruction::SExt; | |||
7191 | if (Arg->hasZExtAttr()) | |||
7192 | ExtType = Instruction::ZExt; | |||
7193 | } | |||
7194 | ||||
7195 | auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); | |||
7196 | ExtInst->insertBefore(SI); | |||
7197 | ExtInst->setDebugLoc(SI->getDebugLoc()); | |||
7198 | SI->setCondition(ExtInst); | |||
7199 | for (auto Case : SI->cases()) { | |||
7200 | const APInt &NarrowConst = Case.getCaseValue()->getValue(); | |||
7201 | APInt WideConst = (ExtType == Instruction::ZExt) | |||
7202 | ? NarrowConst.zext(RegWidth) | |||
7203 | : NarrowConst.sext(RegWidth); | |||
7204 | Case.setValue(ConstantInt::get(Context, WideConst)); | |||
7205 | } | |||
7206 | ||||
7207 | return true; | |||
7208 | } | |||
7209 | ||||
7210 | bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) { | |||
7211 | // The SCCP optimization tends to produce code like this: | |||
7212 | // switch(x) { case 42: phi(42, ...) } | |||
7213 | // Materializing the constant for the phi-argument needs instructions; so we | |||
7214 | // change the code to: | |||
7215 | // switch(x) { case 42: phi(x, ...) } | |||
7216 | ||||
7217 | Value *Condition = SI->getCondition(); | |||
7218 | // Avoid endless loop in degenerate case. | |||
7219 | if (isa<ConstantInt>(*Condition)) | |||
7220 | return false; | |||
7221 | ||||
7222 | bool Changed = false; | |||
7223 | BasicBlock *SwitchBB = SI->getParent(); | |||
7224 | Type *ConditionType = Condition->getType(); | |||
7225 | ||||
7226 | for (const SwitchInst::CaseHandle &Case : SI->cases()) { | |||
7227 | ConstantInt *CaseValue = Case.getCaseValue(); | |||
7228 | BasicBlock *CaseBB = Case.getCaseSuccessor(); | |||
7229 | // Set to true if we previously checked that `CaseBB` is only reached by | |||
7230 | // a single case from this switch. | |||
7231 | bool CheckedForSinglePred = false; | |||
7232 | for (PHINode &PHI : CaseBB->phis()) { | |||
7233 | Type *PHIType = PHI.getType(); | |||
7234 | // If ZExt is free then we can also catch patterns like this: | |||
7235 | // switch((i32)x) { case 42: phi((i64)42, ...); } | |||
7236 | // and replace `(i64)42` with `zext i32 %x to i64`. | |||
7237 | bool TryZExt = | |||
7238 | PHIType->isIntegerTy() && | |||
7239 | PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() && | |||
7240 | TLI->isZExtFree(ConditionType, PHIType); | |||
7241 | if (PHIType == ConditionType || TryZExt) { | |||
7242 | // Set to true to skip this case because of multiple preds. | |||
7243 | bool SkipCase = false; | |||
7244 | Value *Replacement = nullptr; | |||
7245 | for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) { | |||
7246 | Value *PHIValue = PHI.getIncomingValue(I); | |||
7247 | if (PHIValue != CaseValue) { | |||
7248 | if (!TryZExt) | |||
7249 | continue; | |||
7250 | ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue); | |||
7251 | if (!PHIValueInt || | |||
7252 | PHIValueInt->getValue() != | |||
7253 | CaseValue->getValue().zext(PHIType->getIntegerBitWidth())) | |||
7254 | continue; | |||
7255 | } | |||
7256 | if (PHI.getIncomingBlock(I) != SwitchBB) | |||
7257 | continue; | |||
7258 | // We cannot optimize if there are multiple case labels jumping to | |||
7259 | // this block. This check may get expensive when there are many | |||
7260 | // case labels so we test for it last. | |||
7261 | if (!CheckedForSinglePred) { | |||
7262 | CheckedForSinglePred = true; | |||
7263 | if (SI->findCaseDest(CaseBB) == nullptr) { | |||
7264 | SkipCase = true; | |||
7265 | break; | |||
7266 | } | |||
7267 | } | |||
7268 | ||||
7269 | if (Replacement == nullptr) { | |||
7270 | if (PHIValue == CaseValue) { | |||
7271 | Replacement = Condition; | |||
7272 | } else { | |||
7273 | IRBuilder<> Builder(SI); | |||
7274 | Replacement = Builder.CreateZExt(Condition, PHIType); | |||
7275 | } | |||
7276 | } | |||
7277 | PHI.setIncomingValue(I, Replacement); | |||
7278 | Changed = true; | |||
7279 | } | |||
7280 | if (SkipCase) | |||
7281 | break; | |||
7282 | } | |||
7283 | } | |||
7284 | } | |||
7285 | return Changed; | |||
7286 | } | |||
7287 | ||||
7288 | bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { | |||
7289 | bool Changed = optimizeSwitchType(SI); | |||
7290 | Changed |= optimizeSwitchPhiConstants(SI); | |||
7291 | return Changed; | |||
7292 | } | |||
7293 | ||||
7294 | namespace { | |||
7295 | ||||
7296 | /// Helper class to promote a scalar operation to a vector one. | |||
7297 | /// This class is used to move an extractelement transition downward. | |||
7298 | /// E.g., | |||
7299 | /// a = vector_op <2 x i32> | |||
7300 | /// b = extractelement <2 x i32> a, i32 0 | |||
7301 | /// c = scalar_op b | |||
7302 | /// store c | |||
7303 | /// | |||
7304 | /// => | |||
7305 | /// a = vector_op <2 x i32> | |||
7306 | /// c = vector_op a (equivalent to scalar_op on the related lane) | |||
7307 | /// * d = extractelement <2 x i32> c, i32 0 | |||
7308 | /// * store d | |||
7309 | /// Assuming both extractelement and store can be combined, we get rid of the | |||
7310 | /// transition. | |||
7311 | class VectorPromoteHelper { | |||
7312 | /// DataLayout associated with the current module. | |||
7313 | const DataLayout &DL; | |||
7314 | ||||
7315 | /// Used to perform some checks on the legality of vector operations. | |||
7316 | const TargetLowering &TLI; | |||
7317 | ||||
7318 | /// Used to estimate the cost of the promoted chain. | |||
7319 | const TargetTransformInfo &TTI; | |||
7320 | ||||
7321 | /// The transition being moved downwards. | |||
7322 | Instruction *Transition; | |||
7323 | ||||
7324 | /// The sequence of instructions to be promoted. | |||
7325 | SmallVector<Instruction *, 4> InstsToBePromoted; | |||
7326 | ||||
7327 | /// Cost of combining a store and an extract. | |||
7328 | unsigned StoreExtractCombineCost; | |||
7329 | ||||
7330 | /// Instruction that will be combined with the transition. | |||
7331 | Instruction *CombineInst = nullptr; | |||
7332 | ||||
7333 | /// The instruction that represents the current end of the transition. | |||
7334 | /// Since we are faking the promotion until we reach the end of the chain | |||
7335 | /// of computation, we need a way to get the current end of the transition. | |||
7336 | Instruction *getEndOfTransition() const { | |||
7337 | if (InstsToBePromoted.empty()) | |||
7338 | return Transition; | |||
7339 | return InstsToBePromoted.back(); | |||
7340 | } | |||
7341 | ||||
7342 | /// Return the index of the original value in the transition. | |||
7343 | /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, | |||
7344 | /// c, is at index 0. | |||
7345 | unsigned getTransitionOriginalValueIdx() const { | |||
7346 | assert(isa<ExtractElementInst>(Transition) && | |||
7347 | "Other kinds of transitions are not supported yet"); | |||
7348 | return 0; | |||
7349 | } | |||
7350 | ||||
7351 | /// Return the index of the index in the transition. | |||
7352 | /// E.g., for "extractelement <2 x i32> c, i32 0" the index | |||
7353 | /// is at index 1. | |||
7354 | unsigned getTransitionIdx() const { | |||
7355 | assert(isa<ExtractElementInst>(Transition) && | |||
7356 | "Other kinds of transitions are not supported yet"); | |||
7357 | return 1; | |||
7358 | } | |||
7359 | ||||
7360 | /// Get the type of the transition. | |||
7361 | /// This is the type of the original value. | |||
7362 | /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the | |||
7363 | /// transition is <2 x i32>. | |||
7364 | Type *getTransitionType() const { | |||
7365 | return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); | |||
7366 | } | |||
7367 | ||||
7368 | /// Promote \p ToBePromoted by moving \p Def downward through it. | |||
7369 | /// I.e., we have the following sequence: | |||
7370 | /// Def = Transition <ty1> a to <ty2> | |||
7371 | /// b = ToBePromoted <ty2> Def, ... | |||
7372 | /// => | |||
7373 | /// b = ToBePromoted <ty1> a, ... | |||
7374 | /// Def = Transition <ty1> ToBePromoted to <ty2> | |||
7375 | void promoteImpl(Instruction *ToBePromoted); | |||
7376 | ||||
7377 | /// Check whether or not it is profitable to promote all the | |||
7378 | /// instructions enqueued to be promoted. | |||
7379 | bool isProfitableToPromote() { | |||
7380 | Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
7381 | unsigned Index = isa<ConstantInt>(ValIdx) | |||
7382 | ? cast<ConstantInt>(ValIdx)->getZExtValue() | |||
7383 | : -1; | |||
7384 | Type *PromotedType = getTransitionType(); | |||
7385 | ||||
7386 | StoreInst *ST = cast<StoreInst>(CombineInst); | |||
7387 | unsigned AS = ST->getPointerAddressSpace(); | |||
7388 | // Check if this store is supported. | |||
7389 | if (!TLI.allowsMisalignedMemoryAccesses( | |||
7390 | TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, | |||
7391 | ST->getAlign())) { | |||
7392 | // If this is not supported, there is no way we can combine | |||
7393 | // the extract with the store. | |||
7394 | return false; | |||
7395 | } | |||
7396 | ||||
7397 | // The scalar chain of computation has to pay for the scalar-to-vector | |||
7398 | // transition. | |||
7399 | // The vector chain has to account for the combining cost. | |||
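// E.g. (an illustrative sketch): for the chain "extract; or i32; store",
// ScalarCost = cost(extractelement) + cost(or i32), while
// VectorCost = StoreExtractCombineCost + cost(or <2 x i32>); we promote
// only when ScalarCost > VectorCost.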
7400 | enum TargetTransformInfo::TargetCostKind CostKind = | |||
7401 | TargetTransformInfo::TCK_RecipThroughput; | |||
7402 | InstructionCost ScalarCost = | |||
7403 | TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index); | |||
7404 | InstructionCost VectorCost = StoreExtractCombineCost; | |||
7405 | for (const auto &Inst : InstsToBePromoted) { | |||
7406 | // Compute the cost. | |||
7407 | // By construction, all instructions being promoted are arithmetic ones. | |||
7408 | // Moreover, one argument is a constant that can be viewed as a splat | |||
7409 | // constant. | |||
7410 | Value *Arg0 = Inst->getOperand(0); | |||
7411 | bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || | |||
7412 | isa<ConstantFP>(Arg0); | |||
7413 | TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info; | |||
7414 | if (IsArg0Constant) | |||
7415 | Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue; | |||
7416 | else | |||
7417 | Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue; | |||
7418 | ||||
7419 | ScalarCost += TTI.getArithmeticInstrCost( | |||
7420 | Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info); | |||
7421 | VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, | |||
7422 | CostKind, Arg0Info, Arg1Info); | |||
7423 | } | |||
7424 | LLVM_DEBUG( | |||
7425 | dbgs() << "Estimated cost of computation to be promoted:\nScalar: " | |||
7426 | << ScalarCost << "\nVector: " << VectorCost << '\n'); | |||
7427 | return ScalarCost > VectorCost; | |||
7428 | } | |||
7429 | ||||
7430 | /// Generate a constant vector containing \p Val, with the same | |||
7431 | /// number of elements as the transition. | |||
7432 | /// \p UseSplat defines whether or not \p Val should be replicated | |||
7433 | /// across the whole vector. | |||
7434 | /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, | |||
7435 | /// otherwise we generate a vector with as many undef as possible: | |||
7436 | /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only | |||
7437 | /// used at the index of the extract. | |||
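/// E.g. (illustrative), for a <4 x i32> transition with Val = 7 and the
/// extract at index 2: UseSplat == true gives <7, 7, 7, 7>, while
/// UseSplat == false gives <undef, undef, 7, undef>.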
7438 | Value *getConstantVector(Constant *Val, bool UseSplat) const { | |||
7439 | unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); | |||
7440 | if (!UseSplat) { | |||
7441 | // If we cannot determine where the constant must be, we have to | |||
7442 | // use a splat constant. | |||
7443 | Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); | |||
7444 | if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) | |||
7445 | ExtractIdx = CstVal->getSExtValue(); | |||
7446 | else | |||
7447 | UseSplat = true; | |||
7448 | } | |||
7449 | ||||
7450 | ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount(); | |||
7451 | if (UseSplat) | |||
7452 | return ConstantVector::getSplat(EC, Val); | |||
7453 | ||||
7454 | if (!EC.isScalable()) { | |||
7455 | SmallVector<Constant *, 4> ConstVec; | |||
7456 | UndefValue *UndefVal = UndefValue::get(Val->getType()); | |||
7457 | for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) { | |||
7458 | if (Idx == ExtractIdx) | |||
7459 | ConstVec.push_back(Val); | |||
7460 | else | |||
7461 | ConstVec.push_back(UndefVal); | |||
7462 | } | |||
7463 | return ConstantVector::get(ConstVec); | |||
7464 | } else | |||
7465 | llvm_unreachable( | |||
7466 | "Generate scalable vector for non-splat is unimplemented"); | |||
7467 | } | |||
7468 | ||||
7469 | /// Check if promoting to a vector type an operand at \p OperandIdx | |||
7470 | /// in \p Use can trigger undefined behavior. | |||
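/// E.g. (an illustrative sketch): promoting %r = udiv i32 %x, %e, where %e
/// is the extracted lane, would make the other lanes of the promoted udiv
/// divide by undef (or force a splat), so shouldPromote rejects a transition
/// used as the RHS here, and getConstantVector splats constants instead of
/// leaving undef lanes.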
7471 | static bool canCauseUndefinedBehavior(const Instruction *Use, | |||
7472 | unsigned OperandIdx) { | |||
7473 | // It is not safe to introduce undef when the operand is on | |||
7474 | // the right-hand side of a division-like instruction. | |||
7475 | if (OperandIdx != 1) | |||
7476 | return false; | |||
7477 | switch (Use->getOpcode()) { | |||
7478 | default: | |||
7479 | return false; | |||
7480 | case Instruction::SDiv: | |||
7481 | case Instruction::UDiv: | |||
7482 | case Instruction::SRem: | |||
7483 | case Instruction::URem: | |||
7484 | return true; | |||
7485 | case Instruction::FDiv: | |||
7486 | case Instruction::FRem: | |||
7487 | return !Use->hasNoNaNs(); | |||
7488 | } | |||
7489 | llvm_unreachable(nullptr); | |||
7490 | } | |||
7491 | ||||
7492 | public: | |||
7493 | VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, | |||
7494 | const TargetTransformInfo &TTI, Instruction *Transition, | |||
7495 | unsigned CombineCost) | |||
7496 | : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), | |||
7497 | StoreExtractCombineCost(CombineCost) { | |||
7498 | assert(Transition && "Do not know how to promote null"); | |||
7499 | } | |||
7500 | ||||
7501 | /// Check if we can promote \p ToBePromoted to \p Type. | |||
7502 | bool canPromote(const Instruction *ToBePromoted) const { | |||
7503 | // We could support CastInst too. | |||
7504 | return isa<BinaryOperator>(ToBePromoted); | |||
7505 | } | |||
7506 | ||||
7507 | /// Check if it is profitable to promote \p ToBePromoted | |||
7508 | /// by moving the transition downward through it. | |||
7509 | bool shouldPromote(const Instruction *ToBePromoted) const { | |||
7510 | // Promote only if all the operands can be statically expanded. | |||
7511 | // Indeed, we do not want to introduce any new kind of transitions. | |||
7512 | for (const Use &U : ToBePromoted->operands()) { | |||
7513 | const Value *Val = U.get(); | |||
7514 | if (Val == getEndOfTransition()) { | |||
7515 | // If the use is a division and the transition is on the rhs, | |||
7516 | // we cannot promote the operation, otherwise we may create a | |||
7517 | // division by zero. | |||
7518 | if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) | |||
7519 | return false; | |||
7520 | continue; | |||
7521 | } | |||
7522 | if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && | |||
7523 | !isa<ConstantFP>(Val)) | |||
7524 | return false; | |||
7525 | } | |||
7526 | // Check that the resulting operation is legal. | |||
7527 | int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); | |||
7528 | if (!ISDOpcode) | |||
7529 | return false; | |||
7530 | return StressStoreExtract || | |||
7531 | TLI.isOperationLegalOrCustom( | |||
7532 | ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); | |||
7533 | } | |||
7534 | ||||
7535 | /// Check whether or not \p Use can be combined | |||
7536 | /// with the transition. | |||
7537 | /// I.e., is it possible to do Use(Transition) => AnotherUse? | |||
7538 | bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } | |||
7539 | ||||
7540 | /// Record \p ToBePromoted as part of the chain to be promoted. | |||
7541 | void enqueueForPromotion(Instruction *ToBePromoted) { | |||
7542 | InstsToBePromoted.push_back(ToBePromoted); | |||
7543 | } | |||
7544 | ||||
7545 | /// Set the instruction that will be combined with the transition. | |||
7546 | void recordCombineInstruction(Instruction *ToBeCombined) { | |||
7547 | assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); | |||
7548 | CombineInst = ToBeCombined; | |||
7549 | } | |||
7550 | ||||
7551 | /// Promote all the instructions enqueued for promotion if it is | |||
7552 | /// profitable. | |||
7553 | /// \return True if the promotion happened, false otherwise. | |||
7554 | bool promote() { | |||
7555 | // Check if there is something to promote. | |||
7556 | // Right now, if we do not have anything to combine with, | |||
7557 | // we assume the promotion is not profitable. | |||
7558 | if (InstsToBePromoted.empty() || !CombineInst) | |||
7559 | return false; | |||
7560 | ||||
7561 | // Check cost. | |||
7562 | if (!StressStoreExtract && !isProfitableToPromote()) | |||
7563 | return false; | |||
7564 | ||||
7565 | // Promote. | |||
7566 | for (auto &ToBePromoted : InstsToBePromoted) | |||
7567 | promoteImpl(ToBePromoted); | |||
7568 | InstsToBePromoted.clear(); | |||
7569 | return true; | |||
7570 | } | |||
7571 | }; | |||
7572 | ||||
7573 | } // end anonymous namespace | |||
7574 | ||||
7575 | void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { | |||
7576 | // At this point, we know that all the operands of ToBePromoted but Def | |||
7577 | // can be statically promoted. | |||
7578 | // For Def, we need to use its parameter in ToBePromoted: | |||
7579 | // b = ToBePromoted ty1 a | |||
7580 | // Def = Transition ty1 b to ty2 | |||
7581 | // Move the transition down. | |||
7582 | // 1. Replace all uses of the promoted operation by the transition. | |||
7583 | // = ... b => = ... Def. | |||
7584 | assert(ToBePromoted->getType() == Transition->getType() && | |||
7585 | "The type of the result of the transition does not match " | |||
7586 | "the final type"); | |||
7587 | ToBePromoted->replaceAllUsesWith(Transition); | |||
7588 | // 2. Update the type of the uses. | |||
7589 | // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. | |||
7590 | Type *TransitionTy = getTransitionType(); | |||
7591 | ToBePromoted->mutateType(TransitionTy); | |||
7592 | // 3. Update all the operands of the promoted operation with promoted | |||
7593 | // operands. | |||
7594 | // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. | |||
7595 | for (Use &U : ToBePromoted->operands()) { | |||
7596 | Value *Val = U.get(); | |||
7597 | Value *NewVal = nullptr; | |||
7598 | if (Val == Transition) | |||
7599 | NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
7600 | else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || | |||
7601 | isa<ConstantFP>(Val)) { | |||
7602 | // Use a splat constant if it is not safe to use undef. | |||
7603 | NewVal = getConstantVector( | |||
7604 | cast<Constant>(Val), | |||
7605 | isa<UndefValue>(Val) || | |||
7606 | canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); | |||
7607 | } else | |||
7608 | llvm_unreachable("Did you modified shouldPromote and forgot to update "::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "llvm/lib/CodeGen/CodeGenPrepare.cpp", 7609) | |||
7609 | "this?")::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "llvm/lib/CodeGen/CodeGenPrepare.cpp", 7609); | |||
7610 | ToBePromoted->setOperand(U.getOperandNo(), NewVal); | |||
7611 | } | |||
7612 | Transition->moveAfter(ToBePromoted); | |||
7613 | Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); | |||
7614 | } | |||
7615 | ||||
7616 | /// Some targets can do store(extractelement) with one instruction. | |||
7617 | /// Try to push the extractelement towards the stores when the target | |||
7618 | /// has this feature and this is profitable. | |||
7619 | bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { | |||
7620 | unsigned CombineCost = std::numeric_limits<unsigned>::max(); | |||
7621 | if (DisableStoreExtract || | |||
7622 | (!StressStoreExtract && | |||
7623 | !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), | |||
7624 | Inst->getOperand(1), CombineCost))) | |||
7625 | return false; | |||
7626 | ||||
7627 | // At this point we know that Inst is a vector to scalar transition. | |||
7628 | // Try to move it down the def-use chain, until: | |||
7629 | // - We can combine the transition with its single use | |||
7630 | // => we got rid of the transition. | |||
7631 | // - We escape the current basic block | |||
7632 | // => we would need to check that we are moving it to a cheaper place and | |||
7633 | // we do not do that for now. | |||
7634 | BasicBlock *Parent = Inst->getParent(); | |||
7635 | LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); | |||
7636 | VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); | |||
7637 | // If the transition has more than one use, assume this is not going to be | |||
7638 | // beneficial. | |||
7639 | while (Inst->hasOneUse()) { | |||
7640 | Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); | |||
7641 | LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); | |||
7642 | ||||
7643 | if (ToBePromoted->getParent() != Parent) { | |||
7644 | LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" | |||
7645 | << ToBePromoted->getParent()->getName() | |||
7646 | << ") than the transition (" << Parent->getName() | |||
7647 | << ").\n"); | |||
7648 | return false; | |||
7649 | } | |||
7650 | ||||
7651 | if (VPH.canCombine(ToBePromoted)) { | |||
7652 | LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' | |||
7653 | << "will be combined with: " << *ToBePromoted << '\n'); | |||
7654 | VPH.recordCombineInstruction(ToBePromoted); | |||
7655 | bool Changed = VPH.promote(); | |||
7656 | NumStoreExtractExposed += Changed; | |||
7657 | return Changed; | |||
7658 | } | |||
7659 | ||||
7660 | LLVM_DEBUG(dbgs() << "Try promoting.\n"); | |||
7661 | if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) | |||
7662 | return false; | |||
7663 | ||||
7664 | LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); | |||
7665 | ||||
7666 | VPH.enqueueForPromotion(ToBePromoted); | |||
7667 | Inst = ToBePromoted; | |||
7668 | } | |||
7669 | return false; | |||
7670 | } | |||
7671 | ||||
7672 | /// For the instruction sequence of store below, F and I values | |||
7673 | /// are bundled together as an i64 value before being stored into memory. | |||
7674 | /// Sometimes it is more efficient to generate separate stores for F and I, | |||
7675 | /// which can remove the bitwise instructions or sink them to colder places. | |||
7676 | /// | |||
7677 | /// (store (or (zext (bitcast F to i32) to i64), | |||
7678 | /// (shl (zext I to i64), 32)), addr) --> | |||
7679 | /// (store F, addr) and (store I, addr+4) | |||
7680 | /// | |||
7681 | /// Similarly, splitting for other merged stores can also be beneficial, like: | |||
7682 | /// For pair of {i32, i32}, i64 store --> two i32 stores. | |||
7683 | /// For pair of {i32, i16}, i64 store --> two i32 stores. | |||
7684 | /// For pair of {i16, i16}, i32 store --> two i16 stores. | |||
7685 | /// For pair of {i16, i8}, i32 store --> two i16 stores. | |||
7686 | /// For pair of {i8, i8}, i16 store --> two i8 stores. | |||
7687 | /// | |||
7688 | /// We allow each target to determine specifically which kind of splitting is | |||
7689 | /// supported. | |||
7690 | /// | |||
7691 | /// The store patterns are commonly seen from the simple code snippet below | |||
7692 | /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo. | |||
7693 | /// void goo(const std::pair<int, float> &); | |||
7694 | /// hoo() { | |||
7695 | /// ... | |||
7696 | /// goo(std::make_pair(tmp, ftmp)); | |||
7697 | /// ... | |||
7698 | /// } | |||
7699 | /// | |||
7700 | /// Although we already have similar splitting in DAG Combine, we duplicate | |||
7701 | /// it in CodeGenPrepare to catch cases in which the pattern spans | |||
7702 | /// multiple BBs. The logic in DAG Combine is kept to catch cases generated | |||
7703 | /// during code expansion. | |||
7704 | static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, | |||
7705 | const TargetLowering &TLI) { | |||
7706 | // Handle simple but common cases only. | |||
7707 | Type *StoreType = SI.getValueOperand()->getType(); | |||
7708 | ||||
7709 | // The code below assumes shifting a value by <number of bits>, | |||
7710 | // whereas scalable vectors would have to be shifted by | |||
7711 | // <log2(vscale) + number of bits> in order to store the | |||
7712 | // low/high parts. Bailing out for now. | |||
7713 | if (StoreType->isScalableTy()) | |||
7714 | return false; | |||
7715 | ||||
7716 | if (!DL.typeSizeEqualsStoreSize(StoreType) || | |||
7717 | DL.getTypeSizeInBits(StoreType) == 0) | |||
7718 | return false; | |||
7719 | ||||
7720 | unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; | |||
7721 | Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); | |||
7722 | if (!DL.typeSizeEqualsStoreSize(SplitStoreType)) | |||
7723 | return false; | |||
7724 | ||||
7725 | // Don't split the store if it is volatile. | |||
7726 | if (SI.isVolatile()) | |||
7727 | return false; | |||
7728 | ||||
7729 | // Match the following patterns: | |||
7730 | // (store (or (zext LValue to i64), | |||
7731 | //            (shl (zext HValue to i64), HalfValBitSize)), addr) | |||
7732 | // or | |||
7733 | // (store (or (shl (zext HValue to i64), HalfValBitSize), | |||
7734 | //            (zext LValue to i64)), addr) | |||
7735 | // Expect both operands of the OR and the first operand of the SHL to have | |||
7736 | // only one use. | |||
7737 | Value *LValue, *HValue; | |||
7738 | if (!match(SI.getValueOperand(), | |||
7739 | m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), | |||
7740 | m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), | |||
7741 | m_SpecificInt(HalfValBitSize)))))) | |||
7742 | return false; | |||
7743 | ||||
7744 | // Check that LValue and HValue are integers no wider than HalfValBitSize. | |||
7745 | if (!LValue->getType()->isIntegerTy() || | |||
7746 | DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || | |||
7747 | !HValue->getType()->isIntegerTy() || | |||
7748 | DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) | |||
7749 | return false; | |||
7750 | ||||
7751 | // If LValue/HValue is a bitcast instruction, use the EVT before bitcast | |||
7752 | // as the input of target query. | |||
7753 | auto *LBC = dyn_cast<BitCastInst>(LValue); | |||
7754 | auto *HBC = dyn_cast<BitCastInst>(HValue); | |||
7755 | EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) | |||
7756 | : EVT::getEVT(LValue->getType()); | |||
7757 | EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) | |||
7758 | : EVT::getEVT(HValue->getType()); | |||
7759 | if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) | |||
7760 | return false; | |||
7761 | ||||
7762 | // Start to split store. | |||
7763 | IRBuilder<> Builder(SI.getContext()); | |||
7764 | Builder.SetInsertPoint(&SI); | |||
7765 | ||||
7766 | // If LValue/HValue is a bitcast in another BB, create a new one in the | |||
7767 | // current BB so it may be merged with the split stores by the DAG combiner. | |||
7768 | if (LBC && LBC->getParent() != SI.getParent()) | |||
7769 | LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); | |||
7770 | if (HBC && HBC->getParent() != SI.getParent()) | |||
7771 | HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); | |||
7772 | ||||
7773 | bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); | |||
7774 | auto CreateSplitStore = [&](Value *V, bool Upper) { | |||
7775 | V = Builder.CreateZExtOrBitCast(V, SplitStoreType); | |||
7776 | Value *Addr = Builder.CreateBitCast( | |||
7777 | SI.getOperand(1), | |||
7778 | SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); | |||
7779 | Align Alignment = SI.getAlign(); | |||
7780 | const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper); | |||
7781 | if (IsOffsetStore) { | |||
7782 | Addr = Builder.CreateGEP( | |||
7783 | SplitStoreType, Addr, | |||
7784 | ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); | |||
7785 | ||||
7786 | // When splitting the store in half, naturally one half will retain the | |||
7787 | // alignment of the original wider store, regardless of whether it was | |||
7788 | // over-aligned or not, while the other will require adjustment. | |||
7789 | Alignment = commonAlignment(Alignment, HalfValBitSize / 8); | |||
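// E.g. for an align-8 i64 store split into two i32 halves, the half at
// offset 0 keeps align 8, while this offset half gets
// commonAlignment(8, 4) = align 4.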
7790 | } | |||
7791 | Builder.CreateAlignedStore(V, Addr, Alignment); | |||
7792 | }; | |||
7793 | ||||
7794 | CreateSplitStore(LValue, false); | |||
7795 | CreateSplitStore(HValue, true); | |||
7796 | ||||
7797 | // Delete the old store. | |||
7798 | SI.eraseFromParent(); | |||
7799 | return true; | |||
7800 | } | |||
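// A hand-written illustration (not taken from a test) of the float/int pair
// case from the comment above, on a little-endian target:
//   %f32 = bitcast float %F to i32
//   %lo = zext i32 %f32 to i64
//   %hi = zext i32 %I to i64
//   %shl = shl i64 %hi, 32
//   %or = or i64 %lo, %shl
//   store i64 %or, ptr %addr
// becomes two narrow stores:
//   store i32 %f32, ptr %addr
//   %addr.hi = getelementptr i32, ptr %addr, i32 1
//   store i32 %I, ptr %addr.hi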
7801 | ||||
7802 | // Return true if the GEP has two operands, the first operand is of a sequential | |||
7803 | // type, and the second operand is a constant. | |||
7804 | static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { | |||
7805 | gep_type_iterator I = gep_type_begin(*GEP); | |||
7806 | return GEP->getNumOperands() == 2 && I.isSequential() && | |||
7807 | isa<ConstantInt>(GEP->getOperand(1)); | |||
7808 | } | |||
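// For example, `%g = getelementptr i32, ptr %p, i64 4` qualifies, while a GEP
// with a variable index or with more than one index does not.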
7809 | ||||
7810 | // Try unmerging GEPs to reduce liveness interference (register pressure) across | |||
7811 | // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, | |||
7812 | // reducing liveness interference across those edges benefits global register | |||
7813 | // allocation. Currently handles only certain cases. | |||
7814 | // | |||
7815 | // For example, unmerge %GEPI and %UGEPI as below. | |||
7816 | // | |||
7817 | // ---------- BEFORE ---------- | |||
7818 | // SrcBlock: | |||
7819 | // ... | |||
7820 | // %GEPIOp = ... | |||
7821 | // ... | |||
7822 | // %GEPI = gep %GEPIOp, Idx | |||
7823 | // ... | |||
7824 | // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] | |||
7825 | // (* %GEPI is alive on the indirectbr edges due to other uses ahead) | |||
7826 | // (* %GEPIOp is alive on the indirectbr edges only because it is used by | |||
7827 | // %UGEPI) | |||
7828 | // | |||
7829 | // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) | |||
7830 | // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) | |||
7831 | // ... | |||
7832 | // | |||
7833 | // DstBi: | |||
7834 | // ... | |||
7835 | // %UGEPI = gep %GEPIOp, UIdx | |||
7836 | // ... | |||
7837 | // --------------------------- | |||
7838 | // | |||
7839 | // ---------- AFTER ---------- | |||
7840 | // SrcBlock: | |||
7841 | // ... (same as above) | |||
7842 | // (* %GEPI is still alive on the indirectbr edges) | |||
7843 | // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the | |||
7844 | // unmerging) | |||
7845 | // ... | |||
7846 | // | |||
7847 | // DstBi: | |||
7848 | // ... | |||
7849 | // %UGEPI = gep %GEPI, (UIdx-Idx) | |||
7850 | // ... | |||
7851 | // --------------------------- | |||
7852 | // | |||
7853 | // The register pressure on the IndirectBr edges is reduced because %GEPIOp is | |||
7854 | // no longer alive on them. | |||
7855 | // | |||
7856 | // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting | |||
7857 | // merging of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), | |||
7858 | // so as not to disable further simplifications and optimizations as a result of GEP | |||
7859 | // merging. | |||
7860 | // | |||
7861 | // Note this unmerging may increase the length of the data-flow critical path | |||
7862 | // (the path from %GEPIOp to %UGEPI would go through %GEPI), so it is a | |||
7863 | // tradeoff between register pressure and the length of the data-flow | |||
7864 | // critical path. Restricting this to the uncommon IndirectBr case minimizes | |||
7865 | // the impact of a potentially longer critical path, if any, and the impact | |||
7866 | // on compile time. | |||
7867 | static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, | |||
7868 | const TargetTransformInfo *TTI) { | |||
7869 | BasicBlock *SrcBlock = GEPI->getParent(); | |||
7870 | // Check that SrcBlock ends with an IndirectBr. If not, give up. The common | |||
7871 | // (non-IndirectBr) cases exit early here. | |||
7872 | if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) | |||
7873 | return false; | |||
7874 | // Check that GEPI is a simple gep with a single constant index. | |||
7875 | if (!GEPSequentialConstIndexed(GEPI)) | |||
7876 | return false; | |||
7877 | ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); | |||
7878 | // Check that GEPI is a cheap one. | |||
7879 | if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(), | |||
7880 | TargetTransformInfo::TCK_SizeAndLatency) > | |||
7881 | TargetTransformInfo::TCC_Basic) | |||
7882 | return false; | |||
7883 | Value *GEPIOp = GEPI->getOperand(0); | |||
7884 | // Check that GEPIOp is an instruction that's also defined in SrcBlock. | |||
7885 | if (!isa<Instruction>(GEPIOp)) | |||
7886 | return false; | |||
7887 | auto *GEPIOpI = cast<Instruction>(GEPIOp); | |||
7888 | if (GEPIOpI->getParent() != SrcBlock) | |||
7889 | return false; | |||
7890 | // Check that GEPI is used outside the block, meaning it is alive on the | |||
7891 | // IndirectBr edge(s). | |||
7892 | if (llvm::none_of(GEPI->users(), [&](User *Usr) { | |||
7893 | if (auto *I = dyn_cast<Instruction>(Usr)) { | |||
7894 | if (I->getParent() != SrcBlock) { | |||
7895 | return true; | |||
7896 | } | |||
7897 | } | |||
7898 | return false; | |||
7899 | })) | |||
7900 | return false; | |||
7901 | // The second elements of the GEP chains to be unmerged. | |||
7902 | std::vector<GetElementPtrInst *> UGEPIs; | |||
7903 | // Check each user of GEPIOp to see whether unmerging would make GEPIOp | |||
7904 | // no longer alive on the IndirectBr edges. | |||
7905 | for (User *Usr : GEPIOp->users()) { | |||
7906 | if (Usr == GEPI) | |||
7907 | continue; | |||
7908 | // Check if Usr is an Instruction. If not, give up. | |||
7909 | if (!isa<Instruction>(Usr)) | |||
7910 | return false; | |||
7911 | auto *UI = cast<Instruction>(Usr); | |||
7912 | // If Usr is in the same block as GEPIOp, that is fine; skip it. | |||
7913 | if (UI->getParent() == SrcBlock) | |||
7914 | continue; | |||
7915 | // Check if Usr is a GEP. If not, give up. | |||
7916 | if (!isa<GetElementPtrInst>(Usr)) | |||
7917 | return false; | |||
7918 | auto *UGEPI = cast<GetElementPtrInst>(Usr); | |||
7919 | // Check if UGEPI is a simple gep with a single constant index and GEPIOp is | |||
7920 | // the pointer operand to it. If so, record it in the vector. If not, give | |||
7921 | // up. | |||
7922 | if (!GEPSequentialConstIndexed(UGEPI)) | |||
7923 | return false; | |||
7924 | if (UGEPI->getOperand(0) != GEPIOp) | |||
7925 | return false; | |||
7926 | if (GEPIIdx->getType() != | |||
7927 | cast<ConstantInt>(UGEPI->getOperand(1))->getType()) | |||
7928 | return false; | |||
7929 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
7930 | if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(), | |||
7931 | TargetTransformInfo::TCK_SizeAndLatency) > | |||
7932 | TargetTransformInfo::TCC_Basic) | |||
7933 | return false; | |||
7934 | UGEPIs.push_back(UGEPI); | |||
7935 | } | |||
7936 | if (UGEPIs.empty()) | |||
7937 | return false; | |||
7938 | // Check the materialization cost of (UIdx-Idx). | |||
7939 | for (GetElementPtrInst *UGEPI : UGEPIs) { | |||
7940 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
7941 | APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); | |||
7942 | InstructionCost ImmCost = TTI->getIntImmCost( | |||
7943 | NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency); | |||
7944 | if (ImmCost > TargetTransformInfo::TCC_Basic) | |||
7945 | return false; | |||
7946 | } | |||
7947 | // Now unmerge between GEPI and UGEPIs. | |||
7948 | for (GetElementPtrInst *UGEPI : UGEPIs) { | |||
7949 | UGEPI->setOperand(0, GEPI); | |||
7950 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
7951 | Constant *NewUGEPIIdx = ConstantInt::get( | |||
7952 | GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue()); | |||
7953 | UGEPI->setOperand(1, NewUGEPIIdx); | |||
7954 | // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not | |||
7955 | // inbounds to avoid UB. | |||
7956 | if (!GEPI->isInBounds()) { | |||
7957 | UGEPI->setIsInBounds(false); | |||
7958 | } | |||
7959 | } | |||
7960 | // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not | |||
7961 | // alive on IndirectBr edges). | |||
7962 | assert(llvm::none_of(GEPIOp->users(), | |||
7963 | [&](User *Usr) { | |||
7964 | return cast<Instruction>(Usr)->getParent() != SrcBlock; | |||
7965 | }) && | |||
7966 | "GEPIOp is used outside SrcBlock"); | |||
7967 | return true; | |||
7968 | } | |||
7969 | ||||
7970 | static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI, | |||
7971 | SmallSet<BasicBlock *, 32> &FreshBBs, | |||
7972 | bool IsHugeFunc) { | |||
7973 | // Try and convert | |||
7974 | // %c = icmp ult %x, 8 | |||
7975 | // br %c, bla, blb | |||
7976 | // %tc = lshr %x, 3 | |||
7977 | // to | |||
7978 | // %tc = lshr %x, 3 | |||
7979 | // %c = icmp eq %tc, 0 | |||
7980 | // br %c, bla, blb | |||
7981 | // Creating the cmp to zero can be better for the backend, especially if the | |||
7982 | // lshr produces flags that can be used automatically. | |||
7983 | if (!TLI.preferZeroCompareBranch() || !Branch->isConditional()) | |||
7984 | return false; | |||
7985 | ||||
7986 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition()); | |||
7987 | if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse()) | |||
7988 | return false; | |||
7989 | ||||
7990 | Value *X = Cmp->getOperand(0); | |||
7991 | APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue(); | |||
7992 | ||||
7993 | for (auto *U : X->users()) { | |||
7994 | Instruction *UI = dyn_cast<Instruction>(U); | |||
7995 | // A quick dominance check | |||
7996 | if (!UI || | |||
7997 | (UI->getParent() != Branch->getParent() && | |||
7998 | UI->getParent() != Branch->getSuccessor(0) && | |||
7999 | UI->getParent() != Branch->getSuccessor(1)) || | |||
8000 | (UI->getParent() != Branch->getParent() && | |||
8001 | !UI->getParent()->getSinglePredecessor())) | |||
8002 | continue; | |||
8003 | ||||
8004 | if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT && | |||
8005 | match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) { | |||
8006 | IRBuilder<> Builder(Branch); | |||
8007 | if (UI->getParent() != Branch->getParent()) | |||
8008 | UI->moveBefore(Branch); | |||
8009 | Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI, | |||
8010 | ConstantInt::get(UI->getType(), 0)); | |||
8011 | LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n"); | |||
8012 | LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n"); | |||
8013 | replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc); | |||
8014 | return true; | |||
8015 | } | |||
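// Same idea for equality compares: an `icmp eq/ne %x, C` with a nearby
// `sub %x, C` (or equivalently `add %x, -C`) is rewritten below to compare
// the sub/add result against zero.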
8016 | if (Cmp->isEquality() && | |||
8017 | (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) || | |||
8018 | match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) { | |||
8019 | IRBuilder<> Builder(Branch); | |||
8020 | if (UI->getParent() != Branch->getParent()) | |||
8021 | UI->moveBefore(Branch); | |||
8022 | Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI, | |||
8023 | ConstantInt::get(UI->getType(), 0)); | |||
8024 | LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n"); | |||
8025 | LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n"); | |||
8026 | replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc); | |||
8027 | return true; | |||
8028 | } | |||
8029 | } | |||
8030 | return false; | |||
8031 | } | |||
8032 | ||||
8033 | bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) { | |||
8034 | // Bail out if we inserted the instruction to prevent optimizations from | |||
8035 | // stepping on each other's toes. | |||
8036 | if (InsertedInsts.count(I)) | |||
8037 | return false; | |||
8038 | ||||
8039 | // TODO: Move into the switch on opcode below here. | |||
8040 | if (PHINode *P = dyn_cast<PHINode>(I)) { | |||
8041 | // It is possible for very late stage optimizations (such as SimplifyCFG) | |||
8042 | // to introduce PHI nodes too late to be cleaned up. If we detect such a | |||
8043 | // trivial PHI, go ahead and zap it here. | |||
8044 | if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) { | |||
8045 | LargeOffsetGEPMap.erase(P); | |||
8046 | replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc); | |||
8047 | P->eraseFromParent(); | |||
8048 | ++NumPHIsElim; | |||
8049 | return true; | |||
8050 | } | |||
8051 | return false; | |||
8052 | } | |||
8053 | ||||
8054 | if (CastInst *CI = dyn_cast<CastInst>(I)) { | |||
8055 | // If the source of the cast is a constant, then this should have | |||
8056 | // already been constant folded. The only reason NOT to constant fold | |||
8057 | // it is if something (e.g. LSR) was careful to place the constant | |||
8058 | // evaluation in a block other than the one that uses it (e.g. to hoist | |||
8059 | // the address of globals out of a loop). If this is the case, we don't | |||
8060 | // want to forward-subst the cast. | |||
8061 | if (isa<Constant>(CI->getOperand(0))) | |||
8062 | return false; | |||
8063 | ||||
8064 | if (OptimizeNoopCopyExpression(CI, *TLI, *DL)) | |||
8065 | return true; | |||
8066 | ||||
8067 | if ((isa<UIToFPInst>(I) || isa<FPToUIInst>(I) || isa<TruncInst>(I)) && | |||
8068 | TLI->optimizeExtendOrTruncateConversion(I, | |||
8069 | LI->getLoopFor(I->getParent()))) | |||
8070 | return true; | |||
8071 | ||||
8072 | if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { | |||
8073 | // Sink a zext or sext into its user blocks if the target type doesn't | |||
8074 | // fit in one register. | |||
8075 | if (TLI->getTypeAction(CI->getContext(), | |||
8076 | TLI->getValueType(*DL, CI->getType())) == | |||
8077 | TargetLowering::TypeExpandInteger) { | |||
8078 | return SinkCast(CI); | |||
8079 | } else { | |||
8080 | if (TLI->optimizeExtendOrTruncateConversion( | |||
8081 | I, LI->getLoopFor(I->getParent()))) | |||
8082 | return true; | |||
8083 | ||||
8084 | bool MadeChange = optimizeExt(I); | |||
8085 | return MadeChange | optimizeExtUses(I); | |||
8086 | } | |||
8087 | } | |||
8088 | return false; | |||
8089 | } | |||
8090 | ||||
8091 | if (auto *Cmp = dyn_cast<CmpInst>(I)) | |||
8092 | if (optimizeCmp(Cmp, ModifiedDT)) | |||
8093 | return true; | |||
8094 | ||||
8095 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | |||
8096 | LI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | |||
8097 | bool Modified = optimizeLoadExt(LI); | |||
8098 | unsigned AS = LI->getPointerAddressSpace(); | |||
8099 | Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); | |||
8100 | return Modified; | |||
8101 | } | |||
8102 | ||||
8103 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) { | |||
8104 | if (splitMergedValStore(*SI, *DL, *TLI)) | |||
8105 | return true; | |||
8106 | SI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | |||
8107 | unsigned AS = SI->getPointerAddressSpace(); | |||
8108 | return optimizeMemoryInst(I, SI->getOperand(1), | |||
8109 | SI->getOperand(0)->getType(), AS); | |||
8110 | } | |||
8111 | ||||
8112 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { | |||
8113 | unsigned AS = RMW->getPointerAddressSpace(); | |||
8114 | return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS); | |||
8115 | } | |||
8116 | ||||
8117 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) { | |||
8118 | unsigned AS = CmpX->getPointerAddressSpace(); | |||
8119 | return optimizeMemoryInst(I, CmpX->getPointerOperand(), | |||
8120 | CmpX->getCompareOperand()->getType(), AS); | |||
8121 | } | |||
8122 | ||||
8123 | BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); | |||
8124 | ||||
8125 | if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking && | |||
8126 | sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts)) | |||
8127 | return true; | |||
8128 | ||||
8129 | // TODO: Move this into the switch on opcode - it handles shifts already. | |||
8130 | if (BinOp && (BinOp->getOpcode() == Instruction::AShr || | |||
8131 | BinOp->getOpcode() == Instruction::LShr)) { | |||
8132 | ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); | |||
8133 | if (CI && TLI->hasExtractBitsInsn()) | |||
8134 | if (OptimizeExtractBits(BinOp, CI, *TLI, *DL)) | |||
8135 | return true; | |||
8136 | } | |||
8137 | ||||
8138 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { | |||
8139 | if (GEPI->hasAllZeroIndices()) { | |||
8140 | // The GEP operand must be a pointer, and so must its result -> BitCast. | |||
8141 | Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), | |||
8142 | GEPI->getName(), GEPI); | |||
8143 | NC->setDebugLoc(GEPI->getDebugLoc()); | |||
8144 | replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc); | |||
8145 | GEPI->eraseFromParent(); | |||
8146 | ++NumGEPsElim; | |||
8147 | optimizeInst(NC, ModifiedDT); | |||
8148 | return true; | |||
8149 | } | |||
8150 | if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { | |||
8151 | return true; | |||
8152 | } | |||
8153 | return false; | |||
8154 | } | |||
8155 | ||||
8156 | if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) { | |||
8157 | // freeze(icmp a, const) -> icmp (freeze a), const | |||
8158 | // This helps generate efficient conditional jumps. | |||
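// E.g. `%c = icmp eq i32 %a, 0; %f = freeze i1 %c` becomes
// `%fa = freeze i32 %a; %c = icmp eq i32 %fa, 0`.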
8159 | Instruction *CmpI = nullptr; | |||
8160 | if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0))) | |||
8161 | CmpI = II; | |||
8162 | else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0))) | |||
8163 | CmpI = F->getFastMathFlags().none() ? F : nullptr; | |||
8164 | ||||
8165 | if (CmpI && CmpI->hasOneUse()) { | |||
8166 | auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1); | |||
8167 | bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) || | |||
8168 | isa<ConstantPointerNull>(Op0); | |||
8169 | bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) || | |||
8170 | isa<ConstantPointerNull>(Op1); | |||
8171 | if (Const0 || Const1) { | |||
8172 | if (!Const0 || !Const1) { | |||
8173 | auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI); | |||
8174 | F->takeName(FI); | |||
8175 | CmpI->setOperand(Const0 ? 1 : 0, F); | |||
8176 | } | |||
8177 | replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc); | |||
8178 | FI->eraseFromParent(); | |||
8179 | return true; | |||
8180 | } | |||
8181 | } | |||
8182 | return false; | |||
8183 | } | |||
8184 | ||||
8185 | if (tryToSinkFreeOperands(I)) | |||
8186 | return true; | |||
8187 | ||||
8188 | switch (I->getOpcode()) { | |||
8189 | case Instruction::Shl: | |||
8190 | case Instruction::LShr: | |||
8191 | case Instruction::AShr: | |||
8192 | return optimizeShiftInst(cast<BinaryOperator>(I)); | |||
8193 | case Instruction::Call: | |||
8194 | return optimizeCallInst(cast<CallInst>(I), ModifiedDT); | |||
8195 | case Instruction::Select: | |||
8196 | return optimizeSelectInst(cast<SelectInst>(I)); | |||
8197 | case Instruction::ShuffleVector: | |||
8198 | return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I)); | |||
8199 | case Instruction::Switch: | |||
8200 | return optimizeSwitchInst(cast<SwitchInst>(I)); | |||
8201 | case Instruction::ExtractElement: | |||
8202 | return optimizeExtractElementInst(cast<ExtractElementInst>(I)); | |||
8203 | case Instruction::Br: | |||
8204 | return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc); | |||
8205 | } | |||
8206 | ||||
8207 | return false; | |||
8208 | } | |||
8209 | ||||
8210 | /// Given an OR instruction, check to see if this is a bitreverse | |||
8211 | /// idiom. If so, insert the new intrinsic and return true. | |||
8212 | bool CodeGenPrepare::makeBitReverse(Instruction &I) { | |||
8213 | if (!I.getType()->isIntegerTy() || | |||
8214 | !TLI->isOperationLegalOrCustom(ISD::BITREVERSE, | |||
8215 | TLI->getValueType(*DL, I.getType(), true))) | |||
8216 | return false; | |||
8217 | ||||
8218 | SmallVector<Instruction *, 4> Insts; | |||
8219 | if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts)) | |||
8220 | return false; | |||
8221 | Instruction *LastInst = Insts.back(); | |||
8222 | replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc); | |||
8223 | RecursivelyDeleteTriviallyDeadInstructions( | |||
8224 | &I, TLInfo, nullptr, | |||
8225 | [&](Value *V) { removeAllAssertingVHReferences(V); }); | |||
8226 | return true; | |||
8227 | } | |||
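// A hand-written sketch of an idiom this can match: an i8 reversal built from
// successively swapping nibbles, 2-bit pairs, and single bits, e.g.
//   %a = shl i8 %x, 4
//   %b = lshr i8 %x, 4
//   %nib = or i8 %a, %b
//   ... (analogous masked swaps of 2-bit and 1-bit groups) ...
// collapses into a single call to llvm.bitreverse.i8.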
8228 | ||||
8229 | // In this pass we look for GEP and cast instructions that are used | |||
8230 | // across basic blocks and rewrite them to improve basic-block-at-a-time | |||
8231 | // selection. | |||
8232 | bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) { | |||
8233 | SunkAddrs.clear(); | |||
8234 | bool MadeChange = false; | |||
8235 | ||||
8236 | do { | |||
8237 | CurInstIterator = BB.begin(); | |||
8238 | ModifiedDT = ModifyDT::NotModifyDT; | |||
8239 | while (CurInstIterator != BB.end()) { | |||
8240 | MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT); | |||
8241 | if (ModifiedDT != ModifyDT::NotModifyDT) { | |||
8242 | // For huge functions we tend to quickly go through the inner optimization | |||
8243 | // opportunities in the BB. So we go back to the BB head to re-optimize | |||
8244 | // each instruction instead of going back to the function head. | |||
8245 | if (IsHugeFunc) { | |||
8246 | DT.reset(); | |||
8247 | getDT(*BB.getParent()); | |||
8248 | break; | |||
8249 | } else { | |||
8250 | return true; | |||
8251 | } | |||
8252 | } | |||
8253 | } | |||
8254 | } while (ModifiedDT == ModifyDT::ModifyInstDT); | |||
8255 | ||||
8256 | bool MadeBitReverse = true; | |||
8257 | while (MadeBitReverse) { | |||
8258 | MadeBitReverse = false; | |||
8259 | for (auto &I : reverse(BB)) { | |||
8260 | if (makeBitReverse(I)) { | |||
8261 | MadeBitReverse = MadeChange = true; | |||
8262 | break; | |||
8263 | } | |||
8264 | } | |||
8265 | } | |||
8266 | MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT); | |||
8267 | ||||
8268 | return MadeChange; | |||
8269 | } | |||
8270 | ||||
8271 | // Some CGP optimizations may move or alter what's computed in a block. Check | |||
8272 | // whether a dbg.value intrinsic could be pointed at a more appropriate operand. | |||
8273 | bool CodeGenPrepare::fixupDbgValue(Instruction *I) { | |||
8274 | assert(isa<DbgValueInst>(I)); | |||
8275 | DbgValueInst &DVI = *cast<DbgValueInst>(I); | |||
8276 | ||||
8277 | // Does this dbg.value refer to a sunk address calculation? | |||
8278 | bool AnyChange = false; | |||
8279 | SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(), | |||
8280 | DVI.location_ops().end()); | |||
8281 | for (Value *Location : LocationOps) { | |||
8282 | WeakTrackingVH SunkAddrVH = SunkAddrs[Location]; | |||
8283 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; | |||
8284 | if (SunkAddr) { | |||
8285 | // Point dbg.value at locally computed address, which should give the best | |||
8286 | // opportunity to be accurately lowered. This update may change the type | |||
8287 | // of pointer being referred to; however this makes no difference to | |||
8288 | // debugging information, and we can't generate bitcasts that may affect | |||
8289 | // codegen. | |||
8290 | DVI.replaceVariableLocationOp(Location, SunkAddr); | |||
8291 | AnyChange = true; | |||
8292 | } | |||
8293 | } | |||
8294 | return AnyChange; | |||
8295 | } | |||
8296 | ||||
8297 | // A llvm.dbg.value may be using a value before its definition, due to | |||
8298 | // optimizations in this pass and others. Scan for such dbg.values, and rescue | |||
8299 | // them by moving the dbg.value to immediately after the value definition. | |||
8300 | // FIXME: Ideally this should never be necessary, and this has the potential | |||
8301 | // to re-order dbg.value intrinsics. | |||
8302 | bool CodeGenPrepare::placeDbgValues(Function &F) { | |||
8303 | bool MadeChange = false; | |||
8304 | DominatorTree DT(F); | |||
8305 | ||||
8306 | for (BasicBlock &BB : F) { | |||
8307 | for (Instruction &Insn : llvm::make_early_inc_range(BB)) { | |||
8308 | DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn); | |||
8309 | if (!DVI) | |||
8310 | continue; | |||
8311 | ||||
8312 | SmallVector<Instruction *, 4> VIs; | |||
8313 | for (Value *V : DVI->getValues()) | |||
8314 | if (Instruction *VI = dyn_cast_or_null<Instruction>(V)) | |||
8315 | VIs.push_back(VI); | |||
8316 | ||||
8317 | // This DVI may depend on multiple instructions, complicating any | |||
8318 | // potential sink. This block takes the defensive approach, opting to | |||
8319 | // "undef" the DVI if it depends on more than one instruction and any of | |||
8320 | // them does not dominate the DVI. | |||
8321 | for (Instruction *VI : VIs) { | |||
8322 | if (VI->isTerminator()) | |||
8323 | continue; | |||
8324 | ||||
8325 | // If VI is a phi in a block with an EHPad terminator, we can't insert | |||
8326 | // after it. | |||
8327 | if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad()) | |||
8328 | continue; | |||
8329 | ||||
8330 | // If the defining instruction dominates the dbg.value, we do not need | |||
8331 | // to move the dbg.value. | |||
8332 | if (DT.dominates(VI, DVI)) | |||
8333 | continue; | |||
8334 | ||||
8335 | // If we depend on multiple instructions and any of them doesn't | |||
8336 | // dominate this DVI, we probably can't salvage it: moving it to | |||
8337 | // after any of the instructions could cause us to lose the others. | |||
8338 | if (VIs.size() > 1) { | |||
8339 | LLVM_DEBUG( | |||
8340 | dbgs() | |||
8341 | << "Unable to find valid location for Debug Value, undefing:\n" | |||
8342 | << *DVI); | |||
8343 | DVI->setKillLocation(); | |||
8344 | break; | |||
8345 | } | |||
8346 | ||||
8347 | LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n" | |||
8348 | << *DVI << ' ' << *VI); | |||
8349 | DVI->removeFromParent(); | |||
8350 | if (isa<PHINode>(VI)) | |||
8351 | DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt()); | |||
8352 | else | |||
8353 | DVI->insertAfter(VI); | |||
8354 | MadeChange = true; | |||
8355 | ++NumDbgValueMoved; | |||
8356 | } | |||
8357 | } | |||
8358 | } | |||
8359 | return MadeChange; | |||
8360 | } | |||
8361 | ||||
8362 | // Group scattered pseudo probes in a block to favor SelectionDAG. Scattered | |||
8363 | // probes can be chained dependencies of other regular DAG nodes and block DAG | |||
8364 | // combine optimizations. | |||
8365 | bool CodeGenPrepare::placePseudoProbes(Function &F) { | |||
8366 | bool MadeChange = false; | |||
8367 | for (auto &Block : F) { | |||
8368 | // Move the remaining probes to the beginning of the block. | |||
8369 | auto FirstInst = Block.getFirstInsertionPt(); | |||
8370 | while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst()) | |||
8371 | ++FirstInst; | |||
8372 | BasicBlock::iterator I(FirstInst); | |||
8373 | I++; | |||
8374 | while (I != Block.end()) { | |||
8375 | if (auto *II = dyn_cast<PseudoProbeInst>(I++)) { | |||
8376 | II->moveBefore(&*FirstInst); | |||
8377 | MadeChange = true; | |||
8378 | } | |||
8379 | } | |||
8380 | } | |||
8381 | return MadeChange; | |||
8382 | } | |||
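// Illustrative only: a block such as
//   %x = add i32 %a, %b
//   call void @llvm.pseudoprobe(i64 1234, i64 1, i32 0, i64 -1)
//   %y = mul i32 %x, %x
// is rewritten so the probe joins any probes already at the top of the block,
// leaving the add and mul adjacent for DAG combining.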
8383 | ||||
8384 | /// Scale down both weights to fit into uint32_t. | |||
8385 | static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { | |||
8386 | uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; | |||
8387 | uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; | |||
8388 | NewTrue = NewTrue / Scale; | |||
8389 | NewFalse = NewFalse / Scale; | |||
8390 | } | |||
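// E.g. NewTrue = 2^33 and NewFalse = 2^31 give Scale = 2^33/(2^32-1) + 1 = 3,
// so the weights become roughly 2.86e9 and 7.16e8: both fit in uint32_t and
// the original 4:1 ratio is preserved.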
8391 | ||||
8392 | /// Some targets prefer to split a conditional branch like: | |||
8393 | /// \code | |||
8394 | /// %0 = icmp ne i32 %a, 0 | |||
8395 | /// %1 = icmp ne i32 %b, 0 | |||
8396 | /// %or.cond = or i1 %0, %1 | |||
8397 | /// br i1 %or.cond, label %TrueBB, label %FalseBB | |||
8398 | /// \endcode | |||
8399 | /// into multiple branch instructions like: | |||
8400 | /// \code | |||
8401 | /// bb1: | |||
8402 | /// %0 = icmp ne i32 %a, 0 | |||
8403 | /// br i1 %0, label %TrueBB, label %bb2 | |||
8404 | /// bb2: | |||
8405 | /// %1 = icmp ne i32 %b, 0 | |||
8406 | /// br i1 %1, label %TrueBB, label %FalseBB | |||
8407 | /// \endcode | |||
8408 | /// This usually allows instruction selection to do even further optimizations | |||
8409 | /// and combine the compare with the branch instruction. Currently this is | |||
8410 | /// applied for targets which have "cheap" jump instructions. | |||
8411 | /// | |||
8412 | /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. | |||
8413 | /// | |||
8414 | bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) { | |||
8415 | if (!TM->Options.EnableFastISel || TLI->isJumpExpensive()) | |||
8416 | return false; | |||
8417 | ||||
8418 | bool MadeChange = false; | |||
8419 | for (auto &BB : F) { | |||
8420 | // Does this BB end with the following? | |||
8421 | // %cond1 = icmp|fcmp|binary instruction ... | |||
8422 | // %cond2 = icmp|fcmp|binary instruction ... | |||
8423 | // %cond.or = or|and i1 %cond1, %cond2 | |||
8424 | // br i1 %cond.or, label %dest1, label %dest2 | |||
8425 | Instruction *LogicOp; | |||
8426 | BasicBlock *TBB, *FBB; | |||
8427 | if (!match(BB.getTerminator(), | |||
8428 | m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB))) | |||
8429 | continue; | |||
8430 | ||||
8431 | auto *Br1 = cast<BranchInst>(BB.getTerminator()); | |||
8432 | if (Br1->getMetadata(LLVMContext::MD_unpredictable)) | |||
8433 | continue; | |||
8434 | ||||
8435 | // The merging of mostly empty BBs can cause a degenerate branch. | |||
8436 | if (TBB == FBB) | |||
8437 | continue; | |||
8438 | ||||
8439 | unsigned Opc; | |||
8440 | Value *Cond1, *Cond2; | |||
8441 | if (match(LogicOp, | |||
8442 | m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2))))) | |||
8443 | Opc = Instruction::And; | |||
8444 | else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)), | |||
8445 | m_OneUse(m_Value(Cond2))))) | |||
8446 | Opc = Instruction::Or; | |||
8447 | else | |||
8448 | continue; | |||
8449 | ||||
8450 | auto IsGoodCond = [](Value *Cond) { | |||
8451 | return match( | |||
8452 | Cond, | |||
8453 | m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()), | |||
8454 | m_LogicalOr(m_Value(), m_Value())))); | |||
8455 | }; | |||
8456 | if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2)) | |||
8457 | continue; | |||
8458 | ||||
8459 | LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump()); | |||
8460 | ||||
8461 | // Create a new BB. | |||
8462 | auto *TmpBB = | |||
8463 | BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split", | |||
8464 | BB.getParent(), BB.getNextNode()); | |||
8465 | if (IsHugeFunc) | |||
8466 | FreshBBs.insert(TmpBB); | |||
8467 | ||||
8468 | // Update the original basic block: use the first condition directly in the | |||
8469 | // branch instruction and remove the no-longer-needed and/or instruction. | |||
8470 | Br1->setCondition(Cond1); | |||
8471 | LogicOp->eraseFromParent(); | |||
8472 | ||||
8473 | // Depending on the condition we have to either replace the true or the | |||
8474 | // false successor of the original branch instruction. | |||
8475 | if (Opc == Instruction::And) | |||
8476 | Br1->setSuccessor(0, TmpBB); | |||
8477 | else | |||
8478 | Br1->setSuccessor(1, TmpBB); | |||
8479 | ||||
8480 | // Fill in the new basic block. | |||
8481 | auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); | |||
8482 | if (auto *I = dyn_cast<Instruction>(Cond2)) { | |||
8483 | I->removeFromParent(); | |||
8484 | I->insertBefore(Br2); | |||
8485 | } | |||
8486 | ||||
8487 | // Update PHI nodes in both successors. The original BB needs to be | |||
8488 | // replaced in one successor's PHI nodes, because the branch now comes from | |||
8489 | // the newly generated BB (TmpBB). In the other successor we need to add one | |||
8490 | // incoming edge to the PHI nodes, because both branch instructions now | |||
8491 | // target the same successor. Depending on the original branch condition | |||
8492 | // (and/or) we have to swap the successors (TrueDest, FalseDest), so that | |||
8493 | // we perform the correct update for the PHI nodes. | |||
8494 | // This doesn't change the successor order of the just created branch | |||
8495 | // instruction (or any other instruction). | |||
8496 | if (Opc == Instruction::Or) | |||
8497 | std::swap(TBB, FBB); | |||
8498 | ||||
8499 | // Replace the old BB with the new BB. | |||
8500 | TBB->replacePhiUsesWith(&BB, TmpBB); | |||
8501 | ||||
8502 | // Add another incoming edge from the new BB. | |||
8503 | for (PHINode &PN : FBB->phis()) { | |||
8504 | auto *Val = PN.getIncomingValueForBlock(&BB); | |||
8505 | PN.addIncoming(Val, TmpBB); | |||
8506 | } | |||
8507 | ||||
8508 | // Update the branch weights (from SelectionDAGBuilder:: | |||
8509 | // FindMergedConditions). | |||
8510 | if (Opc == Instruction::Or) { | |||
8511 | // Codegen X | Y as: | |||
8512 | // BB1: | |||
8513 | // jmp_if_X TBB | |||
8514 | // jmp TmpBB | |||
8515 | // TmpBB: | |||
8516 | // jmp_if_Y TBB | |||
8517 | // jmp FBB | |||
8518 | // | |||
8519 | ||||
8520 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | |||
8521 | // The requirement is that | |||
8522 | // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) | |||
8523 | // = TrueProb for original BB. | |||
8524 | // Assuming the original weights are A and B, one choice is to set BB1's | |||
8525 | // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice | |||
8526 | // assumes that | |||
8527 | // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. | |||
8528 | // Another choice is to assume TrueProb for BB1 equals to TrueProb for | |||
8529 | // TmpBB, but the math is more complicated. | |||
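// Sanity check of the first choice: TrueProb(BB1) = A/(2A+2B),
// FalseProb(BB1) = (A+2B)/(2A+2B) and TrueProb(TmpBB) = A/(A+2B), so
// A/(2A+2B) + (A+2B)/(2A+2B) * A/(A+2B) = 2A/(2A+2B) = A/(A+B),
// which is exactly the original TrueProb.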
8530 | uint64_t TrueWeight, FalseWeight; | |||
8531 | if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) { | |||
8532 | uint64_t NewTrueWeight = TrueWeight; | |||
8533 | uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; | |||
8534 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
8535 | Br1->setMetadata(LLVMContext::MD_prof, | |||
8536 | MDBuilder(Br1->getContext()) | |||
8537 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
8538 | ||||
8539 | NewTrueWeight = TrueWeight; | |||
8540 | NewFalseWeight = 2 * FalseWeight; | |||
8541 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
8542 | Br2->setMetadata(LLVMContext::MD_prof, | |||
8543 | MDBuilder(Br2->getContext()) | |||
8544 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
8545 | } | |||
8546 | } else { | |||
8547 | // Codegen X & Y as: | |||
8548 | // BB1: | |||
8549 | // jmp_if_X TmpBB | |||
8550 | // jmp FBB | |||
8551 | // TmpBB: | |||
8552 | // jmp_if_Y TBB | |||
8553 | // jmp FBB | |||
8554 | // | |||
8555 | // This requires creation of TmpBB after CurBB. | |||
8556 | ||||
8557 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | |||
8558 | // The requirement is that | |||
8559 | // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) | |||
8560 | // = FalseProb for original BB. | |||
8561 | // Assuming the original weights are A and B, one choice is to set BB1's | |||
8562 | // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice | |||
8563 | // assumes that | |||
8564 | // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. | |||
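// Sanity check: B/(2A+2B) + (2A+B)/(2A+2B) * B/(2A+B) = 2B/(2A+2B)
// = B/(A+B), which is exactly the original FalseProb.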
8565 | uint64_t TrueWeight, FalseWeight; | |||
8566 | if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) { | |||
8567 | uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; | |||
8568 | uint64_t NewFalseWeight = FalseWeight; | |||
8569 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
8570 | Br1->setMetadata(LLVMContext::MD_prof, | |||
8571 | MDBuilder(Br1->getContext()) | |||
8572 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
8573 | ||||
8574 | NewTrueWeight = 2 * TrueWeight; | |||
8575 | NewFalseWeight = FalseWeight; | |||
8576 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
8577 | Br2->setMetadata(LLVMContext::MD_prof, | |||
8578 | MDBuilder(Br2->getContext()) | |||
8579 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
8580 | } | |||
8581 | } | |||
8582 | ||||
8583 | ModifiedDT = ModifyDT::ModifyBBDT; | |||
8584 | MadeChange = true; | |||
8585 | ||||
8586 | LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump(); | |||
8587 | TmpBB->dump()); | |||
8588 | } | |||
8589 | return MadeChange; | |||
8590 | } |