File: lib/CodeGen/CodeGenPrepare.cpp
Warning: line 3135, column 41: Called C++ object pointer is null
//===- CodeGenPrepare.cpp - Prepare a function for code generation -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumMemoryInstsPhiCreated,
          "Number of phis created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumMemoryInstsSelectCreated,
          "Number of selects created when address "
          "computations were sunk to memory instructions");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool>
    EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
        cl::desc("Enable merging of redundant sexts when one is dominating"
                 " the other."),
        cl::init(true));

static cl::opt<bool> DisableComplexAddrModes(
    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
    cl::desc("Disables combining addressing modes with different parts "
             "in optimizeMemoryInst."));

static cl::opt<bool>
    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
                    cl::desc("Allow creation of Phis in Address sinking."));

static cl::opt<bool>
    AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
                       cl::desc("Allow creation of selects in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseReg(
    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseReg field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseGV(
    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseGV field in Address sinking."));

static cl::opt<bool> AddrSinkCombineBaseOffs(
    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of BaseOffs field in Address sinking."));

static cl::opt<bool> AddrSinkCombineScaledReg(
    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
    cl::desc("Allow combining of ScaledReg field in Address sinking."));

static cl::opt<bool>
    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
                         cl::init(true),
                         cl::desc("Enable splitting large offset of GEP."));

namespace {

enum ExtType {
  ZeroExtension, // Zero extension has been seen.
  SignExtension, // Sign extension has been seen.
  BothExtension  // This extension type is used if we saw sext after
                 // ZeroExtension had been set, or if we saw zext after
                 // SignExtension had been set. It makes the type
                 // information of a promoted instruction invalid.
};
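// Illustrative IR (hand-written, not taken from the pass itself): if the pass
// has recorded ZeroExtension for a value because of
//   %z = zext i16 %v to i32
// then a later
//   %s = sext i16 %v to i32
// flips the record to BothExtension, and the remembered original type can no
// longer be used to drive further promotion of %v.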

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address. The usage of WeakTrackingVH
  /// enables SunkAddrs to be treated as a cache whose entries can be
  /// invalidated if a sunken address computation has been erased.
  ValueMap<Value *, WeakTrackingVH> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the types of the related instructions before their
  /// promotion for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of GEPs accessing the same data structures such as structs or
  /// arrays that are candidates to be split later because of their large
  /// size.
  MapVector<
      AssertingVH<Value>,
      SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
      LargeOffsetGEPMap;

  /// Keep track of new GEP bases after splitting the GEPs having large
  /// offsets.
  SmallSet<AssertingVH<Value>, 2> NewGEPBases;

  /// Map large-offset GEPs to their serial numbers (IDs).
  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;

  /// Keep track of promoted SExts.
  ValueToSExts ValToSExtendedUses;

  /// True if the CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
                          Type *AccessTy, unsigned AddrSpace);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *Load);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
  bool optimizeSwitchInst(SwitchInst *SI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool splitLargeGEPOffsets();
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();

  ModifiedDT = false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    TM = &TPC->getTM<TargetMachine>();
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  BPI.reset(new BranchProbabilityInfo(F, *LI));
  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
  OptSize = F.optForSize();

  ProfileSummaryInfo *PSI =
      getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  if (ProfileGuidedSectionPrefix) {
    if (PSI->isFunctionHotInCallGraph(&F, *BFI))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F, *BFI))
      F.setSectionPrefix(".unlikely");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
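  /// For illustration (a sketch, not the transform's literal output): on a
  /// target that reports 64-bit division as slow with a 32-bit bypass width,
  ///   %q = udiv i64 %a, %b
  /// is rewritten to test at run time whether both operands fit in 32 bits
  /// and, if so, to perform a 32-bit udiv and zero-extend the result, falling
  /// back to the original 64-bit udiv otherwise.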
  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
      TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
        TLI->getBypassSlowDivWidths();
    BasicBlock *BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply
      // the optimization to those blocks.
      BasicBlock *Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= SplitIndirectBrCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was
      // changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);
    if (!LargeOffsetGEPMap.empty())
      MadeChange |= splitLargeGEPOffsets();

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    LargeOffsetGEPMap.clear();
    LargeOffsetGEPID.clear();
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    // Use a set vector to get deterministic iteration order. The order the
    // blocks are removed may affect whether or not PHI nodes in successors
    // are removed.
    SmallSetVector<BasicBlock *, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock *>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = WorkList.pop_back_val();
      SmallVector<BasicBlock *, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock *>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  // Do this last to clean up use-before-def scenarios introduced by other
  // preparatory transforms.
  EverMadeChange |= placeDbgValues(F);

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
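/// For example (illustrative CFG): if BB1 ends in "br label %BB2" and BB2 has
/// no predecessor other than BB1, BB2 can be folded into BB1 and the
/// unconditional branch deleted.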
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  // Use a temporary array to avoid the iterator being invalidated when
  // deleting blocks.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    auto *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");

      // Merge BB into SinglePred and delete it.
      MergeBlockIntoPredecessor(BB);
    }
  }
  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Copy blocks into a temporary array to avoid iterator invalidation issues
  // as we remove them.
  // Note that this intentionally skips the entry block.
  SmallVector<WeakTrackingVH, 16> Blocks;
  for (auto &Block : llvm::make_range(std::next(F.begin()), F.end()))
    Blocks.push_back(&Block);

  for (auto &Block : Blocks) {
    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
    if (!BB)
      continue;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
  // add COPY instructions in the predecessor of BB instead of BB (if it is not
  // merged). Note that the critical edge created by merging such blocks won't
  // be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
    return true;

  // We use a simple cost heuristic: skipping the merge is profitable if its
  // cost is less than the cost of merging, i.e.
  // Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)) and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), this simplifies to:
  // Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
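  //
  // Worked example (illustrative numbers): with Freq(Pred) = 1000,
  // Freq(BB) = 400, and the default FreqRatioToSkipMerge of 2, the final
  // comparison below evaluates 1000 <= 2 * 400, which is false, so skipping
  // the merge wins and we return false; with Freq(BB) = 600 it evaluates
  // 1000 <= 1200 and merging is deemed profitable.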

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
          return DestPN.getIncomingValueForBlock(BB) ==
                 DestPN.getIncomingValueForBlock(DestBBPred);
        }))
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred.
  // In this case, there is no reason to skip merging because COPYs are
  // expected to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
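/// Illustrative conflict (hand-drawn CFG, not from a test case): if Pred
/// branches to both BB and DestBB, and a phi in DestBB takes %a from the Pred
/// edge but a different value %b from the BB edge, then folding BB away would
/// leave that phi needing two distinct values for the single Pred edge, so
/// the merge must be refused.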
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  for (const PHINode &PN : BB->phis()) {
    for (const User *U : PN.users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true; // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock *, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) { // Common predecessor?
      for (const PHINode &PN : DestBB->phis()) {
        const Value *V1 = PN.getIncomingValueForBlock(Pred);
        const Value *V2 = PN.getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only phi's and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
                    << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      assert(SinglePred == BB &&
             "Single predecessor not the same as predecessor");
      // Merge DestBB into SinglePred/BB and delete it.
      MergeBlockIntoPredecessor(DestBB);
      // Note: BB(=SinglePred) will not be deleted on this path.
      // DestBB(=its single successor) is the one that was deleted.
      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  for (PHINode &PN : DestBB->phis()) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN.removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN.addIncoming(InValPhi->getIncomingValue(i),
                       InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN.addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions given a vector of all relocate
// calls.
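// For example (indices as in the gc.relocate convention used further below):
// relocate(%tok, i32 4, i32 4) is a base relocation, relocate(%tok, i32 4,
// i32 5) a derived one, and the resulting map sends the former to a vector
// containing the latter.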
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base
  // object while filling the second map; the second map is the final structure
  // holding a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector provided they're all
// small integer constants.
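// For example (illustrative): "gep %base, i32 0, i32 15" fills OffsetV with
// {0, 15} and returns true, while a non-constant index or any constant larger
// than 20 makes the function return false without touching OffsetV.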
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of a derived pointer is defined after
  // the relocation of its base pointer. If we find a relocation corresponding
  // to our base that is defined earlier than the relocation of the base, we
  // move the relocation of the base right before the found relocation. We
  // consider only relocations in the same basic block as the relocation of
  // the base. Relocations from other basic blocks are skipped by this
  // optimization and we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case the transform is only valid when the base dominates the
      // derived relocate. However, it would be too expensive to check
      // dominance for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

    // If gc_relocate does not match the actual type, cast it to the right
    // type. In theory, there must be a bitcast after gc_relocate if the type
    // does not match, and we should reuse it to get the derived pointer. But
    // there could be cases like this:
    //
    // bb1:
    //  ...
    //  %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // bb2:
    //  ...
    //  %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
    //  br label %merge
    //
    // merge:
    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
    //  %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
    //
    // In this case, we can no longer find the bitcast. So we insert a new
    // bitcast whether there is already one or not. This way we can handle all
    // cases, and the extra bitcast should be optimized away in later passes.
    Value *ActualRelocatedBase = RelocatedBase;
    if (RelocatedBase->getType() != Base->getType()) {
      ActualRelocatedBase =
          Builder.CreateBitCast(RelocatedBase, Base->getType());
    }
    Value *Replacement = Builder.CreateGEP(
        Derived->getSourceElementType(), ActualRelocatedBase,
        makeArrayRef(OffsetV));
    Replacement->takeName(ToReplace);
    // If the newly generated derived pointer's type does not match the
    // original derived pointer's type, cast the new derived pointer to match
    // it. Same reasoning as above.
    Value *ActualReplacement = Replacement;
    if (Replacement->getType() != ToReplace->getType()) {
      ActualReplacement =
          Builder.CreateBitCast(Replacement, ToReplace->getType());
    }
    ToReplace->replaceAllUsesWith(ActualReplacement);
    ToReplace->eraseFromParent();

    MadeChange = true;
  }
  return MadeChange;
}

// Turns this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = relocate(%tok, i32 4, i32 4)
// %ptr' = relocate(%tok, i32 4, i32 5)
// %val = load %ptr'
//
// into this:
//
// %base = ...
// %ptr = gep %base + 15
// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
// %base' = gc.relocate(%tok, i32 4, i32 4)
// %ptr' = gep %base' + 15
// %val = load %ptr'
bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
  bool MadeChange = false;
  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;

  for (auto *U : I.users())
    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
      // Collect all the relocate calls associated with a statepoint
      AllRelocateCalls.push_back(Relocate);

  // We need at least one base pointer relocation + one derived pointer
  // relocation to mangle.
  if (AllRelocateCalls.size() < 2)
    return false;

  // RelocateInstMap is a mapping from the base relocate instruction to the
  // corresponding derived relocate instructions.
  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
  if (RelocateInstMap.empty())
    return false;

  for (auto &Item : RelocateInstMap)
    // Item.first is the RelocatedBase to offset against.
    // Item.second is the vector of Targets to replace.
    MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second);
  return MadeChange;
}
1024 | ||||
1025 | /// SinkCast - Sink the specified cast instruction into its user blocks | |||
1026 | static bool SinkCast(CastInst *CI) { | |||
1027 | BasicBlock *DefBB = CI->getParent(); | |||
1028 | ||||
1029 | /// InsertedCasts - Only insert a cast in each block once. | |||
1030 | DenseMap<BasicBlock*, CastInst*> InsertedCasts; | |||
1031 | ||||
1032 | bool MadeChange = false; | |||
1033 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
1034 | UI != E; ) { | |||
1035 | Use &TheUse = UI.getUse(); | |||
1036 | Instruction *User = cast<Instruction>(*UI); | |||
1037 | ||||
1038 | // Figure out which BB this cast is used in. For PHI's this is the | |||
1039 | // appropriate predecessor block. | |||
1040 | BasicBlock *UserBB = User->getParent(); | |||
1041 | if (PHINode *PN = dyn_cast<PHINode>(User)) { | |||
1042 | UserBB = PN->getIncomingBlock(TheUse); | |||
1043 | } | |||
1044 | ||||
1045 | // Preincrement use iterator so we don't invalidate it. | |||
1046 | ++UI; | |||
1047 | ||||
1048 | // The first insertion point of a block containing an EH pad is after the | |||
1049 | // pad. If the pad is the user, we cannot sink the cast past the pad. | |||
1050 | if (User->isEHPad()) | |||
1051 | continue; | |||
1052 | ||||
1053 | // If the block selected to receive the cast is an EH pad that does not | |||
1054 | // allow non-PHI instructions before the terminator, we can't sink the | |||
1055 | // cast. | |||
1056 | if (UserBB->getTerminator()->isEHPad()) | |||
1057 | continue; | |||
1058 | ||||
1059 | // If this user is in the same block as the cast, don't change the cast. | |||
1060 | if (UserBB == DefBB) continue; | |||
1061 | ||||
1062 | // If we have already inserted a cast into this block, use it. | |||
1063 | CastInst *&InsertedCast = InsertedCasts[UserBB]; | |||
1064 | ||||
1065 | if (!InsertedCast) { | |||
1066 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1067 | assert(InsertPt != UserBB->end()); | |||
1068 | InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), | |||
1069 | CI->getType(), "", &*InsertPt); | |||
1070 | InsertedCast->setDebugLoc(CI->getDebugLoc()); | |||
1071 | } | |||
1072 | ||||
1073 | // Replace a use of the cast with a use of the new cast. | |||
1074 | TheUse = InsertedCast; | |||
1075 | MadeChange = true; | |||
1076 | ++NumCastUses; | |||
1077 | } | |||
1078 | ||||
1079 | // If we removed all uses, nuke the cast. | |||
1080 | if (CI->use_empty()) { | |||
1081 | salvageDebugInfo(*CI); | |||
1082 | CI->eraseFromParent(); | |||
1083 | MadeChange = true; | |||
1084 | } | |||
1085 | ||||
1086 | return MadeChange; | |||
1087 | } | |||
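 | // For example (an illustrative sketch of what SinkCast does): | |||
 | //   bb0: | |||
 | //     %c = bitcast i8* %p to i32* | |||
 | //     br label %bb1 | |||
 | //   bb1: | |||
 | //     %v = load i32, i32* %c | |||
 | // becomes | |||
 | //   bb0: | |||
 | //     br label %bb1 | |||
 | //   bb1: | |||
 | //     %c.sunk = bitcast i8* %p to i32* | |||
 | //     %v = load i32, i32* %c.sunk | |||
 | // so the cast no longer occupies a virtual register across the block edge. | |||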
1088 | ||||
1089 | /// If the specified cast instruction is a noop copy (e.g. it's casting from | |||
1090 | /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to | |||
1091 | /// reduce the number of virtual registers that must be created and coalesced. | |||
1092 | /// | |||
1093 | /// Return true if any changes are made. | |||
1094 | static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, | |||
1095 | const DataLayout &DL) { | |||
1096 | // Sink only "cheap" (or nop) address-space casts. This is a weaker condition | |||
1097 | // than sinking only nop casts, but is helpful on some platforms. | |||
1098 | if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { | |||
1099 | if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), | |||
1100 | ASC->getDestAddressSpace())) | |||
1101 | return false; | |||
1102 | } | |||
1103 | ||||
1104 | // If this is a noop copy, | |||
1105 | EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); | |||
1106 | EVT DstVT = TLI.getValueType(DL, CI->getType()); | |||
1107 | ||||
1108 | // Is this an fp<->int conversion? | |||
1109 | if (SrcVT.isInteger() != DstVT.isInteger()) | |||
1110 | return false; | |||
1111 | ||||
1112 | // If this is an extension, it will be a zero or sign extension, which | |||
1113 | // isn't a noop. | |||
1114 | if (SrcVT.bitsLT(DstVT)) return false; | |||
1115 | ||||
1116 | // If these values will be promoted, find out what they will be promoted | |||
1117 | // to. This helps us consider truncates on PPC as noop copies when they | |||
1118 | // are. | |||
1119 | if (TLI.getTypeAction(CI->getContext(), SrcVT) == | |||
1120 | TargetLowering::TypePromoteInteger) | |||
1121 | SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); | |||
1122 | if (TLI.getTypeAction(CI->getContext(), DstVT) == | |||
1123 | TargetLowering::TypePromoteInteger) | |||
1124 | DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); | |||
1125 | ||||
1126 | // If, after promotion, these are the same types, this is a noop copy. | |||
1127 | if (SrcVT != DstVT) | |||
1128 | return false; | |||
1129 | ||||
1130 | return SinkCast(CI); | |||
1131 | } | |||
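 | // For example, on a target where i16 is promoted to i32, the truncate | |||
 | //   %t = trunc i32 %x to i16 | |||
 | // ends up with SrcVT == DstVT == i32 after promotion, so it is treated as | |||
 | // a noop copy and handed to SinkCast above. | |||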
1132 | ||||
1133 | /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if | |||
1134 | /// possible. | |||
1135 | /// | |||
1136 | /// Return true if any changes were made. | |||
1137 | static bool CombineUAddWithOverflow(CmpInst *CI) { | |||
1138 | Value *A, *B; | |||
1139 | Instruction *AddI; | |||
1140 | if (!match(CI, | |||
1141 | m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI)))) | |||
1142 | return false; | |||
1143 | ||||
1144 | Type *Ty = AddI->getType(); | |||
1145 | if (!isa<IntegerType>(Ty)) | |||
1146 | return false; | |||
1147 | ||||
1148 | // We don't want to move around uses of condition values this late, so we | |||
1149 | // check if it is legal to create the call to the intrinsic in the basic | |||
1150 | // block containing the icmp: | |||
1151 | ||||
1152 | if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse()) | |||
1153 | return false; | |||
1154 | ||||
1155 | #ifndef NDEBUG | |||
1156 | // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption | |||
1157 | // for now: | |||
1158 | if (AddI->hasOneUse()) | |||
1159 | assert(*AddI->user_begin() == CI && "expected!"); | |||
1160 | #endif | |||
1161 | ||||
1162 | Module *M = CI->getModule(); | |||
1163 | Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty); | |||
1164 | ||||
1165 | auto *InsertPt = AddI->hasOneUse() ? CI : AddI; | |||
1166 | ||||
1167 | DebugLoc Loc = CI->getDebugLoc(); | |||
1168 | auto *UAddWithOverflow = | |||
1169 | CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt); | |||
1170 | UAddWithOverflow->setDebugLoc(Loc); | |||
1171 | auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt); | |||
1172 | UAdd->setDebugLoc(Loc); | |||
1173 | auto *Overflow = | |||
1174 | ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt); | |||
1175 | Overflow->setDebugLoc(Loc); | |||
1176 | ||||
1177 | CI->replaceAllUsesWith(Overflow); | |||
1178 | AddI->replaceAllUsesWith(UAdd); | |||
1179 | CI->eraseFromParent(); | |||
1180 | AddI->eraseFromParent(); | |||
1181 | return true; | |||
1182 | } | |||
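 | // For example (an illustrative sketch, using the names created above): | |||
 | //   %add = add i64 %a, %b | |||
 | //   %cmp = icmp ult i64 %add, %a          ; unsigned-overflow idiom | |||
 | // becomes | |||
 | //   %uadd.overflow = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b) | |||
 | //   %uadd = extractvalue { i64, i1 } %uadd.overflow, 0 | |||
 | //   %overflow = extractvalue { i64, i1 } %uadd.overflow, 1 | |||
 | // with %cmp replaced by %overflow and %add by %uadd. | |||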
1183 | ||||
1184 | /// Sink the given CmpInst into user blocks to reduce the number of virtual | |||
1185 | /// registers that must be created and coalesced. This is a clear win except on | |||
1186 | /// targets with multiple condition code registers (PowerPC), where it might | |||
1187 | /// lose; some adjustment may be wanted there. | |||
1188 | /// | |||
1189 | /// Return true if any changes are made. | |||
1190 | static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) { | |||
1191 | BasicBlock *DefBB = CI->getParent(); | |||
1192 | ||||
1193 | // Avoid sinking soft-FP comparisons, since this can move them into a loop. | |||
1194 | if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI)) | |||
1195 | return false; | |||
1196 | ||||
1197 | // Only insert a cmp in each block once. | |||
1198 | DenseMap<BasicBlock*, CmpInst*> InsertedCmps; | |||
1199 | ||||
1200 | bool MadeChange = false; | |||
1201 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
1202 | UI != E; ) { | |||
1203 | Use &TheUse = UI.getUse(); | |||
1204 | Instruction *User = cast<Instruction>(*UI); | |||
1205 | ||||
1206 | // Preincrement use iterator so we don't invalidate it. | |||
1207 | ++UI; | |||
1208 | ||||
1209 | // Don't bother for PHI nodes. | |||
1210 | if (isa<PHINode>(User)) | |||
1211 | continue; | |||
1212 | ||||
1213 | // Figure out which BB this cmp is used in. | |||
1214 | BasicBlock *UserBB = User->getParent(); | |||
1215 | ||||
1216 | // If this user is in the same block as the cmp, don't change the cmp. | |||
1217 | if (UserBB == DefBB) continue; | |||
1218 | ||||
1219 | // If we have already inserted a cmp into this block, use it. | |||
1220 | CmpInst *&InsertedCmp = InsertedCmps[UserBB]; | |||
1221 | ||||
1222 | if (!InsertedCmp) { | |||
1223 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1224 | assert(InsertPt != UserBB->end()); | |||
1225 | InsertedCmp = | |||
1226 | CmpInst::Create(CI->getOpcode(), CI->getPredicate(), | |||
1227 | CI->getOperand(0), CI->getOperand(1), "", &*InsertPt); | |||
1228 | // Propagate the debug info. | |||
1229 | InsertedCmp->setDebugLoc(CI->getDebugLoc()); | |||
1230 | } | |||
1231 | ||||
1232 | // Replace a use of the cmp with a use of the new cmp. | |||
1233 | TheUse = InsertedCmp; | |||
1234 | MadeChange = true; | |||
1235 | ++NumCmpUses; | |||
1236 | } | |||
1237 | ||||
1238 | // If we removed all uses, nuke the cmp. | |||
1239 | if (CI->use_empty()) { | |||
1240 | CI->eraseFromParent(); | |||
1241 | MadeChange = true; | |||
1242 | } | |||
1243 | ||||
1244 | return MadeChange; | |||
1245 | } | |||
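 | // For example, %cmp = icmp slt i32 %a, %b defined in the entry block but | |||
 | // branched on in %bb1 and %bb2 is re-created once in each of %bb1 and %bb2, | |||
 | // so isel can fold every copy into the branch that consumes it. | |||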
1246 | ||||
1247 | static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) { | |||
1248 | if (SinkCmpExpression(CI, TLI)) | |||
1249 | return true; | |||
1250 | ||||
1251 | if (CombineUAddWithOverflow(CI)) | |||
1252 | return true; | |||
1253 | ||||
1254 | return false; | |||
1255 | } | |||
1256 | ||||
1257 | /// Duplicate and sink the given 'and' instruction into user blocks where it is | |||
1258 | /// used in a compare to allow isel to generate better code for targets where | |||
1259 | /// this operation can be combined. | |||
1260 | /// | |||
1261 | /// Return true if any changes are made. | |||
1262 | static bool sinkAndCmp0Expression(Instruction *AndI, | |||
1263 | const TargetLowering &TLI, | |||
1264 | SetOfInstrs &InsertedInsts) { | |||
1265 | // Double-check that we're not trying to optimize an instruction that was | |||
1266 | // already optimized by some other part of this pass. | |||
1267 | assert(!InsertedInsts.count(AndI) && | |||
1268 | "Attempting to optimize already optimized and instruction"); | |||
1269 | (void) InsertedInsts; | |||
1270 | ||||
1271 | // Nothing to do for single use in same basic block. | |||
1272 | if (AndI->hasOneUse() && | |||
1273 | AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) | |||
1274 | return false; | |||
1275 | ||||
1276 | // Try to avoid cases where sinking/duplicating is likely to increase register | |||
1277 | // pressure. | |||
1278 | if (!isa<ConstantInt>(AndI->getOperand(0)) && | |||
1279 | !isa<ConstantInt>(AndI->getOperand(1)) && | |||
1280 | AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) | |||
1281 | return false; | |||
1282 | ||||
1283 | for (auto *U : AndI->users()) { | |||
1284 | Instruction *User = cast<Instruction>(U); | |||
1285 | ||||
1286 | // Only sink an 'and' mask that feeds an icmp with 0. | |||
1287 | if (!isa<ICmpInst>(User)) | |||
1288 | return false; | |||
1289 | ||||
1290 | auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); | |||
1291 | if (!CmpC || !CmpC->isZero()) | |||
1292 | return false; | |||
1293 | } | |||
1294 | ||||
1295 | if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) | |||
1296 | return false; | |||
1297 | ||||
1298 | LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); | |||
1299 | LLVM_DEBUG(AndI->getParent()->dump()); | |||
1300 | ||||
1301 | // Push the 'and' into the same block as the icmp 0. There should only be | |||
1302 | // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any | |||
1303 | // others, so we don't need to keep track of which BBs we insert into. | |||
1304 | for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); | |||
1305 | UI != E; ) { | |||
1306 | Use &TheUse = UI.getUse(); | |||
1307 | Instruction *User = cast<Instruction>(*UI); | |||
1308 | ||||
1309 | // Preincrement use iterator so we don't invalidate it. | |||
1310 | ++UI; | |||
1311 | ||||
1312 | LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); | |||
1313 | ||||
1314 | // Keep the 'and' in the same place if the use is already in the same block. | |||
1315 | Instruction *InsertPt = | |||
1316 | User->getParent() == AndI->getParent() ? AndI : User; | |||
1317 | Instruction *InsertedAnd = | |||
1318 | BinaryOperator::Create(Instruction::And, AndI->getOperand(0), | |||
1319 | AndI->getOperand(1), "", InsertPt); | |||
1320 | // Propagate the debug info. | |||
1321 | InsertedAnd->setDebugLoc(AndI->getDebugLoc()); | |||
1322 | ||||
1323 | // Replace a use of the 'and' with a use of the new 'and'. | |||
1324 | TheUse = InsertedAnd; | |||
1325 | ++NumAndUses; | |||
1326 | LLVM_DEBUG(User->getParent()->dump()); | |||
1327 | } | |||
1328 | ||||
1329 | // We removed all uses, nuke the and. | |||
1330 | AndI->eraseFromParent(); | |||
1331 | return true; | |||
1332 | } | |||
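 | // For example (an illustrative sketch): | |||
 | //   bb0: | |||
 | //     %and = and i32 %x, 255 | |||
 | //     br label %bb1 | |||
 | //   bb1: | |||
 | //     %cmp = icmp eq i32 %and, 0 | |||
 | // becomes | |||
 | //   bb1: | |||
 | //     %and.sunk = and i32 %x, 255 | |||
 | //     %cmp = icmp eq i32 %and.sunk, 0 | |||
 | // which targets with a test-under-mask instruction can select as a single | |||
 | // compare. | |||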
1333 | ||||
1334 | /// Check if the candidates could be combined with a shift instruction, which | |||
1335 | /// includes: | |||
1336 | /// 1. Truncate instruction | |||
1337 | /// 2. An 'and' instruction whose immediate is a mask of the low bits: | |||
1338 | /// imm & (imm+1) == 0 | |||
1339 | static bool isExtractBitsCandidateUse(Instruction *User) { | |||
1340 | if (!isa<TruncInst>(User)) { | |||
1341 | if (User->getOpcode() != Instruction::And || | |||
1342 | !isa<ConstantInt>(User->getOperand(1))) | |||
1343 | return false; | |||
1344 | ||||
1345 | const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); | |||
1346 | ||||
1347 | if ((Cimm & (Cimm + 1)).getBoolValue()) | |||
1348 | return false; | |||
1349 | } | |||
1350 | return true; | |||
1351 | } | |||
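 | // For example, imm = 0x00ff is accepted, since 0x00ff & 0x0100 == 0 (a | |||
 | // contiguous low-bit mask), while imm = 0x00f0 is rejected, since | |||
 | // 0x00f0 & 0x00f1 == 0x00f0 != 0. | |||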
1352 | ||||
1353 | /// Sink both shift and truncate instruction to the use of truncate's BB. | |||
1354 | static bool | |||
1355 | SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, | |||
1356 | DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, | |||
1357 | const TargetLowering &TLI, const DataLayout &DL) { | |||
1358 | BasicBlock *UserBB = User->getParent(); | |||
1359 | DenseMap<BasicBlock *, CastInst *> InsertedTruncs; | |||
1360 | TruncInst *TruncI = cast<TruncInst>(User); // Caller guarantees a truncate. | |||
1361 | bool MadeChange = false; | |||
1362 | ||||
1363 | for (Value::user_iterator TruncUI = TruncI->user_begin(), | |||
1364 | TruncE = TruncI->user_end(); | |||
1365 | TruncUI != TruncE;) { | |||
1366 | ||||
1367 | Use &TruncTheUse = TruncUI.getUse(); | |||
1368 | Instruction *TruncUser = cast<Instruction>(*TruncUI); | |||
1369 | // Preincrement use iterator so we don't invalidate it. | |||
1370 | ||||
1371 | ++TruncUI; | |||
1372 | ||||
1373 | int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); | |||
1374 | if (!ISDOpcode) | |||
1375 | continue; | |||
1376 | ||||
1377 | // If the use is actually a legal node, there will not be an | |||
1378 | // implicit truncate. | |||
1379 | // FIXME: always querying the result type is just an | |||
1380 | // approximation; some nodes' legality is determined by the | |||
1381 | // operand or other means. There's no good way to find out though. | |||
1382 | if (TLI.isOperationLegalOrCustom( | |||
1383 | ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) | |||
1384 | continue; | |||
1385 | ||||
1386 | // Don't bother for PHI nodes. | |||
1387 | if (isa<PHINode>(TruncUser)) | |||
1388 | continue; | |||
1389 | ||||
1390 | BasicBlock *TruncUserBB = TruncUser->getParent(); | |||
1391 | ||||
1392 | if (UserBB == TruncUserBB) | |||
1393 | continue; | |||
1394 | ||||
1395 | BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; | |||
1396 | CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; | |||
1397 | ||||
1398 | if (!InsertedShift && !InsertedTrunc) { | |||
1399 | BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1400 | assert(InsertPt != TruncUserBB->end()); | |||
1401 | // Sink the shift | |||
1402 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1403 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1404 | "", &*InsertPt); | |||
1405 | else | |||
1406 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1407 | "", &*InsertPt); | |||
1408 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | |||
1409 | ||||
1410 | // Sink the trunc | |||
1411 | BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1412 | TruncInsertPt++; | |||
1413 | assert(TruncInsertPt != TruncUserBB->end()); | |||
1414 | ||||
1415 | InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, | |||
1416 | TruncI->getType(), "", &*TruncInsertPt); | |||
1417 | InsertedTrunc->setDebugLoc(TruncI->getDebugLoc()); | |||
1418 | ||||
1419 | MadeChange = true; | |||
1420 | ||||
1421 | TruncTheUse = InsertedTrunc; | |||
1422 | } | |||
1423 | } | |||
1424 | return MadeChange; | |||
1425 | } | |||
1426 | ||||
1427 | /// Sink the shift *right* instruction into user blocks if the uses could | |||
1428 | /// potentially be combined with this shift instruction and generate BitExtract | |||
1429 | /// instruction. It will only be applied if the architecture supports BitExtract | |||
1430 | /// instruction. Here is an example: | |||
1431 | /// BB1: | |||
1432 | /// %x.extract.shift = lshr i64 %arg1, 32 | |||
1433 | /// BB2: | |||
1434 | /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 | |||
1435 | /// ==> | |||
1436 | /// | |||
1437 | /// BB2: | |||
1438 | /// %x.extract.shift.1 = lshr i64 %arg1, 32 | |||
1439 | /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 | |||
1440 | /// | |||
1441 | /// CodeGen will recognize the pattern in BB2 and generate BitExtract | |||
1442 | /// instruction. | |||
1443 | /// Return true if any changes are made. | |||
1444 | static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, | |||
1445 | const TargetLowering &TLI, | |||
1446 | const DataLayout &DL) { | |||
1447 | BasicBlock *DefBB = ShiftI->getParent(); | |||
1448 | ||||
1449 | /// Only insert instructions in each block once. | |||
1450 | DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; | |||
1451 | ||||
1452 | bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); | |||
1453 | ||||
1454 | bool MadeChange = false; | |||
1455 | for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); | |||
1456 | UI != E;) { | |||
1457 | Use &TheUse = UI.getUse(); | |||
1458 | Instruction *User = cast<Instruction>(*UI); | |||
1459 | // Preincrement use iterator so we don't invalidate it. | |||
1460 | ++UI; | |||
1461 | ||||
1462 | // Don't bother for PHI nodes. | |||
1463 | if (isa<PHINode>(User)) | |||
1464 | continue; | |||
1465 | ||||
1466 | if (!isExtractBitsCandidateUse(User)) | |||
1467 | continue; | |||
1468 | ||||
1469 | BasicBlock *UserBB = User->getParent(); | |||
1470 | ||||
1471 | if (UserBB == DefBB) { | |||
1472 | // If the shift and truncate instructions are in the same BB, the use of | |||
1473 | // the truncate (TruncUse) may still introduce another truncate if it is | |||
1474 | // not legal. In this case, we would like to sink both the shift and the | |||
1475 | // truncate instruction to the BB of TruncUse. | |||
1476 | // for example: | |||
1477 | // BB1: | |||
1478 | // i64 shift.result = lshr i64 opnd, imm | |||
1479 | // trunc.result = trunc shift.result to i16 | |||
1480 | // | |||
1481 | // BB2: | |||
1482 | // ----> We will have an implicit truncate here if the architecture does | |||
1483 | // not have i16 compare. | |||
1484 | // cmp i16 trunc.result, opnd2 | |||
1485 | // | |||
1486 | if (isa<TruncInst>(User) && shiftIsLegal && | |||
1487 | // If the type of the truncate is legal, no truncate will be | |||
1488 | // introduced in other basic blocks. | |||
1489 | (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) | |||
1491 | MadeChange = | |||
1492 | SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); | |||
1493 | ||||
1494 | continue; | |||
1495 | } | |||
1496 | // If we have already inserted a shift into this block, use it. | |||
1497 | BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; | |||
1498 | ||||
1499 | if (!InsertedShift) { | |||
1500 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1501 | assert(InsertPt != UserBB->end()); | |||
1502 | ||||
1503 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1504 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1505 | "", &*InsertPt); | |||
1506 | else | |||
1507 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1508 | "", &*InsertPt); | |||
1509 | InsertedShift->setDebugLoc(ShiftI->getDebugLoc()); | |||
1510 | ||||
1511 | MadeChange = true; | |||
1512 | } | |||
1513 | ||||
1514 | // Replace a use of the shift with a use of the new shift. | |||
1515 | TheUse = InsertedShift; | |||
1516 | } | |||
1517 | ||||
1518 | // If we removed all uses, nuke the shift. | |||
1519 | if (ShiftI->use_empty()) { | |||
1520 | salvageDebugInfo(*ShiftI); | |||
1521 | ShiftI->eraseFromParent(); | |||
1522 | } | |||
1523 | ||||
1524 | return MadeChange; | |||
1525 | } | |||
1526 | ||||
1527 | /// If counting leading or trailing zeros is an expensive operation and a zero | |||
1528 | /// input is defined, add a check for zero to avoid calling the intrinsic. | |||
1529 | /// | |||
1530 | /// We want to transform: | |||
1531 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) | |||
1532 | /// | |||
1533 | /// into: | |||
1534 | /// entry: | |||
1535 | /// %cmpz = icmp eq i64 %A, 0 | |||
1536 | /// br i1 %cmpz, label %cond.end, label %cond.false | |||
1537 | /// cond.false: | |||
1538 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) | |||
1539 | /// br label %cond.end | |||
1540 | /// cond.end: | |||
1541 | /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] | |||
1542 | /// | |||
1543 | /// If the transform is performed, return true and set ModifiedDT to true. | |||
1544 | static bool despeculateCountZeros(IntrinsicInst *CountZeros, | |||
1545 | const TargetLowering *TLI, | |||
1546 | const DataLayout *DL, | |||
1547 | bool &ModifiedDT) { | |||
1548 | if (!TLI || !DL) | |||
1549 | return false; | |||
1550 | ||||
1551 | // If a zero input is undefined, it doesn't make sense to despeculate that. | |||
1552 | if (match(CountZeros->getOperand(1), m_One())) | |||
1553 | return false; | |||
1554 | ||||
1555 | // If it's cheap to speculate, there's nothing to do. | |||
1556 | auto IntrinsicID = CountZeros->getIntrinsicID(); | |||
1557 | if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || | |||
1558 | (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) | |||
1559 | return false; | |||
1560 | ||||
1561 | // Only handle legal scalar cases. Anything else requires too much work. | |||
1562 | Type *Ty = CountZeros->getType(); | |||
1563 | unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); | |||
1564 | if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) | |||
1565 | return false; | |||
1566 | ||||
1567 | // The intrinsic will be sunk behind a compare against zero and branch. | |||
1568 | BasicBlock *StartBlock = CountZeros->getParent(); | |||
1569 | BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); | |||
1570 | ||||
1571 | // Create another block after the count zero intrinsic. A PHI will be added | |||
1572 | // in this block to select the result of the intrinsic or the bit-width | |||
1573 | // constant if the input to the intrinsic is zero. | |||
1574 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); | |||
1575 | BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); | |||
1576 | ||||
1577 | // Set up a builder to create a compare, conditional branch, and PHI. | |||
1578 | IRBuilder<> Builder(CountZeros->getContext()); | |||
1579 | Builder.SetInsertPoint(StartBlock->getTerminator()); | |||
1580 | Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); | |||
1581 | ||||
1582 | // Replace the unconditional branch that was created by the first split with | |||
1583 | // a compare against zero and a conditional branch. | |||
1584 | Value *Zero = Constant::getNullValue(Ty); | |||
1585 | Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); | |||
1586 | Builder.CreateCondBr(Cmp, EndBlock, CallBlock); | |||
1587 | StartBlock->getTerminator()->eraseFromParent(); | |||
1588 | ||||
1589 | // Create a PHI in the end block to select either the output of the intrinsic | |||
1590 | // or the bit width of the operand. | |||
1591 | Builder.SetInsertPoint(&EndBlock->front()); | |||
1592 | PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); | |||
1593 | CountZeros->replaceAllUsesWith(PN); | |||
1594 | Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); | |||
1595 | PN->addIncoming(BitWidth, StartBlock); | |||
1596 | PN->addIncoming(CountZeros, CallBlock); | |||
1597 | ||||
1598 | // We are explicitly handling the zero case, so we can set the intrinsic's | |||
1599 | // undefined zero argument to 'true'. This will also prevent reprocessing the | |||
1600 | // intrinsic; we only despeculate when a zero input is defined. | |||
1601 | CountZeros->setArgOperand(1, Builder.getTrue()); | |||
1602 | ModifiedDT = true; | |||
1603 | return true; | |||
1604 | } | |||
1605 | ||||
1606 | bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { | |||
1607 | BasicBlock *BB = CI->getParent(); | |||
1608 | ||||
1609 | // Lower inline assembly if we can. | |||
1610 | // If we found an inline asm expression, and if the target knows how to | |||
1611 | // lower it to normal LLVM code, do so now. | |||
1612 | if (TLI && isa<InlineAsm>(CI->getCalledValue())) { | |||
1613 | if (TLI->ExpandInlineAsm(CI)) { | |||
1614 | // Avoid invalidating the iterator. | |||
1615 | CurInstIterator = BB->begin(); | |||
1616 | // Avoid processing instructions out of order, which could cause | |||
1617 | // reuse before a value is defined. | |||
1618 | SunkAddrs.clear(); | |||
1619 | return true; | |||
1620 | } | |||
1621 | // Sink address computing for memory operands into the block. | |||
1622 | if (optimizeInlineAsmInst(CI)) | |||
1623 | return true; | |||
1624 | } | |||
1625 | ||||
1626 | // Align the pointer arguments to this call if the target thinks it's a good | |||
1627 | // idea | |||
1628 | unsigned MinSize, PrefAlign; | |||
1629 | if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { | |||
1630 | for (auto &Arg : CI->arg_operands()) { | |||
1631 | // We want to align both objects whose address is used directly and | |||
1632 | // objects whose address is used in casts and GEPs, though it only makes | |||
1633 | // sense for GEPs if the offset is a multiple of the desired alignment and | |||
1634 | // if size - offset meets the size threshold. | |||
1635 | if (!Arg->getType()->isPointerTy()) | |||
1636 | continue; | |||
1637 | APInt Offset(DL->getIndexSizeInBits( | |||
1638 | cast<PointerType>(Arg->getType())->getAddressSpace()), | |||
1639 | 0); | |||
1640 | Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); | |||
1641 | uint64_t Offset2 = Offset.getLimitedValue(); | |||
1642 | if ((Offset2 & (PrefAlign-1)) != 0) | |||
1643 | continue; | |||
1644 | AllocaInst *AI; | |||
1645 | if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && | |||
1646 | DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) | |||
1647 | AI->setAlignment(PrefAlign); | |||
1648 | // Global variables can only be aligned if they are defined in this | |||
1649 | // object (i.e. they are uniquely initialized in this object), and | |||
1650 | // over-aligning global variables that have an explicit section is | |||
1651 | // forbidden. | |||
1652 | GlobalVariable *GV; | |||
1653 | if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && | |||
1654 | GV->getPointerAlignment(*DL) < PrefAlign && | |||
1655 | DL->getTypeAllocSize(GV->getValueType()) >= | |||
1656 | MinSize + Offset2) | |||
1657 | GV->setAlignment(PrefAlign); | |||
1658 | } | |||
1659 | // If this is a memcpy (or similar) then we may be able to improve the | |||
1660 | // alignment | |||
1661 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { | |||
1662 | unsigned DestAlign = getKnownAlignment(MI->getDest(), *DL); | |||
1663 | if (DestAlign > MI->getDestAlignment()) | |||
1664 | MI->setDestAlignment(DestAlign); | |||
1665 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { | |||
1666 | unsigned SrcAlign = getKnownAlignment(MTI->getSource(), *DL); | |||
1667 | if (SrcAlign > MTI->getSourceAlignment()) | |||
1668 | MTI->setSourceAlignment(SrcAlign); | |||
1669 | } | |||
1670 | } | |||
1671 | } | |||
1672 | ||||
1673 | // If we have a cold call site, try to sink addressing computation into the | |||
1674 | // cold block. This interacts with our handling for loads and stores to | |||
1675 | // ensure that we can fold all uses of a potential addressing computation | |||
1676 | // into their uses. TODO: generalize this to work over profiling data | |||
1677 | if (!OptSize && CI->hasFnAttr(Attribute::Cold)) | |||
1678 | for (auto &Arg : CI->arg_operands()) { | |||
1679 | if (!Arg->getType()->isPointerTy()) | |||
1680 | continue; | |||
1681 | unsigned AS = Arg->getType()->getPointerAddressSpace(); | |||
1682 | return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); | |||
1683 | } | |||
1684 | ||||
1685 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | |||
1686 | if (II) { | |||
1687 | switch (II->getIntrinsicID()) { | |||
1688 | default: break; | |||
1689 | case Intrinsic::objectsize: { | |||
1690 | // Lower all uses of llvm.objectsize.* | |||
1691 | ConstantInt *RetVal = | |||
1692 | lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); | |||
1693 | // Substituting this can cause recursive simplifications, which can | |||
1694 | // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case | |||
1695 | // this happens. | |||
1697 | Value *CurValue = &*CurInstIterator; | |||
1698 | WeakTrackingVH IterHandle(CurValue); | |||
1699 | ||||
1700 | replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); | |||
1701 | ||||
1702 | // If the iterator instruction was recursively deleted, start over at the | |||
1703 | // start of the block. | |||
1704 | if (IterHandle != CurValue) { | |||
1705 | CurInstIterator = BB->begin(); | |||
1706 | SunkAddrs.clear(); | |||
1707 | } | |||
1708 | return true; | |||
1709 | } | |||
1710 | case Intrinsic::aarch64_stlxr: | |||
1711 | case Intrinsic::aarch64_stxr: { | |||
1712 | ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); | |||
1713 | if (!ExtVal || !ExtVal->hasOneUse() || | |||
1714 | ExtVal->getParent() == CI->getParent()) | |||
1715 | return false; | |||
1716 | // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. | |||
1717 | ExtVal->moveBefore(CI); | |||
1718 | // Mark this instruction as "inserted by CGP", so that other | |||
1719 | // optimizations don't touch it. | |||
1720 | InsertedInsts.insert(ExtVal); | |||
1721 | return true; | |||
1722 | } | |||
1723 | case Intrinsic::launder_invariant_group: | |||
1724 | case Intrinsic::strip_invariant_group: { | |||
1725 | Value *ArgVal = II->getArgOperand(0); | |||
1726 | auto it = LargeOffsetGEPMap.find(II); | |||
1727 | if (it != LargeOffsetGEPMap.end()) { | |||
1728 | // Merge entries in LargeOffsetGEPMap to reflect the RAUW. | |||
1729 | // Make sure not to have to deal with iterator invalidation | |||
1730 | // after possibly adding ArgVal to LargeOffsetGEPMap. | |||
1731 | auto GEPs = std::move(it->second); | |||
1732 | LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end()); | |||
1733 | LargeOffsetGEPMap.erase(II); | |||
1734 | } | |||
1735 | ||||
1736 | II->replaceAllUsesWith(ArgVal); | |||
1737 | II->eraseFromParent(); | |||
1738 | return true; | |||
1739 | } | |||
1740 | case Intrinsic::cttz: | |||
1741 | case Intrinsic::ctlz: | |||
1742 | // If counting zeros is expensive, try to avoid it. | |||
1743 | return despeculateCountZeros(II, TLI, DL, ModifiedDT); | |||
1744 | } | |||
1745 | ||||
1746 | if (TLI) { | |||
1747 | SmallVector<Value*, 2> PtrOps; | |||
1748 | Type *AccessTy; | |||
1749 | if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) | |||
1750 | while (!PtrOps.empty()) { | |||
1751 | Value *PtrVal = PtrOps.pop_back_val(); | |||
1752 | unsigned AS = PtrVal->getType()->getPointerAddressSpace(); | |||
1753 | if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) | |||
1754 | return true; | |||
1755 | } | |||
1756 | } | |||
1757 | } | |||
1758 | ||||
1759 | // From here on out we're working with named functions. | |||
1760 | if (!CI->getCalledFunction()) return false; | |||
1761 | ||||
1762 | // Lower all default uses of _chk calls. This is very similar | |||
1763 | // to what InstCombineCalls does, but here we are only lowering calls | |||
1764 | // to fortified library functions (e.g. __memcpy_chk) that have the default | |||
1765 | // "don't know" as the objectsize. Anything else should be left alone. | |||
1766 | FortifiedLibCallSimplifier Simplifier(TLInfo, true); | |||
1767 | if (Value *V = Simplifier.optimizeCall(CI)) { | |||
1768 | CI->replaceAllUsesWith(V); | |||
1769 | CI->eraseFromParent(); | |||
1770 | return true; | |||
1771 | } | |||
1772 | ||||
1773 | return false; | |||
1774 | } | |||
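 | // For example (an illustrative sketch of the fortified-call lowering above): | |||
 | //   %r = call i8* @__memcpy_chk(i8* %d, i8* %s, i64 %n, i64 -1) | |||
 | // carries the "don't know" object size of -1, so its runtime check can | |||
 | // never fail and the call is simplified to a plain memcpy. | |||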
1775 | ||||
1776 | /// Look for opportunities to duplicate return instructions to the predecessor | |||
1777 | /// to enable tail call optimizations. The case it is currently looking for is: | |||
1778 | /// @code | |||
1779 | /// bb0: | |||
1780 | /// %tmp0 = tail call i32 @f0() | |||
1781 | /// br label %return | |||
1782 | /// bb1: | |||
1783 | /// %tmp1 = tail call i32 @f1() | |||
1784 | /// br label %return | |||
1785 | /// bb2: | |||
1786 | /// %tmp2 = tail call i32 @f2() | |||
1787 | /// br label %return | |||
1788 | /// return: | |||
1789 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] | |||
1790 | /// ret i32 %retval | |||
1791 | /// @endcode | |||
1792 | /// | |||
1793 | /// => | |||
1794 | /// | |||
1795 | /// @code | |||
1796 | /// bb0: | |||
1797 | /// %tmp0 = tail call i32 @f0() | |||
1798 | /// ret i32 %tmp0 | |||
1799 | /// bb1: | |||
1800 | /// %tmp1 = tail call i32 @f1() | |||
1801 | /// ret i32 %tmp1 | |||
1802 | /// bb2: | |||
1803 | /// %tmp2 = tail call i32 @f2() | |||
1804 | /// ret i32 %tmp2 | |||
1805 | /// @endcode | |||
1806 | bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { | |||
1807 | if (!TLI) | |||
1808 | return false; | |||
1809 | ||||
1810 | ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); | |||
1811 | if (!RetI) | |||
1812 | return false; | |||
1813 | ||||
1814 | PHINode *PN = nullptr; | |||
1815 | BitCastInst *BCI = nullptr; | |||
1816 | Value *V = RetI->getReturnValue(); | |||
1817 | if (V) { | |||
1818 | BCI = dyn_cast<BitCastInst>(V); | |||
1819 | if (BCI) | |||
1820 | V = BCI->getOperand(0); | |||
1821 | ||||
1822 | PN = dyn_cast<PHINode>(V); | |||
1823 | if (!PN) | |||
1824 | return false; | |||
1825 | } | |||
1826 | ||||
1827 | if (PN && PN->getParent() != BB) | |||
1828 | return false; | |||
1829 | ||||
1830 | // Make sure there are no instructions between the PHI and return, or that the | |||
1831 | // return is the first instruction in the block. | |||
1832 | if (PN) { | |||
1833 | BasicBlock::iterator BI = BB->begin(); | |||
1834 | do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); | |||
1835 | if (&*BI == BCI) | |||
1836 | // Also skip over the bitcast. | |||
1837 | ++BI; | |||
1838 | if (&*BI != RetI) | |||
1839 | return false; | |||
1840 | } else { | |||
1841 | BasicBlock::iterator BI = BB->begin(); | |||
1842 | while (isa<DbgInfoIntrinsic>(BI)) ++BI; | |||
1843 | if (&*BI != RetI) | |||
1844 | return false; | |||
1845 | } | |||
1846 | ||||
1847 | /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail | |||
1848 | /// call. | |||
1849 | const Function *F = BB->getParent(); | |||
1850 | SmallVector<CallInst*, 4> TailCalls; | |||
1851 | if (PN) { | |||
1852 | for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { | |||
1853 | CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); | |||
1854 | // Make sure the phi value is indeed produced by the tail call. | |||
1855 | if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && | |||
1856 | TLI->mayBeEmittedAsTailCall(CI) && | |||
1857 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
1858 | TailCalls.push_back(CI); | |||
1859 | } | |||
1860 | } else { | |||
1861 | SmallPtrSet<BasicBlock*, 4> VisitedBBs; | |||
1862 | for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { | |||
1863 | if (!VisitedBBs.insert(*PI).second) | |||
1864 | continue; | |||
1865 | ||||
1866 | BasicBlock::InstListType &InstList = (*PI)->getInstList(); | |||
1867 | BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); | |||
1868 | BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); | |||
1869 | do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); | |||
1870 | if (RI == RE) | |||
1871 | continue; | |||
1872 | ||||
1873 | CallInst *CI = dyn_cast<CallInst>(&*RI); | |||
1874 | if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && | |||
1875 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
1876 | TailCalls.push_back(CI); | |||
1877 | } | |||
1878 | } | |||
1879 | ||||
1880 | bool Changed = false; | |||
1881 | for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { | |||
1882 | CallInst *CI = TailCalls[i]; | |||
1883 | CallSite CS(CI); | |||
1884 | ||||
1885 | // Make sure the call instruction is followed by an unconditional branch to | |||
1886 | // the return block. | |||
1887 | BasicBlock *CallBB = CI->getParent(); | |||
1888 | BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); | |||
1889 | if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) | |||
1890 | continue; | |||
1891 | ||||
1892 | // Duplicate the return into CallBB. | |||
1893 | (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); | |||
1894 | ModifiedDT = Changed = true; | |||
1895 | ++NumRetsDup; | |||
1896 | } | |||
1897 | ||||
1898 | // If we eliminated all predecessors of the block, delete the block now. | |||
1899 | if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) | |||
1900 | BB->eraseFromParent(); | |||
1901 | ||||
1902 | return Changed; | |||
1903 | } | |||
1904 | ||||
1905 | //===----------------------------------------------------------------------===// | |||
1906 | // Memory Optimization | |||
1907 | //===----------------------------------------------------------------------===// | |||
1908 | ||||
1909 | namespace { | |||
1910 | ||||
1911 | /// This is an extended version of TargetLowering::AddrMode | |||
1912 | /// which holds actual Value*'s for register values. | |||
1913 | struct ExtAddrMode : public TargetLowering::AddrMode { | |||
1914 | Value *BaseReg = nullptr; | |||
1915 | Value *ScaledReg = nullptr; | |||
1916 | Value *OriginalValue = nullptr; | |||
1917 | ||||
1918 | enum FieldName { | |||
1919 | NoField = 0x00, | |||
1920 | BaseRegField = 0x01, | |||
1921 | BaseGVField = 0x02, | |||
1922 | BaseOffsField = 0x04, | |||
1923 | ScaledRegField = 0x08, | |||
1924 | ScaleField = 0x10, | |||
1925 | MultipleFields = 0xff | |||
1926 | }; | |||
1927 | ||||
1928 | ExtAddrMode() = default; | |||
1929 | ||||
1930 | void print(raw_ostream &OS) const; | |||
1931 | void dump() const; | |||
1932 | ||||
1933 | FieldName compare(const ExtAddrMode &other) { | |||
1934 | // First check that the types are the same on each field, as differing | |||
1935 | // types are something we can't cope with later on. | |||
1936 | if (BaseReg && other.BaseReg && | |||
1937 | BaseReg->getType() != other.BaseReg->getType()) | |||
1938 | return MultipleFields; | |||
1939 | if (BaseGV && other.BaseGV && | |||
1940 | BaseGV->getType() != other.BaseGV->getType()) | |||
1941 | return MultipleFields; | |||
1942 | if (ScaledReg && other.ScaledReg && | |||
1943 | ScaledReg->getType() != other.ScaledReg->getType()) | |||
1944 | return MultipleFields; | |||
1945 | ||||
1946 | // Check each field to see if it differs. | |||
1947 | unsigned Result = NoField; | |||
1948 | if (BaseReg != other.BaseReg) | |||
1949 | Result |= BaseRegField; | |||
1950 | if (BaseGV != other.BaseGV) | |||
1951 | Result |= BaseGVField; | |||
1952 | if (BaseOffs != other.BaseOffs) | |||
1953 | Result |= BaseOffsField; | |||
1954 | if (ScaledReg != other.ScaledReg) | |||
1955 | Result |= ScaledRegField; | |||
1956 | // Don't count 0 as being a different scale, because that actually means | |||
1957 | // unscaled (which will already be counted by having no ScaledReg). | |||
1958 | if (Scale && other.Scale && Scale != other.Scale) | |||
1959 | Result |= ScaleField; | |||
1960 | ||||
1961 | if (countPopulation(Result) > 1) | |||
1962 | return MultipleFields; | |||
1963 | else | |||
1964 | return static_cast<FieldName>(Result); | |||
1965 | } | |||
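 | // For example, [Base:%p + 8] vs. [Base:%p + 16] differ only in BaseOffs, so | |||
 | // compare() returns BaseOffsField; [Base:%p + 8] vs. [Base:%q + 16] differ | |||
 | // in two fields and yield MultipleFields. | |||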
1966 | ||||
1967 | // An AddrMode is trivial if it involves no calculation, i.e. it is just a | |||
1968 | // base with no offset. | |||
1969 | bool isTrivial() { | |||
1970 | // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is | |||
1971 | // trivial if at most one of these terms is nonzero, except that BaseGV and | |||
1972 | // BaseReg both being zero actually means a null pointer value, which we | |||
1973 | // consider to be 'non-zero' here. | |||
1974 | return !BaseOffs && !Scale && !(BaseGV && BaseReg); | |||
1975 | } | |||
1976 | ||||
1977 | Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) { | |||
1978 | switch (Field) { | |||
1979 | default: | |||
1980 | return nullptr; | |||
1981 | case BaseRegField: | |||
1982 | return BaseReg; | |||
1983 | case BaseGVField: | |||
1984 | return BaseGV; | |||
1985 | case ScaledRegField: | |||
1986 | return ScaledReg; | |||
1987 | case BaseOffsField: | |||
1988 | return ConstantInt::get(IntPtrTy, BaseOffs); | |||
1989 | } | |||
1990 | } | |||
1991 | ||||
1992 | void SetCombinedField(FieldName Field, Value *V, | |||
1993 | const SmallVectorImpl<ExtAddrMode> &AddrModes) { | |||
1994 | switch (Field) { | |||
1995 | default: | |||
1996 | llvm_unreachable("Unhandled fields are expected to be rejected earlier")::llvm::llvm_unreachable_internal("Unhandled fields are expected to be rejected earlier" , "/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/CodeGenPrepare.cpp" , 1996); | |||
1997 | break; | |||
1998 | case ExtAddrMode::BaseRegField: | |||
1999 | BaseReg = V; | |||
2000 | break; | |||
2001 | case ExtAddrMode::BaseGVField: | |||
2002 | // A combined BaseGV is an Instruction, not a GlobalValue, so it goes | |||
2003 | // in the BaseReg field. | |||
2004 | assert(BaseReg == nullptr); | |||
2005 | BaseReg = V; | |||
2006 | BaseGV = nullptr; | |||
2007 | break; | |||
2008 | case ExtAddrMode::ScaledRegField: | |||
2009 | ScaledReg = V; | |||
2010 | // If we have a mix of scaled and unscaled addrmodes then we want the | |||
2011 | // scale to be the nonzero one, not zero. | |||
2012 | if (!Scale) | |||
2013 | for (const ExtAddrMode &AM : AddrModes) | |||
2014 | if (AM.Scale) { | |||
2015 | Scale = AM.Scale; | |||
2016 | break; | |||
2017 | } | |||
2018 | break; | |||
2019 | case ExtAddrMode::BaseOffsField: | |||
2020 | // The offset is no longer a constant, so it goes in ScaledReg with a | |||
2021 | // scale of 1. | |||
2022 | assert(ScaledReg == nullptr); | |||
2023 | ScaledReg = V; | |||
2024 | Scale = 1; | |||
2025 | BaseOffs = 0; | |||
2026 | break; | |||
2027 | } | |||
2028 | } | |||
2029 | }; | |||
2030 | ||||
2031 | } // end anonymous namespace | |||
2032 | ||||
2033 | #ifndef NDEBUG | |||
2034 | static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { | |||
2035 | AM.print(OS); | |||
2036 | return OS; | |||
2037 | } | |||
2038 | #endif | |||
2039 | ||||
2040 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
2041 | void ExtAddrMode::print(raw_ostream &OS) const { | |||
2042 | bool NeedPlus = false; | |||
2043 | OS << "["; | |||
2044 | if (BaseGV) { | |||
2045 | OS << (NeedPlus ? " + " : "") | |||
2046 | << "GV:"; | |||
2047 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | |||
2048 | NeedPlus = true; | |||
2049 | } | |||
2050 | ||||
2051 | if (BaseOffs) { | |||
2052 | OS << (NeedPlus ? " + " : "") | |||
2053 | << BaseOffs; | |||
2054 | NeedPlus = true; | |||
2055 | } | |||
2056 | ||||
2057 | if (BaseReg) { | |||
2058 | OS << (NeedPlus ? " + " : "") | |||
2059 | << "Base:"; | |||
2060 | BaseReg->printAsOperand(OS, /*PrintType=*/false); | |||
2061 | NeedPlus = true; | |||
2062 | } | |||
2063 | if (Scale) { | |||
2064 | OS << (NeedPlus ? " + " : "") | |||
2065 | << Scale << "*"; | |||
2066 | ScaledReg->printAsOperand(OS, /*PrintType=*/false); | |||
2067 | } | |||
2068 | ||||
2069 | OS << ']'; | |||
2070 | } | |||
2071 | ||||
2072 | LLVM_DUMP_METHOD void ExtAddrMode::dump() const { | |||
2073 | print(dbgs()); | |||
2074 | dbgs() << '\n'; | |||
2075 | } | |||
2076 | #endif | |||
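 | // For example, an address of the form @g + 16 + %x + 4*%i prints as | |||
 | //   [GV:@g + 16 + Base:%x + 4*%i] | |||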
2077 | ||||
2078 | namespace { | |||
2079 | ||||
2080 | /// This class provides transaction based operation on the IR. | |||
2081 | /// Every change made through this class is recorded in the internal state and | |||
2082 | /// can be undone (rollback) until commit is called. | |||
2083 | class TypePromotionTransaction { | |||
2084 | /// This represents the common interface of the individual transaction. | |||
2085 | /// Each class implements the logic for doing one specific modification on | |||
2086 | /// the IR via the TypePromotionTransaction. | |||
2087 | class TypePromotionAction { | |||
2088 | protected: | |||
2089 | /// The Instruction modified. | |||
2090 | Instruction *Inst; | |||
2091 | ||||
2092 | public: | |||
2093 | /// Constructor of the action. | |||
2094 | /// The constructor performs the related action on the IR. | |||
2095 | TypePromotionAction(Instruction *Inst) : Inst(Inst) {} | |||
2096 | ||||
2097 | virtual ~TypePromotionAction() = default; | |||
2098 | ||||
2099 | /// Undo the modification done by this action. | |||
2100 | /// When this method is called, the IR must be in the same state as it was | |||
2101 | /// before this action was applied. | |||
2102 | /// \pre Undoing the action works if and only if the IR is in the exact same | |||
2103 | /// state as it was directly after this action was applied. | |||
2104 | virtual void undo() = 0; | |||
2105 | ||||
2106 | /// Commit every change made by this action. | |||
2107 | /// When the results on the IR of the action are to be kept, it is important | |||
2108 | /// to call this function, otherwise hidden information may be kept forever. | |||
2109 | virtual void commit() { | |||
2110 | // Nothing to be done, this action is not doing anything. | |||
2111 | } | |||
2112 | }; | |||
2113 | ||||
2114 | /// Utility to remember the position of an instruction. | |||
2115 | class InsertionHandler { | |||
2116 | /// Position of an instruction. | |||
2117 | /// Either the instruction: | |||
2118 | /// - is the first in its basic block: BB is used. | |||
2119 | /// - has a previous instruction: PrevInst is used. | |||
2120 | union { | |||
2121 | Instruction *PrevInst; | |||
2122 | BasicBlock *BB; | |||
2123 | } Point; | |||
2124 | ||||
2125 | /// Remember whether or not the instruction had a previous instruction. | |||
2126 | bool HasPrevInstruction; | |||
2127 | ||||
2128 | public: | |||
2129 | /// Record the position of \p Inst. | |||
2130 | InsertionHandler(Instruction *Inst) { | |||
2131 | BasicBlock::iterator It = Inst->getIterator(); | |||
2132 | HasPrevInstruction = (It != (Inst->getParent()->begin())); | |||
2133 | if (HasPrevInstruction) | |||
2134 | Point.PrevInst = &*--It; | |||
2135 | else | |||
2136 | Point.BB = Inst->getParent(); | |||
2137 | } | |||
2138 | ||||
2139 | /// Insert \p Inst at the recorded position. | |||
2140 | void insert(Instruction *Inst) { | |||
2141 | if (HasPrevInstruction) { | |||
2142 | if (Inst->getParent()) | |||
2143 | Inst->removeFromParent(); | |||
2144 | Inst->insertAfter(Point.PrevInst); | |||
2145 | } else { | |||
2146 | Instruction *Position = &*Point.BB->getFirstInsertionPt(); | |||
2147 | if (Inst->getParent()) | |||
2148 | Inst->moveBefore(Position); | |||
2149 | else | |||
2150 | Inst->insertBefore(Position); | |||
2151 | } | |||
2152 | } | |||
2153 | }; | |||
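 | // For example, if the recorded instruction was the first in its block, | |||
 | // insert() places it back at the block's first insertion point; otherwise | |||
 | // it is re-inserted immediately after the remembered previous instruction. | |||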
2154 | ||||
2155 | /// Move an instruction before another. | |||
2156 | class InstructionMoveBefore : public TypePromotionAction { | |||
2157 | /// Original position of the instruction. | |||
2158 | InsertionHandler Position; | |||
2159 | ||||
2160 | public: | |||
2161 | /// Move \p Inst before \p Before. | |||
2162 | InstructionMoveBefore(Instruction *Inst, Instruction *Before) | |||
2163 | : TypePromotionAction(Inst), Position(Inst) { | |||
2164 | LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Beforedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: move: " << * Inst << "\nbefore: " << *Before << "\n"; } } while (false) | |||
2165 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: move: " << * Inst << "\nbefore: " << *Before << "\n"; } } while (false); | |||
2166 | Inst->moveBefore(Before); | |||
2167 | } | |||
2168 | ||||
2169 | /// Move the instruction back to its original position. | |||
2170 | void undo() override { | |||
2171 | LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); | |||
2172 | Position.insert(Inst); | |||
2173 | } | |||
2174 | }; | |||
2175 | ||||
2176 | /// Set the operand of an instruction with a new value. | |||
2177 | class OperandSetter : public TypePromotionAction { | |||
2178 | /// Original operand of the instruction. | |||
2179 | Value *Origin; | |||
2180 | ||||
2181 | /// Index of the modified instruction. | |||
2182 | unsigned Idx; | |||
2183 | ||||
2184 | public: | |||
2185 | /// Set \p Idx operand of \p Inst with \p NewVal. | |||
2186 | OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) | |||
2187 | : TypePromotionAction(Inst), Idx(Idx) { | |||
2188 | LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" | |||
2189 | << "for:" << *Inst << "\n" | |||
2190 | << "with:" << *NewVal << "\n"); | |||
2191 | Origin = Inst->getOperand(Idx); | |||
2192 | Inst->setOperand(Idx, NewVal); | |||
2193 | } | |||
2194 | ||||
2195 | /// Restore the original value of the instruction. | |||
2196 | void undo() override { | |||
2197 | LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" | |||
2198 | << "for: " << *Inst << "\n" | |||
2199 | << "with: " << *Origin << "\n"); | |||
2200 | Inst->setOperand(Idx, Origin); | |||
2201 | } | |||
2202 | }; | |||
2203 | ||||
2204 | /// Hide the operands of an instruction. | |||
2205 | /// Do as if this instruction was not using any of its operands. | |||
2206 | class OperandsHider : public TypePromotionAction { | |||
2207 | /// The list of original operands. | |||
2208 | SmallVector<Value *, 4> OriginalValues; | |||
2209 | ||||
2210 | public: | |||
2211 | /// Remove \p Inst from the uses of the operands of \p Inst. | |||
2212 | OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { | |||
2213 | LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); | |||
2214 | unsigned NumOpnds = Inst->getNumOperands(); | |||
2215 | OriginalValues.reserve(NumOpnds); | |||
2216 | for (unsigned It = 0; It < NumOpnds; ++It) { | |||
2217 | // Save the current operand. | |||
2218 | Value *Val = Inst->getOperand(It); | |||
2219 | OriginalValues.push_back(Val); | |||
2220 | // Set a dummy one. | |||
2221 | // We could use OperandSetter here, but that would imply an overhead | |||
2222 | // that we are not willing to pay. | |||
2223 | Inst->setOperand(It, UndefValue::get(Val->getType())); | |||
2224 | } | |||
2225 | } | |||
2226 | ||||
2227 | /// Restore the original list of uses. | |||
2228 | void undo() override { | |||
2229 | LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); | |||
2230 | for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) | |||
2231 | Inst->setOperand(It, OriginalValues[It]); | |||
2232 | } | |||
2233 | }; | |||
2234 | ||||
2235 | /// Build a truncate instruction. | |||
2236 | class TruncBuilder : public TypePromotionAction { | |||
2237 | Value *Val; | |||
2238 | ||||
2239 | public: | |||
2240 | /// Build a truncate instruction of \p Opnd producing a \p Ty | |||
2241 | /// result. | |||
2242 | /// trunc Opnd to Ty. | |||
2243 | TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { | |||
2244 | IRBuilder<> Builder(Opnd); | |||
2245 | Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); | |||
2246 | LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); | |||
2247 | } | |||
2248 | ||||
2249 | /// Get the built value. | |||
2250 | Value *getBuiltValue() { return Val; } | |||
2251 | ||||
2252 | /// Remove the built instruction. | |||
2253 | void undo() override { | |||
2254 | LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); | |||
2255 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2256 | IVal->eraseFromParent(); | |||
2257 | } | |||
2258 | }; | |||
2259 | ||||
2260 | /// Build a sign extension instruction. | |||
2261 | class SExtBuilder : public TypePromotionAction { | |||
2262 | Value *Val; | |||
2263 | ||||
2264 | public: | |||
2265 | /// Build a sign extension instruction of \p Opnd producing a \p Ty | |||
2266 | /// result. | |||
2267 | /// sext Opnd to Ty. | |||
2268 | SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2269 | : TypePromotionAction(InsertPt) { | |||
2270 | IRBuilder<> Builder(InsertPt); | |||
2271 | Val = Builder.CreateSExt(Opnd, Ty, "promoted"); | |||
2272 | LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); | |||
2273 | } | |||
2274 | ||||
2275 | /// Get the built value. | |||
2276 | Value *getBuiltValue() { return Val; } | |||
2277 | ||||
2278 | /// Remove the built instruction. | |||
2279 | void undo() override { | |||
2280 | LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); | |||
2281 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2282 | IVal->eraseFromParent(); | |||
2283 | } | |||
2284 | }; | |||
2285 | ||||
2286 | /// Build a zero extension instruction. | |||
2287 | class ZExtBuilder : public TypePromotionAction { | |||
2288 | Value *Val; | |||
2289 | ||||
2290 | public: | |||
2291 | /// Build a zero extension instruction of \p Opnd producing a \p Ty | |||
2292 | /// result. | |||
2293 | /// zext Opnd to Ty. | |||
2294 | ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2295 | : TypePromotionAction(InsertPt) { | |||
2296 | IRBuilder<> Builder(InsertPt); | |||
2297 | Val = Builder.CreateZExt(Opnd, Ty, "promoted"); | |||
2298 | LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); | |||
2299 | } | |||
2300 | ||||
2301 | /// Get the built value. | |||
2302 | Value *getBuiltValue() { return Val; } | |||
2303 | ||||
2304 | /// Remove the built instruction. | |||
2305 | void undo() override { | |||
2306 | LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); | |||
2307 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2308 | IVal->eraseFromParent(); | |||
2309 | } | |||
2310 | }; | |||
2311 | ||||
2312 | /// Mutate an instruction to another type. | |||
2313 | class TypeMutator : public TypePromotionAction { | |||
2314 | /// Record the original type. | |||
2315 | Type *OrigTy; | |||
2316 | ||||
2317 | public: | |||
2318 | /// Mutate the type of \p Inst into \p NewTy. | |||
2319 | TypeMutator(Instruction *Inst, Type *NewTy) | |||
2320 | : TypePromotionAction(Inst), OrigTy(Inst->getType()) { | |||
2321 | LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy | |||
2322 | << "\n"); | |||
2323 | Inst->mutateType(NewTy); | |||
2324 | } | |||
2325 | ||||
2326 | /// Mutate the instruction back to its original type. | |||
2327 | void undo() override { | |||
2328 | LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy | |||
2329 | << "\n"); | |||
2330 | Inst->mutateType(OrigTy); | |||
2331 | } | |||
2332 | }; | |||
2333 | ||||
2334 | /// Replace the uses of an instruction by another instruction. | |||
2335 | class UsesReplacer : public TypePromotionAction { | |||
2336 | /// Helper structure to keep track of the replaced uses. | |||
2337 | struct InstructionAndIdx { | |||
2338 | /// The instruction that uses the replaced instruction. | |||
2339 | Instruction *Inst; | |||
2340 | ||||
2341 | /// The operand index of Inst at which the replaced instruction is used. | |||
2342 | unsigned Idx; | |||
2343 | ||||
2344 | InstructionAndIdx(Instruction *Inst, unsigned Idx) | |||
2345 | : Inst(Inst), Idx(Idx) {} | |||
2346 | }; | |||
2347 | ||||
2348 | /// Keep track of the original uses (pair Instruction, Index). | |||
2349 | SmallVector<InstructionAndIdx, 4> OriginalUses; | |||
2350 | ||||
2351 | using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; | |||
2352 | ||||
2353 | public: | |||
2354 | /// Replace all the uses of \p Inst with \p New. | |||
2355 | UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { | |||
2356 | LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New | |||
2357 | << "\n"); | |||
2358 | // Record the original uses. | |||
2359 | for (Use &U : Inst->uses()) { | |||
2360 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
2361 | OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); | |||
2362 | } | |||
2363 | // Now, we can replace the uses. | |||
2364 | Inst->replaceAllUsesWith(New); | |||
2365 | } | |||
2366 | ||||
2367 | /// Reassign the original uses of Inst to Inst. | |||
2368 | void undo() override { | |||
2369 | LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); | |||
2370 | for (use_iterator UseIt = OriginalUses.begin(), | |||
2371 | EndIt = OriginalUses.end(); | |||
2372 | UseIt != EndIt; ++UseIt) { | |||
2373 | UseIt->Inst->setOperand(UseIt->Idx, Inst); | |||
2374 | } | |||
2375 | } | |||
2376 | }; | |||
2377 | ||||
2378 | /// Remove an instruction from the IR. | |||
2379 | class InstructionRemover : public TypePromotionAction { | |||
2380 | /// Original position of the instruction. | |||
2381 | InsertionHandler Inserter; | |||
2382 | ||||
2383 | /// Helper structure to hide all the links to the instruction. In other | |||
2384 | /// words, this makes it appear as if the instruction was removed. | |||
2385 | OperandsHider Hider; | |||
2386 | ||||
2387 | /// Keep track of the uses replaced, if any. | |||
2388 | UsesReplacer *Replacer = nullptr; | |||
2389 | ||||
2390 | /// Keep track of instructions removed. | |||
2391 | SetOfInstrs &RemovedInsts; | |||
2392 | ||||
2393 | public: | |||
2394 | /// Remove all references to \p Inst and optionally replace all its | |||
2395 | /// uses with \p New. | |||
2396 | /// \p RemovedInsts Keep track of the instructions removed by this Action. | |||
2397 | /// \pre If !Inst->use_empty(), then New != nullptr | |||
2398 | InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, | |||
2399 | Value *New = nullptr) | |||
2400 | : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), | |||
2401 | RemovedInsts(RemovedInsts) { | |||
2402 | if (New) | |||
2403 | Replacer = new UsesReplacer(Inst, New); | |||
2404 | LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); | |||
2405 | RemovedInsts.insert(Inst); | |||
2406 | /// The instructions removed here will be freed after completing | |||
2407 | /// optimizeBlock() for all blocks as we need to keep track of the | |||
2408 | /// removed instructions during promotion. | |||
2409 | Inst->removeFromParent(); | |||
2410 | } | |||
2411 | ||||
2412 | ~InstructionRemover() override { delete Replacer; } | |||
2413 | ||||
2414 | /// Resurrect the instruction and reassign it to the proper uses if | |||
2415 | /// a new value was provided when building this action. | |||
2416 | void undo() override { | |||
2417 | LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); | |||
2418 | Inserter.insert(Inst); | |||
2419 | if (Replacer) | |||
2420 | Replacer->undo(); | |||
2421 | Hider.undo(); | |||
2422 | RemovedInsts.erase(Inst); | |||
2423 | } | |||
2424 | }; | |||
2425 | ||||
2426 | public: | |||
2427 | /// Restoration point. | |||
2428 | /// The restoration point is a pointer to an action instead of an iterator | |||
2429 | /// because the iterator may be invalidated but not the pointer. | |||
2430 | using ConstRestorationPt = const TypePromotionAction *; | |||
2431 | ||||
2432 | TypePromotionTransaction(SetOfInstrs &RemovedInsts) | |||
2433 | : RemovedInsts(RemovedInsts) {} | |||
2434 | ||||
2435 | /// Commit every change made in this transaction. | |||
2436 | void commit(); | |||
2437 | ||||
2438 | /// Undo all the changes made after the given point. | |||
2439 | void rollback(ConstRestorationPt Point); | |||
2440 | ||||
2441 | /// Get the current restoration point. | |||
2442 | ConstRestorationPt getRestorationPoint() const; | |||
2443 | ||||
2444 | /// \name API for IR modification with state keeping to support rollback. | |||
2445 | /// @{ | |||
2446 | /// Same as Instruction::setOperand. | |||
2447 | void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); | |||
2448 | ||||
2449 | /// Same as Instruction::eraseFromParent. | |||
2450 | void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); | |||
2451 | ||||
2452 | /// Same as Value::replaceAllUsesWith. | |||
2453 | void replaceAllUsesWith(Instruction *Inst, Value *New); | |||
2454 | ||||
2455 | /// Same as Value::mutateType. | |||
2456 | void mutateType(Instruction *Inst, Type *NewTy); | |||
2457 | ||||
2458 | /// Same as IRBuilder::CreateTrunc. | |||
2459 | Value *createTrunc(Instruction *Opnd, Type *Ty); | |||
2460 | ||||
2461 | /// Same as IRBuilder::CreateSExt. | |||
2462 | Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
2463 | ||||
2464 | /// Same as IRBuilder::CreateZExt. | |||
2465 | Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
2466 | ||||
2467 | /// Same as Instruction::moveBefore. | |||
2468 | void moveBefore(Instruction *Inst, Instruction *Before); | |||
2469 | /// @} | |||
2470 | ||||
2471 | private: | |||
2472 | /// The ordered list of actions made so far. | |||
2473 | SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; | |||
2474 | ||||
2475 | using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; | |||
2476 | ||||
2477 | SetOfInstrs &RemovedInsts; | |||
2478 | }; | |||
2479 | ||||
2480 | } // end anonymous namespace | |||
2481 | ||||
2482 | void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, | |||
2483 | Value *NewVal) { | |||
2484 | Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( | |||
2485 | Inst, Idx, NewVal)); | |||
2486 | } | |||
2487 | ||||
2488 | void TypePromotionTransaction::eraseInstruction(Instruction *Inst, | |||
2489 | Value *NewVal) { | |||
2490 | Actions.push_back( | |||
2491 | llvm::make_unique<TypePromotionTransaction::InstructionRemover>( | |||
2492 | Inst, RemovedInsts, NewVal)); | |||
2493 | } | |||
2494 | ||||
2495 | void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, | |||
2496 | Value *New) { | |||
2497 | Actions.push_back( | |||
2498 | llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); | |||
2499 | } | |||
2500 | ||||
2501 | void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { | |||
2502 | Actions.push_back( | |||
2503 | llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); | |||
2504 | } | |||
2505 | ||||
2506 | Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, | |||
2507 | Type *Ty) { | |||
2508 | std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); | |||
2509 | Value *Val = Ptr->getBuiltValue(); | |||
2510 | Actions.push_back(std::move(Ptr)); | |||
2511 | return Val; | |||
2512 | } | |||
2513 | ||||
2514 | Value *TypePromotionTransaction::createSExt(Instruction *Inst, | |||
2515 | Value *Opnd, Type *Ty) { | |||
2516 | std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); | |||
2517 | Value *Val = Ptr->getBuiltValue(); | |||
2518 | Actions.push_back(std::move(Ptr)); | |||
2519 | return Val; | |||
2520 | } | |||
2521 | ||||
2522 | Value *TypePromotionTransaction::createZExt(Instruction *Inst, | |||
2523 | Value *Opnd, Type *Ty) { | |||
2524 | std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); | |||
2525 | Value *Val = Ptr->getBuiltValue(); | |||
2526 | Actions.push_back(std::move(Ptr)); | |||
2527 | return Val; | |||
2528 | } | |||
2529 | ||||
2530 | void TypePromotionTransaction::moveBefore(Instruction *Inst, | |||
2531 | Instruction *Before) { | |||
2532 | Actions.push_back( | |||
2533 | llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( | |||
2534 | Inst, Before)); | |||
2535 | } | |||
2536 | ||||
2537 | TypePromotionTransaction::ConstRestorationPt | |||
2538 | TypePromotionTransaction::getRestorationPoint() const { | |||
2539 | return !Actions.empty() ? Actions.back().get() : nullptr; | |||
2540 | } | |||
2541 | ||||
2542 | void TypePromotionTransaction::commit() { | |||
2543 | for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; | |||
2544 | ++It) | |||
2545 | (*It)->commit(); | |||
2546 | Actions.clear(); | |||
2547 | } | |||
2548 | ||||
2549 | void TypePromotionTransaction::rollback( | |||
2550 | TypePromotionTransaction::ConstRestorationPt Point) { | |||
2551 | while (!Actions.empty() && Point != Actions.back().get()) { | |||
2552 | std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); | |||
2553 | Curr->undo(); | |||
2554 | } | |||
2555 | } | |||
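// Illustrative sketch (not part of the original file): how a caller drives
// this transaction API. `RemovedInsts`, `ExtInst`, `Opnd`, `WideTy`, and
// `StillProfitable` are assumed names for the example.
//
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   Value *Promoted = TPT.createSExt(ExtInst, Opnd, WideTy); // speculative
//   if (!StillProfitable)
//     TPT.rollback(LastKnownGood); // undoes every action after the point
//   else
//     TPT.commit();                // makes all recorded changes permanent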
2556 | ||||
2557 | namespace { | |||
2558 | ||||
2559 | /// A helper class for matching addressing modes. | |||
2560 | /// | |||
2561 | /// This encapsulates the logic for matching the target-legal addressing modes. | |||
2562 | class AddressingModeMatcher { | |||
2563 | SmallVectorImpl<Instruction*> &AddrModeInsts; | |||
2564 | const TargetLowering &TLI; | |||
2565 | const TargetRegisterInfo &TRI; | |||
2566 | const DataLayout &DL; | |||
2567 | ||||
2568 | /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and | |||
2569 | /// the memory instruction that we're computing this address for. | |||
2570 | Type *AccessTy; | |||
2571 | unsigned AddrSpace; | |||
2572 | Instruction *MemoryInst; | |||
2573 | ||||
2574 | /// This is the addressing mode that we're building up. This is | |||
2575 | /// part of the return value of this addressing mode matching stuff. | |||
2576 | ExtAddrMode &AddrMode; | |||
2577 | ||||
2578 | /// The instructions inserted by other CodeGenPrepare optimizations. | |||
2579 | const SetOfInstrs &InsertedInsts; | |||
2580 | ||||
2581 | /// A map from the instructions to their type before promotion. | |||
2582 | InstrToOrigTy &PromotedInsts; | |||
2583 | ||||
2584 | /// The ongoing transaction where every action should be registered. | |||
2585 | TypePromotionTransaction &TPT; | |||
2586 | ||||
2587 | // A GEP which has too large an offset to be folded into the addressing mode. | |||
2588 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP; | |||
2589 | ||||
2590 | /// This is set to true when we should not do profitability checks. | |||
2591 | /// When true, IsProfitableToFoldIntoAddressingMode always returns true. | |||
2592 | bool IgnoreProfitability; | |||
2593 | ||||
2594 | AddressingModeMatcher( | |||
2595 | SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI, | |||
2596 | const TargetRegisterInfo &TRI, Type *AT, unsigned AS, Instruction *MI, | |||
2597 | ExtAddrMode &AM, const SetOfInstrs &InsertedInsts, | |||
2598 | InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT, | |||
2599 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) | |||
2600 | : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), | |||
2601 | DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), | |||
2602 | MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), | |||
2603 | PromotedInsts(PromotedInsts), TPT(TPT), LargeOffsetGEP(LargeOffsetGEP) { | |||
2604 | IgnoreProfitability = false; | |||
2605 | } | |||
2606 | ||||
2607 | public: | |||
2608 | /// Find the maximal addressing mode that a load/store of V can fold, | |||
2609 | /// given an access type of AccessTy. This returns a list of involved | |||
2610 | /// instructions in AddrModeInsts. | |||
2611 | /// \p InsertedInsts The instructions inserted by other CodeGenPrepare | |||
2612 | /// optimizations. | |||
2613 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
2614 | /// \p TPT The ongoing transaction where every action should be registered. | |||
2615 | static ExtAddrMode | |||
2616 | Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst, | |||
2617 | SmallVectorImpl<Instruction *> &AddrModeInsts, | |||
2618 | const TargetLowering &TLI, const TargetRegisterInfo &TRI, | |||
2619 | const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts, | |||
2620 | TypePromotionTransaction &TPT, | |||
2621 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP) { | |||
2622 | ExtAddrMode Result; | |||
2623 | ||||
2624 | bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, AccessTy, AS, | |||
2625 | MemoryInst, Result, InsertedInsts, | |||
2626 | PromotedInsts, TPT, LargeOffsetGEP) | |||
2627 | .matchAddr(V, 0); | |||
2628 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
2629 | return Result; | |||
2630 | } | |||
2631 | ||||
2632 | private: | |||
2633 | bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); | |||
2634 | bool matchAddr(Value *Addr, unsigned Depth); | |||
2635 | bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth, | |||
2636 | bool *MovedAway = nullptr); | |||
2637 | bool isProfitableToFoldIntoAddressingMode(Instruction *I, | |||
2638 | ExtAddrMode &AMBefore, | |||
2639 | ExtAddrMode &AMAfter); | |||
2640 | bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); | |||
2641 | bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, | |||
2642 | Value *PromotedOperand) const; | |||
2643 | }; | |||
2644 | ||||
2645 | /// Keep track of simplification of Phi nodes. | |||
2646 | /// Accepts the set of all phi nodes and erases a phi node from this set | |||
2647 | /// when it is simplified. | |||
2648 | class SimplificationTracker { | |||
2649 | DenseMap<Value *, Value *> Storage; | |||
2650 | const SimplifyQuery &SQ; | |||
2651 | // Tracks newly created Phi nodes. We use a SetVector to get deterministic | |||
2652 | // order when iterating over the set in MatchPhiSet. | |||
2653 | SmallSetVector<PHINode *, 32> AllPhiNodes; | |||
2654 | // Tracks newly created Select nodes. | |||
2655 | SmallPtrSet<SelectInst *, 32> AllSelectNodes; | |||
2656 | ||||
2657 | public: | |||
2658 | SimplificationTracker(const SimplifyQuery &sq) | |||
2659 | : SQ(sq) {} | |||
2660 | ||||
2661 | Value *Get(Value *V) { | |||
2662 | do { | |||
2663 | auto SV = Storage.find(V); | |||
2664 | if (SV == Storage.end()) | |||
2665 | return V; | |||
2666 | V = SV->second; | |||
2667 | } while (true); | |||
2668 | } | |||
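// Example (assumed values): Get follows replacement chains to a fixpoint.
// After Put(%p1, %p2) and Put(%p2, %b), Get(%p1) walks %p1 -> %p2 -> %b and
// returns %b; a value with no entry in Storage is returned unchanged.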
2669 | ||||
2670 | Value *Simplify(Value *Val) { | |||
2671 | SmallVector<Value *, 32> WorkList; | |||
2672 | SmallPtrSet<Value *, 32> Visited; | |||
2673 | WorkList.push_back(Val); | |||
2674 | while (!WorkList.empty()) { | |||
2675 | auto P = WorkList.pop_back_val(); | |||
2676 | if (!Visited.insert(P).second) | |||
2677 | continue; | |||
2678 | if (auto *PI = dyn_cast<Instruction>(P)) | |||
2679 | if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) { | |||
2680 | for (auto *U : PI->users()) | |||
2681 | WorkList.push_back(cast<Value>(U)); | |||
2682 | Put(PI, V); | |||
2683 | PI->replaceAllUsesWith(V); | |||
2684 | if (auto *PHI = dyn_cast<PHINode>(PI)) | |||
2685 | AllPhiNodes.remove(PHI); | |||
2686 | if (auto *Select = dyn_cast<SelectInst>(PI)) | |||
2687 | AllSelectNodes.erase(Select); | |||
2688 | PI->eraseFromParent(); | |||
2689 | } | |||
2690 | } | |||
2691 | return Get(Val); | |||
2692 | } | |||
2693 | ||||
2694 | void Put(Value *From, Value *To) { | |||
2695 | Storage.insert({ From, To }); | |||
2696 | } | |||
2697 | ||||
2698 | void ReplacePhi(PHINode *From, PHINode *To) { | |||
2699 | Value* OldReplacement = Get(From); | |||
2700 | while (OldReplacement != From) { | |||
2701 | From = To; | |||
2702 | To = dyn_cast<PHINode>(OldReplacement); | |||
2703 | OldReplacement = Get(From); | |||
2704 | } | |||
2705 | assert(Get(To) == To && "Replacement PHI node is already replaced."); | |||
2706 | Put(From, To); | |||
2707 | From->replaceAllUsesWith(To); | |||
2708 | AllPhiNodes.remove(From); | |||
2709 | From->eraseFromParent(); | |||
2710 | } | |||
2711 | ||||
2712 | SmallSetVector<PHINode *, 32>& newPhiNodes() { return AllPhiNodes; } | |||
2713 | ||||
2714 | void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); } | |||
2715 | ||||
2716 | void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); } | |||
2717 | ||||
2718 | unsigned countNewPhiNodes() const { return AllPhiNodes.size(); } | |||
2719 | ||||
2720 | unsigned countNewSelectNodes() const { return AllSelectNodes.size(); } | |||
2721 | ||||
2722 | void destroyNewNodes(Type *CommonType) { | |||
2723 | // For safe erasing, replace the uses with dummy value first. | |||
2724 | auto Dummy = UndefValue::get(CommonType); | |||
2725 | for (auto I : AllPhiNodes) { | |||
2726 | I->replaceAllUsesWith(Dummy); | |||
2727 | I->eraseFromParent(); | |||
2728 | } | |||
2729 | AllPhiNodes.clear(); | |||
2730 | for (auto I : AllSelectNodes) { | |||
2731 | I->replaceAllUsesWith(Dummy); | |||
2732 | I->eraseFromParent(); | |||
2733 | } | |||
2734 | AllSelectNodes.clear(); | |||
2735 | } | |||
2736 | }; | |||
2737 | ||||
2738 | /// A helper class for combining addressing modes. | |||
2739 | class AddressingModeCombiner { | |||
2740 | typedef std::pair<Value *, BasicBlock *> ValueInBB; | |||
2741 | typedef DenseMap<ValueInBB, Value *> FoldAddrToValueMapping; | |||
2742 | typedef std::pair<PHINode *, PHINode *> PHIPair; | |||
2743 | ||||
2744 | private: | |||
2745 | /// The addressing modes we've collected. | |||
2746 | SmallVector<ExtAddrMode, 16> AddrModes; | |||
2747 | ||||
2748 | /// The field in which the AddrModes differ, when we have more than one. | |||
2749 | ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; | |||
2750 | ||||
2751 | /// Are the AddrModes that we have all just equal to their original values? | |||
2752 | bool AllAddrModesTrivial = true; | |||
2753 | ||||
2754 | /// Common Type for all different fields in addressing modes. | |||
2755 | Type *CommonType; | |||
2756 | ||||
2757 | /// SimplifyQuery for simplifyInstruction utility. | |||
2758 | const SimplifyQuery &SQ; | |||
2759 | ||||
2760 | /// Original Address. | |||
2761 | ValueInBB Original; | |||
2762 | ||||
2763 | public: | |||
2764 | AddressingModeCombiner(const SimplifyQuery &_SQ, ValueInBB OriginalValue) | |||
2765 | : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {} | |||
2766 | ||||
2767 | /// Get the combined AddrMode | |||
2768 | const ExtAddrMode &getAddrMode() const { | |||
2769 | return AddrModes[0]; | |||
2770 | } | |||
2771 | ||||
2772 | /// Add a new AddrMode if it's compatible with the AddrModes we already | |||
2773 | /// have. | |||
2774 | /// \return True iff we succeeded in doing so. | |||
2775 | bool addNewAddrMode(ExtAddrMode &NewAddrMode) { | |||
2776 | // Note whether we have any non-trivial AddrModes: we need to detect when | |||
2777 | // all AddrModes are trivial, since combining them would then introduce a | |||
2778 | // phi or select that merely duplicates what is already there. | |||
2779 | AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); | |||
2780 | ||||
2781 | // If this is the first addrmode then everything is fine. | |||
2782 | if (AddrModes.empty()) { | |||
2783 | AddrModes.emplace_back(NewAddrMode); | |||
2784 | return true; | |||
2785 | } | |||
2786 | ||||
2787 | // Figure out how different this is from the other address modes, which we | |||
2788 | // can do just by comparing against the first one given that we only care | |||
2789 | // about the cumulative difference. | |||
2790 | ExtAddrMode::FieldName ThisDifferentField = | |||
2791 | AddrModes[0].compare(NewAddrMode); | |||
2792 | if (DifferentField == ExtAddrMode::NoField) | |||
2793 | DifferentField = ThisDifferentField; | |||
2794 | else if (DifferentField != ThisDifferentField) | |||
2795 | DifferentField = ExtAddrMode::MultipleFields; | |||
2796 | ||||
2797 | // If NewAddrMode differs in more than one dimension we cannot handle it. | |||
2798 | bool CanHandle = DifferentField != ExtAddrMode::MultipleFields; | |||
2799 | ||||
2800 | // If Scale Field is different then we reject. | |||
2801 | CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField; | |||
2802 | ||||
2803 | // We must also reject the case where the base offset differs and the | |||
2804 | // scaled register is non-null: we cannot handle it, because the merge of | |||
2805 | // the different offsets would have to be used as the scaled register. | |||
2806 | CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField || | |||
2807 | !NewAddrMode.ScaledReg); | |||
2808 | ||||
2809 | // We must also reject the case where GV differs and a base register is | |||
2810 | // installed, because we want to use the base reg for the merge of GV values. | |||
2811 | CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField || | |||
2812 | !NewAddrMode.HasBaseReg); | |||
2813 | ||||
2814 | // Even if NewAddrMode is the same, we still need to collect it: the | |||
2815 | // original value is different, and later we will need all the original | |||
2816 | // values as anchors when finding the common Phi node. | |||
2817 | if (CanHandle) | |||
2818 | AddrModes.emplace_back(NewAddrMode); | |||
2819 | else | |||
2820 | AddrModes.clear(); | |||
2821 | ||||
2822 | return CanHandle; | |||
2823 | } | |||
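// Example (illustrative): collecting { BaseReg = %b1, BaseOffs = 40 } and
// then { BaseReg = %b2, BaseOffs = 40 } sets DifferentField to BaseRegField
// and keeps both modes; a third mode that additionally changed Scale would
// make DifferentField MultipleFields and clear the collection.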
2824 | ||||
2825 | /// Combine the addressing modes we've collected into a single | |||
2826 | /// addressing mode. | |||
2827 | /// \return True iff we successfully combined them or we only had one so | |||
2828 | /// didn't need to combine them anyway. | |||
2829 | bool combineAddrModes() { | |||
2830 | // If we have no AddrModes then they can't be combined. | |||
2831 | if (AddrModes.size() == 0) | |||
2832 | return false; | |||
2833 | ||||
2834 | // A single AddrMode can trivially be combined. | |||
2835 | if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField) | |||
2836 | return true; | |||
2837 | ||||
2838 | // If the AddrModes we collected are all just equal to the value they are | |||
2839 | // derived from then combining them wouldn't do anything useful. | |||
2840 | if (AllAddrModesTrivial) | |||
2841 | return false; | |||
2842 | ||||
2843 | if (!addrModeCombiningAllowed()) | |||
2844 | return false; | |||
2845 | ||||
2846 | // Build a map between <original value, basic block where we saw it> to | |||
2847 | // value of base register. | |||
2848 | // Bail out if there is no common type. | |||
2849 | FoldAddrToValueMapping Map; | |||
2850 | if (!initializeMap(Map)) | |||
2851 | return false; | |||
2852 | ||||
2853 | Value *CommonValue = findCommon(Map); | |||
2854 | if (CommonValue) | |||
2855 | AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes); | |||
2856 | return CommonValue != nullptr; | |||
2857 | } | |||
2858 | ||||
2859 | private: | |||
2860 | /// Initialize Map with anchor values. For each address seen in some BB, | |||
2861 | /// we record the value of the differing field of that address. | |||
2862 | /// If the address is not an instruction, the basic block is set to null. | |||
2863 | /// At the same time we find a common type for the differing field, which | |||
2864 | /// we will use to create new Phi/Select nodes. It is kept in CommonType. | |||
2865 | /// Return false if no common type is found. | |||
2866 | bool initializeMap(FoldAddrToValueMapping &Map) { | |||
2867 | // Keep track of keys where the value is null. We will need to replace them | |||
2868 | // with a constant null once we know the common type. | |||
2869 | SmallVector<ValueInBB, 2> NullValue; | |||
2870 | Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType()); | |||
2871 | for (auto &AM : AddrModes) { | |||
2872 | BasicBlock *BB = nullptr; | |||
2873 | if (Instruction *I = dyn_cast<Instruction>(AM.OriginalValue)) | |||
2874 | BB = I->getParent(); | |||
2875 | ||||
2876 | Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy); | |||
2877 | if (DV) { | |||
2878 | auto *Type = DV->getType(); | |||
2879 | if (CommonType && CommonType != Type) | |||
2880 | return false; | |||
2881 | CommonType = Type; | |||
2882 | Map[{ AM.OriginalValue, BB }] = DV; | |||
2883 | } else { | |||
2884 | NullValue.push_back({ AM.OriginalValue, BB }); | |||
2885 | } | |||
2886 | } | |||
2887 | assert(CommonType && "At least one non-null value must be!"); | |||
2888 | for (auto VIBB : NullValue) | |||
2889 | Map[VIBB] = Constant::getNullValue(CommonType); | |||
2890 | return true; | |||
2891 | } | |||
2892 | ||||
2893 | /// We have a mapping from pairs (value A, basic block where A was seen) | |||
2894 | /// to values B, where B was a field of the addressing mode represented | |||
2895 | /// by A. We also have an original value C representing an address in some | |||
2896 | /// basic block. Traversing from C through phis and selects, we ended up | |||
2897 | /// with the A's in the map. This utility function tries to find a value V | |||
2898 | /// that is a field of addressing mode C such that, traversing through phi | |||
2899 | /// nodes and selects, we end up at the corresponding values B in the map. | |||
2900 | /// The utility creates new Phis/Selects if needed. | |||
2901 | // The simple example looks as follows: | |||
2902 | // BB1: | |||
2903 | // p1 = b1 + 40 | |||
2904 | // br cond BB2, BB3 | |||
2905 | // BB2: | |||
2906 | // p2 = b2 + 40 | |||
2907 | // br BB3 | |||
2908 | // BB3: | |||
2909 | // p = phi [p1, BB1], [p2, BB2] | |||
2910 | // v = load p | |||
2911 | // Map is | |||
2912 | // <p1, BB1> -> b1 | |||
2913 | // <p2, BB2> -> b2 | |||
2914 | // Request is | |||
2915 | // <p, BB3> -> ? | |||
2916 | // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3 | |||
2917 | Value *findCommon(FoldAddrToValueMapping &Map) { | |||
2918 | // Tracks the simplification of newly created phi nodes. We need this | |||
2919 | // because newly created Phi nodes are added to Map, and simplification | |||
2920 | // of Phi nodes is recursive: a Phi node may be simplified after it has | |||
2921 | // already been added to Map. Using this tracker we can always recover | |||
2922 | // the current value for an entry of Map. | |||
2923 | SimplificationTracker ST(SQ); | |||
2924 | ||||
2925 | // First step, DFS to create PHI nodes for all intermediate blocks. | |||
2926 | // Also fill traverse order for the second step. | |||
2927 | SmallVector<ValueInBB, 32> TraverseOrder; | |||
2928 | InsertPlaceholders(Map, TraverseOrder, ST); | |||
2929 | ||||
2930 | // Second Step, fill new nodes by merged values and simplify if possible. | |||
2931 | FillPlaceholders(Map, TraverseOrder, ST); | |||
2932 | ||||
2933 | if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) { | |||
2934 | ST.destroyNewNodes(CommonType); | |||
2935 | return nullptr; | |||
2936 | } | |||
2937 | ||||
2938 | // Now we'd like to match the new Phi nodes to existing ones. | |||
2939 | unsigned PhiNotMatchedCount = 0; | |||
2940 | if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) { | |||
2941 | ST.destroyNewNodes(CommonType); | |||
2942 | return nullptr; | |||
2943 | } | |||
2944 | ||||
2945 | auto *Result = ST.Get(Map.find(Original)->second); | |||
2946 | if (Result) { | |||
2947 | NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount; | |||
2948 | NumMemoryInstsSelectCreated += ST.countNewSelectNodes(); | |||
2949 | } | |||
2950 | return Result; | |||
2951 | } | |||
2952 | ||||
2953 | /// Try to match PHI node to Candidate. | |||
2954 | /// Matcher tracks the matched Phi nodes. | |||
2955 | bool MatchPhiNode(PHINode *PHI, PHINode *Candidate, | |||
2956 | SmallSetVector<PHIPair, 8> &Matcher, | |||
2957 | SmallSetVector<PHINode *, 32> &PhiNodesToMatch) { | |||
2958 | SmallVector<PHIPair, 8> WorkList; | |||
2959 | Matcher.insert({ PHI, Candidate }); | |||
2960 | WorkList.push_back({ PHI, Candidate }); | |||
2961 | SmallSet<PHIPair, 8> Visited; | |||
2962 | while (!WorkList.empty()) { | |||
2963 | auto Item = WorkList.pop_back_val(); | |||
2964 | if (!Visited.insert(Item).second) | |||
2965 | continue; | |||
2966 | // We iterate over all incoming values of the Phis to compare them. | |||
2967 | // If two incoming values differ, both of them are Phis, the first one | |||
2968 | // is a Phi we added (and thus subject to matching), and both are in the | |||
2969 | // same basic block, then our pair can match provided those values match. | |||
2970 | // So we record them as matching and add them to the worklist to verify. | |||
2971 | for (auto B : Item.first->blocks()) { | |||
2972 | Value *FirstValue = Item.first->getIncomingValueForBlock(B); | |||
2973 | Value *SecondValue = Item.second->getIncomingValueForBlock(B); | |||
2974 | if (FirstValue == SecondValue) | |||
2975 | continue; | |||
2976 | ||||
2977 | PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue); | |||
2978 | PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue); | |||
2979 | ||||
2980 | // If one of them is not a Phi, or | |||
2981 | // the first one is not a Phi node from the set we'd like to match, or | |||
2982 | // the Phi nodes are from different basic blocks, then | |||
2983 | // we will not be able to match. | |||
2984 | if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) || | |||
2985 | FirstPhi->getParent() != SecondPhi->getParent()) | |||
2986 | return false; | |||
2987 | ||||
2988 | // If we already matched them then continue. | |||
2989 | if (Matcher.count({ FirstPhi, SecondPhi })) | |||
2990 | continue; | |||
2991 | // So the values are different and are not yet known to match. We | |||
2992 | // require them to match. | |||
2993 | Matcher.insert({ FirstPhi, SecondPhi }); | |||
2994 | // But we must verify that. | |||
2995 | WorkList.push_back({ FirstPhi, SecondPhi }); | |||
2996 | } | |||
2997 | } | |||
2998 | return true; | |||
2999 | } | |||
3000 | ||||
3001 | /// For the given set of PHI nodes (in the SimplificationTracker) try | |||
3002 | /// to find their equivalents. | |||
3003 | /// Returns false if the matching fails and creation of new Phis is disabled. | |||
3004 | bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes, | |||
3005 | unsigned &PhiNotMatchedCount) { | |||
3006 | // Use a SetVector for Matched to make sure we do replacements (ReplacePhi) | |||
3007 | // in a deterministic order below. | |||
3008 | SmallSetVector<PHIPair, 8> Matched; | |||
3009 | SmallPtrSet<PHINode *, 8> WillNotMatch; | |||
3010 | SmallSetVector<PHINode *, 32> &PhiNodesToMatch = ST.newPhiNodes(); | |||
3011 | while (PhiNodesToMatch.size()) { | |||
3012 | PHINode *PHI = *PhiNodesToMatch.begin(); | |||
3013 | ||||
3014 | // Record this Phi itself: if no Phi in the basic block matches, we fail. | |||
3015 | WillNotMatch.clear(); | |||
3016 | WillNotMatch.insert(PHI); | |||
3017 | ||||
3018 | // Traverse all Phis until we find an equivalent one or fail to do so. | |||
3019 | bool IsMatched = false; | |||
3020 | for (auto &P : PHI->getParent()->phis()) { | |||
3021 | if (&P == PHI) | |||
3022 | continue; | |||
3023 | if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch))) | |||
3024 | break; | |||
3025 | // If it does not match, collect all the Phi nodes from the matcher: | |||
3026 | // if we end up with no match, all of these Phi nodes will not match | |||
3027 | // later either. | |||
3028 | for (auto M : Matched) | |||
3029 | WillNotMatch.insert(M.first); | |||
3030 | Matched.clear(); | |||
3031 | } | |||
3032 | if (IsMatched) { | |||
3033 | // Replace all matched values and erase them. | |||
3034 | for (auto MV : Matched) | |||
3035 | ST.ReplacePhi(MV.first, MV.second); | |||
3036 | Matched.clear(); | |||
3037 | continue; | |||
3038 | } | |||
3039 | // If we are not allowed to create new nodes then bail out. | |||
3040 | if (!AllowNewPhiNodes) | |||
3041 | return false; | |||
3042 | // Just remove all seen values in matcher. They will not match anything. | |||
3043 | PhiNotMatchedCount += WillNotMatch.size(); | |||
3044 | for (auto *P : WillNotMatch) | |||
3045 | PhiNodesToMatch.remove(P); | |||
3046 | } | |||
3047 | return true; | |||
3048 | } | |||
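// Example (assumed IR): if the block already contains
//   %old = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
// and the combiner created
//   %sunk_phi = phi i64 [ %b1, %BB1 ], [ %b2, %BB2 ]
// then MatchPhiNode pairs them and ReplacePhi reuses %old, so no new Phi
// survives in the final IR.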
3049 | /// Fill the placeholder with values from predecessors and simplify it. | |||
3050 | void FillPlaceholders(FoldAddrToValueMapping &Map, | |||
3051 | SmallVectorImpl<ValueInBB> &TraverseOrder, | |||
3052 | SimplificationTracker &ST) { | |||
3053 | while (!TraverseOrder.empty()) { | |||
3054 | auto Current = TraverseOrder.pop_back_val(); | |||
3055 | assert(Map.find(Current) != Map.end() && "No node to fill!!!"); | |||
3056 | Value *CurrentValue = Current.first; | |||
3057 | BasicBlock *CurrentBlock = Current.second; | |||
3058 | Value *V = Map[Current]; | |||
3059 | ||||
3060 | if (SelectInst *Select = dyn_cast<SelectInst>(V)) { | |||
3061 | // CurrentValue must also be a Select. | |||
3062 | auto *CurrentSelect = cast<SelectInst>(CurrentValue); | |||
3063 | auto *TrueValue = CurrentSelect->getTrueValue(); | |||
3064 | ValueInBB TrueItem = { TrueValue, isa<Instruction>(TrueValue) | |||
3065 | ? CurrentBlock | |||
3066 | : nullptr }; | |||
3067 | assert(Map.find(TrueItem) != Map.end() && "No True Value!"); | |||
3068 | Select->setTrueValue(ST.Get(Map[TrueItem])); | |||
3069 | auto *FalseValue = CurrentSelect->getFalseValue(); | |||
3070 | ValueInBB FalseItem = { FalseValue, isa<Instruction>(FalseValue) | |||
3071 | ? CurrentBlock | |||
3072 | : nullptr }; | |||
3073 | assert(Map.find(FalseItem) != Map.end() && "No False Value!"); | |||
3074 | Select->setFalseValue(ST.Get(Map[FalseItem])); | |||
3075 | } else { | |||
3076 | // Must be a Phi node then. | |||
3077 | PHINode *PHI = cast<PHINode>(V); | |||
3078 | // Fill the Phi node with values from predecessors. | |||
3079 | bool IsDefinedInThisBB = | |||
3080 | cast<Instruction>(CurrentValue)->getParent() == CurrentBlock; | |||
3081 | auto *CurrentPhi = dyn_cast<PHINode>(CurrentValue); | |||
3082 | for (auto B : predecessors(CurrentBlock)) { | |||
3083 | Value *PV = IsDefinedInThisBB | |||
3084 | ? CurrentPhi->getIncomingValueForBlock(B) | |||
3085 | : CurrentValue; | |||
3086 | ValueInBB item = { PV, isa<Instruction>(PV) ? B : nullptr }; | |||
3087 | assert(Map.find(item) != Map.end() && "No predecessor Value!"); | |||
3088 | PHI->addIncoming(ST.Get(Map[item]), B); | |||
3089 | } | |||
3090 | } | |||
3091 | // Simplify if possible. | |||
3092 | Map[Current] = ST.Simplify(V); | |||
3093 | } | |||
3094 | } | |||
3095 | ||||
3096 | /// Starting from the given value, recursively iterates over predecessors | |||
3097 | /// up to known ending values represented in the map. For each traversed | |||
3098 | /// block, inserts a placeholder Phi or Select. | |||
3099 | /// Reports all newly created Phi/Select nodes by adding them to the set. | |||
3100 | /// Also reports the order in which basic blocks have been traversed. | |||
3101 | void InsertPlaceholders(FoldAddrToValueMapping &Map, | |||
3102 | SmallVectorImpl<ValueInBB> &TraverseOrder, | |||
3103 | SimplificationTracker &ST) { | |||
3104 | SmallVector<ValueInBB, 32> Worklist; | |||
3105 | assert((isa<PHINode>(Original.first) || isa<SelectInst>(Original.first)) && | |||
3106 | "Address must be a Phi or Select node"); | |||
3107 | auto *Dummy = UndefValue::get(CommonType); | |||
3108 | Worklist.push_back(Original); | |||
3109 | while (!Worklist.empty()) { | |||
3110 | auto Current = Worklist.pop_back_val(); | |||
3111 | // If the value is not an instruction, it is something global, a constant | |||
3112 | // or a parameter, so it is observable in any block. | |||
3113 | // Set the block to null to denote that. | |||
3114 | // Note that this is also how the anchors were built. | |||
3115 | if (!isa<Instruction>(Current.first)) | |||
3116 | Current.second = nullptr; | |||
3117 | // If it is already visited or is an ending value, then skip it. | |||
3118 | if (Map.find(Current) != Map.end()) | |||
3119 | continue; | |||
3120 | TraverseOrder.push_back(Current); | |||
3121 | ||||
3122 | Value *CurrentValue = Current.first; | |||
3123 | BasicBlock *CurrentBlock = Current.second; | |||
3124 | // CurrentValue must be a Phi node or select. All others must be covered | |||
3125 | // by anchors. | |||
3126 | Instruction *CurrentI = cast<Instruction>(CurrentValue); | |||
3127 | bool IsDefinedInThisBB = CurrentI->getParent() == CurrentBlock; | |||
3128 | ||||
3129 | unsigned PredCount = pred_size(CurrentBlock); | |||
3130 | // If the current value is not defined in this basic block, we are | |||
3131 | // interested in the values in the predecessors. | |||
3132 | if (!IsDefinedInThisBB) { | |||
3133 | assert(PredCount && "Unreachable block?!"); | |||
3134 | PHINode *PHI = PHINode::Create(CommonType, PredCount, "sunk_phi", | |||
3135 | &CurrentBlock->front()); | |||
3136 | Map[Current] = PHI; | |||
3137 | ST.insertNewPhi(PHI); | |||
3138 | // Add all predecessors in work list. | |||
3139 | for (auto B : predecessors(CurrentBlock)) | |||
3140 | Worklist.push_back({ CurrentValue, B }); | |||
3141 | continue; | |||
3142 | } | |||
3143 | // Value is defined in this basic block. | |||
3144 | if (SelectInst *OrigSelect = dyn_cast<SelectInst>(CurrentI)) { | |||
3145 | // Is it OK to get metadata from OrigSelect?! | |||
3146 | // Create a Select placeholder with dummy value. | |||
3147 | SelectInst *Select = | |||
3148 | SelectInst::Create(OrigSelect->getCondition(), Dummy, Dummy, | |||
3149 | OrigSelect->getName(), OrigSelect, OrigSelect); | |||
3150 | Map[Current] = Select; | |||
3151 | ST.insertNewSelect(Select); | |||
3152 | // We are interested in True and False value in this basic block. | |||
3153 | Worklist.push_back({ OrigSelect->getTrueValue(), CurrentBlock }); | |||
3154 | Worklist.push_back({ OrigSelect->getFalseValue(), CurrentBlock }); | |||
3155 | } else { | |||
3156 | // It must be a Phi node then. | |||
3157 | auto *CurrentPhi = cast<PHINode>(CurrentI); | |||
3158 | // Create new Phi node for merge of bases. | |||
3159 | assert(PredCount && "Unreachable block?!"); | |||
3160 | PHINode *PHI = PHINode::Create(CommonType, PredCount, "sunk_phi", | |||
3161 | &CurrentBlock->front()); | |||
3162 | Map[Current] = PHI; | |||
3163 | ST.insertNewPhi(PHI); | |||
3164 | ||||
3165 | // Add all predecessors in work list. | |||
3166 | for (auto B : predecessors(CurrentBlock)) | |||
3167 | Worklist.push_back({ CurrentPhi->getIncomingValueForBlock(B), B }); | |||
3168 | } | |||
3169 | } | |||
3170 | } | |||
3171 | ||||
3172 | bool addrModeCombiningAllowed() { | |||
3173 | if (DisableComplexAddrModes) | |||
3174 | return false; | |||
3175 | switch (DifferentField) { | |||
3176 | default: | |||
3177 | return false; | |||
3178 | case ExtAddrMode::BaseRegField: | |||
3179 | return AddrSinkCombineBaseReg; | |||
3180 | case ExtAddrMode::BaseGVField: | |||
3181 | return AddrSinkCombineBaseGV; | |||
3182 | case ExtAddrMode::BaseOffsField: | |||
3183 | return AddrSinkCombineBaseOffs; | |||
3184 | case ExtAddrMode::ScaledRegField: | |||
3185 | return AddrSinkCombineScaledReg; | |||
3186 | } | |||
3187 | } | |||
3188 | }; | |||
3189 | } // end anonymous namespace | |||
3190 | ||||
3191 | /// Try adding ScaleReg*Scale to the current addressing mode. | |||
3192 | /// Return true and update AddrMode if this addr mode is legal for the target, | |||
3193 | /// false if not. | |||
3194 | bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, | |||
3195 | unsigned Depth) { | |||
3196 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing | |||
3197 | // mode. Just process that directly. | |||
3198 | if (Scale == 1) | |||
3199 | return matchAddr(ScaleReg, Depth); | |||
3200 | ||||
3201 | // If the scale is 0, it takes nothing to add this. | |||
3202 | if (Scale == 0) | |||
3203 | return true; | |||
3204 | ||||
3205 | // If we already have a scale of this value, we can add to it, otherwise, we | |||
3206 | // need an available scale field. | |||
3207 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) | |||
3208 | return false; | |||
3209 | ||||
3210 | ExtAddrMode TestAddrMode = AddrMode; | |||
3211 | ||||
3212 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like | |||
3213 | // [A+B + A*7] -> [B+A*8]. | |||
3214 | TestAddrMode.Scale += Scale; | |||
3215 | TestAddrMode.ScaledReg = ScaleReg; | |||
3216 | ||||
3217 | // If the new address isn't legal, bail out. | |||
3218 | if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) | |||
3219 | return false; | |||
3220 | ||||
3221 | // It was legal, so commit it. | |||
3222 | AddrMode = TestAddrMode; | |||
3223 | ||||
3224 | // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now | |||
3225 | // to see if ScaleReg is actually X+C. If so, we can turn this into adding | |||
3226 | // X*Scale + C*Scale to addr mode. | |||
3227 | ConstantInt *CI = nullptr; Value *AddLHS = nullptr; | |||
3228 | if (isa<Instruction>(ScaleReg) && // not a constant expr. | |||
3229 | match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { | |||
3230 | TestAddrMode.ScaledReg = AddLHS; | |||
3231 | TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; | |||
3232 | ||||
3233 | // If this addressing mode is legal, commit it and remember that we folded | |||
3234 | // this instruction. | |||
3235 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { | |||
3236 | AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); | |||
3237 | AddrMode = TestAddrMode; | |||
3238 | return true; | |||
3239 | } | |||
3240 | } | |||
3241 | ||||
3242 | // Otherwise, not (x+c)*scale, just return what we have. | |||
3243 | return true; | |||
3244 | } | |||
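// Example (illustrative IR): matching ScaleReg = %sum, Scale = 4 where
//   %sum = add i64 %x, 3
// first commits { ScaledReg = %sum, Scale = 4 }; the (X + C) check then
// refines this to { ScaledReg = %x, Scale = 4, BaseOffs += 12 } when that
// mode is also legal for the target.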
3245 | ||||
3246 | /// This is a little filter, which returns true if an addressing computation | |||
3247 | /// involving I might be folded into a load/store accessing it. | |||
3248 | /// This doesn't need to be perfect, but needs to accept at least | |||
3249 | /// the set of instructions that matchOperationAddr can. | |||
3250 | static bool MightBeFoldableInst(Instruction *I) { | |||
3251 | switch (I->getOpcode()) { | |||
3252 | case Instruction::BitCast: | |||
3253 | case Instruction::AddrSpaceCast: | |||
3254 | // Don't touch identity bitcasts. | |||
3255 | if (I->getType() == I->getOperand(0)->getType()) | |||
3256 | return false; | |||
3257 | return I->getType()->isIntOrPtrTy(); | |||
3258 | case Instruction::PtrToInt: | |||
3259 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
3260 | return true; | |||
3261 | case Instruction::IntToPtr: | |||
3262 | // We know the input is intptr_t, so this is foldable. | |||
3263 | return true; | |||
3264 | case Instruction::Add: | |||
3265 | return true; | |||
3266 | case Instruction::Mul: | |||
3267 | case Instruction::Shl: | |||
3268 | // Can only handle X*C and X << C. | |||
3269 | return isa<ConstantInt>(I->getOperand(1)); | |||
3270 | case Instruction::GetElementPtr: | |||
3271 | return true; | |||
3272 | default: | |||
3273 | return false; | |||
3274 | } | |||
3275 | } | |||
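// Example: for "%g = getelementptr i8, i8* %p, i64 40" feeding a load, this
// returns true (the GetElementPtr case), so the address computation is a
// candidate for folding into the load's addressing mode.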
3276 | ||||
3277 | /// Check whether or not \p Val is a legal instruction for \p TLI. | |||
3278 | /// \note \p Val is assumed to be the product of some type promotion. | |||
3279 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed | |||
3280 | /// to be legal, as the non-promoted value would have had the same state. | |||
3281 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, | |||
3282 | const DataLayout &DL, Value *Val) { | |||
3283 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); | |||
3284 | if (!PromotedInst) | |||
3285 | return false; | |||
3286 | int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); | |||
3287 | // If the ISDOpcode is undefined, it was undefined before the promotion. | |||
3288 | if (!ISDOpcode) | |||
3289 | return true; | |||
3290 | // Otherwise, check if the promoted instruction is legal or not. | |||
3291 | return TLI.isOperationLegalOrCustom( | |||
3292 | ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); | |||
3293 | } | |||
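// Sketch (assumed target behavior): if Val is "add i64 %a, %b" obtained by
// promoting an i32 add, this amounts to asking
//   TLI.isOperationLegalOrCustom(ISD::ADD, TLI.getValueType(DL, Val->getType()))
// which a typical 64-bit target reports as legal.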
3294 | ||||
3295 | namespace { | |||
3296 | ||||
3297 | /// Helper class to perform type promotion. | |||
3298 | class TypePromotionHelper { | |||
3299 | /// Utility function to add a promoted instruction \p ExtOpnd to | |||
3300 | /// \p PromotedInsts and record the type of extension we have seen. | |||
3301 | static void addPromotedInst(InstrToOrigTy &PromotedInsts, | |||
3302 | Instruction *ExtOpnd, | |||
3303 | bool IsSExt) { | |||
3304 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | |||
3305 | InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd); | |||
3306 | if (It != PromotedInsts.end()) { | |||
3307 | // If the new extension is the same as the original, the information in | |||
3308 | // PromotedInsts[ExtOpnd] is still correct. | |||
3309 | if (It->second.getInt() == ExtTy) | |||
3310 | return; | |||
3311 | ||||
3312 | // The new extension is different from the old one, so we invalidate | |||
3313 | // the type information by setting the extension type to | |||
3314 | // BothExtension. | |||
3315 | ExtTy = BothExtension; | |||
3316 | } | |||
3317 | PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy); | |||
3318 | } | |||
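// Example: after promoting "sext i8 %v to i32", the entry for the promoted
// instruction records (i8, SignExtension); if a zext of the same instruction
// is promoted later, the entry degrades to BothExtension and getOrigType()
// below stops returning the original type for either extension kind.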
3319 | ||||
3320 | /// Utility function to query the original type of instruction \p Opnd | |||
3321 | /// with a matched extension type. If the extension doesn't match, we | |||
3322 | /// cannot use the information we had on the original type. | |||
3323 | /// BothExtension doesn't match any extension type. | |||
3324 | static const Type *getOrigType(const InstrToOrigTy &PromotedInsts, | |||
3325 | Instruction *Opnd, | |||
3326 | bool IsSExt) { | |||
3327 | ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension; | |||
3328 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); | |||
3329 | if (It != PromotedInsts.end() && It->second.getInt() == ExtTy) | |||
3330 | return It->second.getPointer(); | |||
3331 | return nullptr; | |||
3332 | } | |||
3333 | ||||
3334 | /// Utility function to check whether or not a sign or zero extension | |||
3335 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by | |||
3336 | /// either using the operands of \p Inst or promoting \p Inst. | |||
3337 | /// The type of the extension is defined by \p IsSExt. | |||
3338 | /// In other words, check if: | |||
3339 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. | |||
3340 | /// #1 Promotion applies: | |||
3341 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). | |||
3342 | /// #2 Operand reuses: | |||
3343 | /// ext opnd1 to ConsideredExtType. | |||
3344 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
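| /// For illustration only (a sketch, assuming i8 promoted to i32 and | |||
| /// IsSExt == true): | |||
| /// sext i32 (add nsw i8 %a, %b) | |||
| /// #1 becomes: add nsw i32 (sext %a), (sext %b). | |||
| /// sext i32 (sext i8 %c to i16) | |||
| /// #2 becomes: sext i32 %c. | |||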
3345 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, | |||
3346 | const InstrToOrigTy &PromotedInsts, bool IsSExt); | |||
3347 | ||||
3348 | /// Utility function to determine if \p OpIdx should be promoted when | |||
3349 | /// promoting \p Inst. | |||
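| /// E.g., in "select i1 %cond, i16 %a, i16 %b", operand 0 (%cond) keeps | |||
| /// its i1 type; only the true/false values are candidates for extension. | |||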
3350 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { | |||
3351 | return !(isa<SelectInst>(Inst) && OpIdx == 0); | |||
3352 | } | |||
3353 | ||||
3354 | /// Utility function to promote the operand of \p Ext when this | |||
3355 | /// operand is a promotable trunc or sext or zext. | |||
3356 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3357 | /// \p CreatedInstsCost[out] contains the cost of all instructions | |||
3358 | /// created to promote the operand of Ext. | |||
3359 | /// Newly added extensions are inserted in \p Exts. | |||
3360 | /// Newly added truncates are inserted in \p Truncs. | |||
3361 | /// Should never be called directly. | |||
3362 | /// \return The promoted value which is used instead of Ext. | |||
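| /// For instance (illustrative): sext i32 (zext i8 %x to i16) is rewritten | |||
| /// to zext i32 %x, and sext i32 (sext i8 %x to i16) to sext i32 %x. | |||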
3363 | static Value *promoteOperandForTruncAndAnyExt( | |||
3364 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3365 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3366 | SmallVectorImpl<Instruction *> *Exts, | |||
3367 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); | |||
3368 | ||||
3369 | /// Utility function to promote the operand of \p Ext when this | |||
3370 | /// operand is promotable and is not a supported trunc or sext. | |||
3371 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3372 | /// \p CreatedInstsCost[out] contains the cost of all the instructions | |||
3373 | /// created to promote the operand of Ext. | |||
3374 | /// Newly added extensions are inserted in \p Exts. | |||
3375 | /// Newly added truncates are inserted in \p Truncs. | |||
3376 | /// Should never be called directly. | |||
3377 | /// \return The promoted value which is used instead of Ext. | |||
3378 | static Value *promoteOperandForOther(Instruction *Ext, | |||
3379 | TypePromotionTransaction &TPT, | |||
3380 | InstrToOrigTy &PromotedInsts, | |||
3381 | unsigned &CreatedInstsCost, | |||
3382 | SmallVectorImpl<Instruction *> *Exts, | |||
3383 | SmallVectorImpl<Instruction *> *Truncs, | |||
3384 | const TargetLowering &TLI, bool IsSExt); | |||
3385 | ||||
3386 | /// \see promoteOperandForOther. | |||
3387 | static Value *signExtendOperandForOther( | |||
3388 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3389 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3390 | SmallVectorImpl<Instruction *> *Exts, | |||
3391 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
3392 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
3393 | Exts, Truncs, TLI, true); | |||
3394 | } | |||
3395 | ||||
3396 | /// \see promoteOperandForOther. | |||
3397 | static Value *zeroExtendOperandForOther( | |||
3398 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3399 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3400 | SmallVectorImpl<Instruction *> *Exts, | |||
3401 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
3402 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
3403 | Exts, Truncs, TLI, false); | |||
3404 | } | |||
3405 | ||||
3406 | public: | |||
3407 | /// Type for the utility function that promotes the operand of Ext. | |||
3408 | using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, | |||
3409 | InstrToOrigTy &PromotedInsts, | |||
3410 | unsigned &CreatedInstsCost, | |||
3411 | SmallVectorImpl<Instruction *> *Exts, | |||
3412 | SmallVectorImpl<Instruction *> *Truncs, | |||
3413 | const TargetLowering &TLI); | |||
3414 | ||||
3415 | /// Given a sign/zero extend instruction \p Ext, return the appropriate | |||
3416 | /// action to promote the operand of \p Ext instead of using Ext. | |||
3417 | /// \return NULL if no promotable action is possible with the current | |||
3418 | /// extension. | |||
3419 | /// \p InsertedInsts keeps track of all the instructions inserted by the | |||
3420 | /// other CodeGenPrepare optimizations. This information is important | |||
3421 | /// because we do not want to promote these instructions as CodeGenPrepare | |||
3422 | /// will reinsert them later. Thus creating an infinite loop: create/remove. | |||
3423 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3424 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
3425 | const TargetLowering &TLI, | |||
3426 | const InstrToOrigTy &PromotedInsts); | |||
3427 | }; | |||
3428 | ||||
3429 | } // end anonymous namespace | |||
3430 | ||||
3431 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, | |||
3432 | Type *ConsideredExtType, | |||
3433 | const InstrToOrigTy &PromotedInsts, | |||
3434 | bool IsSExt) { | |||
3435 | // The promotion helper does not know how to deal with vector types yet. | |||
3436 | // To be able to fix that, we would need to fix the places where we | |||
3437 | // statically extend, e.g., constants and such. | |||
3438 | if (Inst->getType()->isVectorTy()) | |||
3439 | return false; | |||
3440 | ||||
3441 | // We can always get through zext. | |||
3442 | if (isa<ZExtInst>(Inst)) | |||
3443 | return true; | |||
3444 | ||||
3445 | // sext(sext) is ok too. | |||
3446 | if (IsSExt && isa<SExtInst>(Inst)) | |||
3447 | return true; | |||
3448 | ||||
3449 | // We can get through a binary operator if it is legal. In other words, the | |||
3450 | // binary operator must have a nuw or nsw flag. | |||
3451 | const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); | |||
3452 | if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && | |||
3453 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || | |||
3454 | (IsSExt && BinOp->hasNoSignedWrap()))) | |||
3455 | return true; | |||
3456 | ||||
3457 | // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst)); likewise for or. | |||
3458 | if ((Inst->getOpcode() == Instruction::And || | |||
3459 | Inst->getOpcode() == Instruction::Or)) | |||
3460 | return true; | |||
3461 | ||||
3462 | // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst)) | |||
3463 | if (Inst->getOpcode() == Instruction::Xor) { | |||
3464 | const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)); | |||
3465 | // Make sure it is not a NOT. | |||
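| // (e.g., "xor i8 %x, -1" is a NOT and is rejected here.) | |||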
3466 | if (Cst && !Cst->getValue().isAllOnesValue()) | |||
3467 | return true; | |||
3468 | } | |||
3469 | ||||
3470 | // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst)) | |||
3471 | // It may change a poisoned value into a regular value, like | |||
3472 | // zext i32 (lshr i8 %val, 12) --> lshr i32 (zext i8 %val), 12 | |||
3473 | // (the original value may be poison; the transformed one is regular) | |||
3474 | // It should be OK since undef covers any valid value. | |||
3475 | if (Inst->getOpcode() == Instruction::LShr && !IsSExt) | |||
3476 | return true; | |||
3477 | ||||
3478 | // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst) | |||
3479 | // It may change a poisoned value into a regular value, like | |||
3480 | // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12 | |||
3481 | // (the original value may be poison; the transformed one is regular) | |||
3482 | // It should be OK since undef covers any valid value. | |||
3483 | if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) { | |||
3484 | const Instruction *ExtInst = | |||
3485 | dyn_cast<const Instruction>(*Inst->user_begin()); | |||
3486 | if (ExtInst && ExtInst->hasOneUse()) { | |||
3487 | const Instruction *AndInst = | |||
3488 | dyn_cast<const Instruction>(*ExtInst->user_begin()); | |||
3489 | if (AndInst && AndInst->getOpcode() == Instruction::And) { | |||
3490 | const ConstantInt *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1)); | |||
3491 | if (Cst && | |||
3492 | Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth())) | |||
3493 | return true; | |||
3494 | } | |||
3495 | } | |||
3496 | } | |||
3497 | ||||
3498 | // Check if we can do the following simplification. | |||
3499 | // ext(trunc(opnd)) --> ext(opnd) | |||
3500 | if (!isa<TruncInst>(Inst)) | |||
3501 | return false; | |||
3502 | ||||
3503 | Value *OpndVal = Inst->getOperand(0); | |||
3504 | // Check if we can use this operand in the extension. | |||
3505 | // If the type is larger than the result type of the extension, we cannot. | |||
3506 | if (!OpndVal->getType()->isIntegerTy() || | |||
3507 | OpndVal->getType()->getIntegerBitWidth() > | |||
3508 | ConsideredExtType->getIntegerBitWidth()) | |||
3509 | return false; | |||
3510 | ||||
3511 | // If the operand of the truncate is not an instruction, we will not have | |||
3512 | // any information on the dropped bits. | |||
3513 | // (Actually we could for constants, but it is not worth the extra logic.) | |||
3514 | Instruction *Opnd = dyn_cast<Instruction>(OpndVal); | |||
3515 | if (!Opnd) | |||
3516 | return false; | |||
3517 | ||||
3518 | // Check if the source type is narrow enough. | |||
3519 | // I.e., check that the trunc just drops extended bits of the same kind as | |||
3520 | // the extension. | |||
3521 | // #1 get the type of the operand and check the kind of the extended bits. | |||
3522 | const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt); | |||
3523 | if (!OpndType) { | |||
3524 | if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) | |||
3525 | OpndType = Opnd->getOperand(0)->getType(); | |||
3526 | else | |||
3527 | return false; | |||
3528 | } | |||
3529 | ||||
3530 | // #2 check that the truncate just drops extended bits. | |||
3531 | return Inst->getType()->getIntegerBitWidth() >= | |||
3532 | OpndType->getIntegerBitWidth(); | |||
3533 | } | |||
3534 | ||||
3535 | TypePromotionHelper::Action TypePromotionHelper::getAction( | |||
3536 | Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
3537 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { | |||
3538 | assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | |||
3539 | "Unexpected instruction type"); | |||
3540 | Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); | |||
3541 | Type *ExtTy = Ext->getType(); | |||
3542 | bool IsSExt = isa<SExtInst>(Ext); | |||
3543 | // If the operand of the extension is not an instruction, we cannot | |||
3544 | // get through. | |||
3545 | // If it is, check that we can get through it. | |||
3546 | if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) | |||
3547 | return nullptr; | |||
3548 | ||||
3549 | // Do not promote if the operand has been added by codegenprepare. | |||
3550 | // Otherwise, it means we are undoing an optimization that is likely to be | |||
3551 | // redone, thus causing potential infinite loop. | |||
3552 | if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) | |||
3553 | return nullptr; | |||
3554 | ||||
3555 | // SExt, ZExt or Trunc instructions. | |||
3556 | // Return the related handler. | |||
3557 | if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || | |||
3558 | isa<ZExtInst>(ExtOpnd)) | |||
3559 | return promoteOperandForTruncAndAnyExt; | |||
3560 | ||||
3561 | // Regular instruction. | |||
3562 | // Abort early if we will have to insert non-free instructions. | |||
3563 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) | |||
3564 | return nullptr; | |||
3565 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; | |||
3566 | } | |||
3567 | ||||
3568 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( | |||
3569 | Instruction *SExt, TypePromotionTransaction &TPT, | |||
3570 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3571 | SmallVectorImpl<Instruction *> *Exts, | |||
3572 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
3573 | // By construction, the operand of SExt is an instruction. Otherwise we cannot | |||
3574 | // get through it and this method should not be called. | |||
3575 | Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); | |||
3576 | Value *ExtVal = SExt; | |||
3577 | bool HasMergedNonFreeExt = false; | |||
3578 | if (isa<ZExtInst>(SExtOpnd)) { | |||
3579 | // Replace s|zext(zext(opnd)) | |||
3580 | // => zext(opnd). | |||
3581 | HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); | |||
3582 | Value *ZExt = | |||
3583 | TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); | |||
3584 | TPT.replaceAllUsesWith(SExt, ZExt); | |||
3585 | TPT.eraseInstruction(SExt); | |||
3586 | ExtVal = ZExt; | |||
3587 | } else { | |||
3588 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) | |||
3589 | // => z|sext(opnd). | |||
3590 | TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); | |||
3591 | } | |||
3592 | CreatedInstsCost = 0; | |||
3593 | ||||
3594 | // Remove dead code. | |||
3595 | if (SExtOpnd->use_empty()) | |||
3596 | TPT.eraseInstruction(SExtOpnd); | |||
3597 | ||||
3598 | // Check if the extension is still needed. | |||
3599 | Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); | |||
3600 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { | |||
3601 | if (ExtInst) { | |||
3602 | if (Exts) | |||
3603 | Exts->push_back(ExtInst); | |||
3604 | CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; | |||
3605 | } | |||
3606 | return ExtVal; | |||
3607 | } | |||
3608 | ||||
3609 | // At this point we have: ext ty opnd to ty. | |||
3610 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. | |||
3611 | Value *NextVal = ExtInst->getOperand(0); | |||
3612 | TPT.eraseInstruction(ExtInst, NextVal); | |||
3613 | return NextVal; | |||
3614 | } | |||
3615 | ||||
3616 | Value *TypePromotionHelper::promoteOperandForOther( | |||
3617 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3618 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3619 | SmallVectorImpl<Instruction *> *Exts, | |||
3620 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, | |||
3621 | bool IsSExt) { | |||
3622 | // By construction, the operand of Ext is an instruction. Otherwise we cannot | |||
3623 | // get through it and this method should not be called. | |||
3624 | Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); | |||
3625 | CreatedInstsCost = 0; | |||
3626 | if (!ExtOpnd->hasOneUse()) { | |||
3627 | // ExtOpnd will be promoted. | |||
3628 | // All its uses, but Ext, will need to use a truncated value of the | |||
3629 | // promoted version. | |||
3630 | // Create the truncate now. | |||
3631 | Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); | |||
3632 | if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { | |||
3633 | // Insert it just after the definition. | |||
3634 | ITrunc->moveAfter(ExtOpnd); | |||
3635 | if (Truncs) | |||
3636 | Truncs->push_back(ITrunc); | |||
3637 | } | |||
3638 | ||||
3639 | TPT.replaceAllUsesWith(ExtOpnd, Trunc); | |||
3640 | // Restore the operand of Ext (which has been replaced by the previous call | |||
3641 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. | |||
3642 | TPT.setOperand(Ext, 0, ExtOpnd); | |||
3643 | } | |||
3644 | ||||
3645 | // Get through the Instruction: | |||
3646 | // 1. Update its type. | |||
3647 | // 2. Replace the uses of Ext by Inst. | |||
3648 | // 3. Extend each operand that needs to be extended. | |||
3649 | ||||
3650 | // Remember the original type of the instruction before promotion. | |||
3651 | // This is useful to know that the high bits are sign extended bits. | |||
3652 | addPromotedInst(PromotedInsts, ExtOpnd, IsSExt); | |||
3653 | // Step #1. | |||
3654 | TPT.mutateType(ExtOpnd, Ext->getType()); | |||
3655 | // Step #2. | |||
3656 | TPT.replaceAllUsesWith(Ext, ExtOpnd); | |||
3657 | // Step #3. | |||
3658 | Instruction *ExtForOpnd = Ext; | |||
3659 | ||||
3660 | LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n"); | |||
3661 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; | |||
3662 | ++OpIdx) { | |||
3663 | LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); | |||
3664 | if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || | |||
3665 | !shouldExtOperand(ExtOpnd, OpIdx)) { | |||
3666 | LLVM_DEBUG(dbgs() << "No need to propagate\n"); | |||
3667 | continue; | |||
3668 | } | |||
3669 | // Check if we can statically extend the operand. | |||
3670 | Value *Opnd = ExtOpnd->getOperand(OpIdx); | |||
3671 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { | |||
3672 | LLVM_DEBUG(dbgs() << "Statically extend\n"); | |||
3673 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); | |||
3674 | APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) | |||
3675 | : Cst->getValue().zext(BitWidth); | |||
3676 | TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); | |||
3677 | continue; | |||
3678 | } | |||
3679 | // UndefValues are typed, so we have to statically extend them. | |||
3680 | if (isa<UndefValue>(Opnd)) { | |||
3681 | LLVM_DEBUG(dbgs() << "Statically extend\n"); | |||
3682 | TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); | |||
3683 | continue; | |||
3684 | } | |||
3685 | ||||
3686 | // Otherwise we have to explicitly sign/zero extend the operand. | |||
3687 | // Check if Ext was already reused to extend another operand. | |||
3688 | if (!ExtForOpnd) { | |||
3689 | // If so, create a new extension. | |||
3690 | LLVM_DEBUG(dbgs() << "More operands to ext\n"); | |||
3691 | Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) | |||
3692 | : TPT.createZExt(Ext, Opnd, Ext->getType()); | |||
3693 | if (!isa<Instruction>(ValForExtOpnd)) { | |||
3694 | TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); | |||
3695 | continue; | |||
3696 | } | |||
3697 | ExtForOpnd = cast<Instruction>(ValForExtOpnd); | |||
3698 | } | |||
3699 | if (Exts) | |||
3700 | Exts->push_back(ExtForOpnd); | |||
3701 | TPT.setOperand(ExtForOpnd, 0, Opnd); | |||
3702 | ||||
3703 | // Move the sign extension before the insertion point. | |||
3704 | TPT.moveBefore(ExtForOpnd, ExtOpnd); | |||
3705 | TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); | |||
3706 | CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); | |||
3707 | // If more extensions are required, new instructions will have to be created. | |||
3708 | ExtForOpnd = nullptr; | |||
3709 | } | |||
3710 | if (ExtForOpnd == Ext) { | |||
3711 | LLVM_DEBUG(dbgs() << "Extension is useless now\n"); | |||
3712 | TPT.eraseInstruction(Ext); | |||
3713 | } | |||
3714 | return ExtOpnd; | |||
3715 | } | |||
3716 | ||||
3717 | /// Check whether or not promoting an instruction to a wider type is profitable. | |||
3718 | /// \p NewCost gives the cost of extension instructions created by the | |||
3719 | /// promotion. | |||
3720 | /// \p OldCost gives the cost of extension instructions before the promotion | |||
3721 | /// plus the number of instructions that have been | |||
3722 | /// matched in the addressing mode during the promotion. | |||
3723 | /// \p PromotedOperand is the value that has been promoted. | |||
3724 | /// \return True if the promotion is profitable, false otherwise. | |||
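| /// Sketch of the rule: NewCost > OldCost is never profitable; NewCost < | |||
| /// OldCost always is; on a tie, the promotion is kept only when the | |||
| /// promoted instruction is legal for the target. | |||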
3725 | bool AddressingModeMatcher::isPromotionProfitable( | |||
3726 | unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { | |||
3727 | LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost | |||
3728 | << '\n'); | |||
3729 | // The cost of the new extensions is greater than the cost of the | |||
3730 | // old extension plus what we folded. | |||
3731 | // This is not profitable. | |||
3732 | if (NewCost > OldCost) | |||
3733 | return false; | |||
3734 | if (NewCost < OldCost) | |||
3735 | return true; | |||
3736 | // The promotion is neutral but it may help folding the sign extension in | |||
3737 | // loads for instance. | |||
3738 | // Check that we did not create an illegal instruction. | |||
3739 | return isPromotedInstructionLegal(TLI, DL, PromotedOperand); | |||
3740 | } | |||
3741 | ||||
3742 | /// Given an instruction or constant expr, see if we can fold the operation | |||
3743 | /// into the addressing mode. If so, update the addressing mode and return | |||
3744 | /// true, otherwise return false without modifying AddrMode. | |||
3745 | /// If \p MovedAway is not NULL, it contains the information of whether or | |||
3746 | /// not AddrInst has to be folded into the addressing mode on success. | |||
3747 | /// If \p MovedAway == true, \p AddrInst will not be part of the addressing mode | |||
3748 | /// because it has been moved away. | |||
3749 | /// Thus AddrInst must not be added in the matched instructions. | |||
3750 | /// This state can happen when AddrInst is a sext, since it may be moved away. | |||
3751 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must | |||
3752 | /// not be referenced anymore. | |||
3753 | bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, | |||
3754 | unsigned Depth, | |||
3755 | bool *MovedAway) { | |||
3756 | // Avoid exponential behavior on extremely deep expression trees. | |||
3757 | if (Depth >= 5) return false; | |||
3758 | ||||
3759 | // By default, all matched instructions stay in place. | |||
3760 | if (MovedAway) | |||
3761 | *MovedAway = false; | |||
3762 | ||||
3763 | switch (Opcode) { | |||
3764 | case Instruction::PtrToInt: | |||
3765 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
3766 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3767 | case Instruction::IntToPtr: { | |||
3768 | auto AS = AddrInst->getType()->getPointerAddressSpace(); | |||
3769 | auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); | |||
3770 | // This inttoptr is a no-op if the integer type is pointer sized. | |||
3771 | if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) | |||
3772 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3773 | return false; | |||
3774 | } | |||
3775 | case Instruction::BitCast: | |||
3776 | // BitCast is always a noop, and we can handle it as long as it is | |||
3777 | // int->int or pointer->pointer (we don't want int<->fp or something). | |||
3778 | if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() && | |||
3779 | // Don't touch identity bitcasts. These were probably put here by LSR, | |||
3780 | // and we don't want to mess around with them. Assume it knows what it | |||
3781 | // is doing. | |||
3782 | AddrInst->getOperand(0)->getType() != AddrInst->getType()) | |||
3783 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3784 | return false; | |||
3785 | case Instruction::AddrSpaceCast: { | |||
3786 | unsigned SrcAS | |||
3787 | = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); | |||
3788 | unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); | |||
3789 | if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) | |||
3790 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3791 | return false; | |||
3792 | } | |||
3793 | case Instruction::Add: { | |||
3794 | // Check to see if we can merge in the RHS then the LHS. If so, we win. | |||
3795 | ExtAddrMode BackupAddrMode = AddrMode; | |||
3796 | unsigned OldSize = AddrModeInsts.size(); | |||
3797 | // Start a transaction at this point. | |||
3798 | // The LHS may match but not the RHS. | |||
3799 | // Therefore, we need a higher level restoration point to undo partially | |||
3800 | // matched operations. | |||
3801 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
3802 | TPT.getRestorationPoint(); | |||
3803 | ||||
3804 | if (matchAddr(AddrInst->getOperand(1), Depth+1) && | |||
3805 | matchAddr(AddrInst->getOperand(0), Depth+1)) | |||
3806 | return true; | |||
3807 | ||||
3808 | // Restore the old addr mode info. | |||
3809 | AddrMode = BackupAddrMode; | |||
3810 | AddrModeInsts.resize(OldSize); | |||
3811 | TPT.rollback(LastKnownGood); | |||
3812 | ||||
3813 | // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. | |||
3814 | if (matchAddr(AddrInst->getOperand(0), Depth+1) && | |||
3815 | matchAddr(AddrInst->getOperand(1), Depth+1)) | |||
3816 | return true; | |||
3817 | ||||
3818 | // Otherwise we definitely can't merge the ADD in. | |||
3819 | AddrMode = BackupAddrMode; | |||
3820 | AddrModeInsts.resize(OldSize); | |||
3821 | TPT.rollback(LastKnownGood); | |||
3822 | break; | |||
3823 | } | |||
3824 | //case Instruction::Or: | |||
3825 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. | |||
3826 | //break; | |||
3827 | case Instruction::Mul: | |||
3828 | case Instruction::Shl: { | |||
3829 | // Can only handle X*C and X << C. | |||
3830 | ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); | |||
3831 | if (!RHS || RHS->getBitWidth() > 64) | |||
3832 | return false; | |||
3833 | int64_t Scale = RHS->getSExtValue(); | |||
3834 | if (Opcode == Instruction::Shl) | |||
3835 | Scale = 1LL << Scale; | |||
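| // E.g., "shl %x, 3" is handled as %x * 8 in the addressing mode. | |||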
3836 | ||||
3837 | return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); | |||
3838 | } | |||
3839 | case Instruction::GetElementPtr: { | |||
3840 | // Scan the GEP. We check whether it contains constant offsets and at most | |||
3841 | // one variable offset. | |||
3842 | int VariableOperand = -1; | |||
3843 | unsigned VariableScale = 0; | |||
3844 | ||||
3845 | int64_t ConstantOffset = 0; | |||
3846 | gep_type_iterator GTI = gep_type_begin(AddrInst); | |||
3847 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { | |||
3848 | if (StructType *STy = GTI.getStructTypeOrNull()) { | |||
3849 | const StructLayout *SL = DL.getStructLayout(STy); | |||
3850 | unsigned Idx = | |||
3851 | cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); | |||
3852 | ConstantOffset += SL->getElementOffset(Idx); | |||
3853 | } else { | |||
3854 | uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); | |||
3855 | if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { | |||
3856 | const APInt &CVal = CI->getValue(); | |||
3857 | if (CVal.getMinSignedBits() <= 64) { | |||
3858 | ConstantOffset += CVal.getSExtValue() * TypeSize; | |||
3859 | continue; | |||
3860 | } | |||
3861 | } | |||
3862 | if (TypeSize) { // Scales of zero don't do anything. | |||
3863 | // We only allow one variable index at the moment. | |||
3864 | if (VariableOperand != -1) | |||
3865 | return false; | |||
3866 | ||||
3867 | // Remember the variable index. | |||
3868 | VariableOperand = i; | |||
3869 | VariableScale = TypeSize; | |||
3870 | } | |||
3871 | } | |||
3872 | } | |||
3873 | ||||
3874 | // A common case is for the GEP to only do a constant offset. In this case, | |||
3875 | // just add it to the disp field and check validity. | |||
3876 | if (VariableOperand == -1) { | |||
3877 | AddrMode.BaseOffs += ConstantOffset; | |||
3878 | if (ConstantOffset == 0 || | |||
3879 | TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { | |||
3880 | // Check to see if we can fold the base pointer in too. | |||
3881 | if (matchAddr(AddrInst->getOperand(0), Depth+1)) | |||
3882 | return true; | |||
3883 | } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) && | |||
3884 | TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 && | |||
3885 | ConstantOffset > 0) { | |||
3886 | // Record GEPs with non-zero offsets as candidates for splitting in the | |||
3887 | // event that the offset cannot fit into the r+i addressing mode. | |||
3888 | // Simple and common case that only one GEP is used in calculating the | |||
3889 | // address for the memory access. | |||
3890 | Value *Base = AddrInst->getOperand(0); | |||
3891 | auto *BaseI = dyn_cast<Instruction>(Base); | |||
3892 | auto *GEP = cast<GetElementPtrInst>(AddrInst); | |||
3893 | if (isa<Argument>(Base) || isa<GlobalValue>(Base) || | |||
3894 | (BaseI && !isa<CastInst>(BaseI) && | |||
3895 | !isa<GetElementPtrInst>(BaseI))) { | |||
3896 | // If the base is an instruction, make sure the GEP is not in the same | |||
3897 | // basic block as the base. If the base is an argument or global | |||
3898 | // value, make sure the GEP is not in the entry block. Otherwise, | |||
3899 | // instruction selection can undo the split. Also make sure the | |||
3900 | // parent block allows inserting non-PHI instructions before the | |||
3901 | // terminator. | |||
3902 | BasicBlock *Parent = | |||
3903 | BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock(); | |||
3904 | if (GEP->getParent() != Parent && !Parent->getTerminator()->isEHPad()) | |||
3905 | LargeOffsetGEP = std::make_pair(GEP, ConstantOffset); | |||
3906 | } | |||
3907 | } | |||
3908 | AddrMode.BaseOffs -= ConstantOffset; | |||
3909 | return false; | |||
3910 | } | |||
3911 | ||||
3912 | // Save the valid addressing mode in case we can't match. | |||
3913 | ExtAddrMode BackupAddrMode = AddrMode; | |||
3914 | unsigned OldSize = AddrModeInsts.size(); | |||
3915 | ||||
3916 | // See if the scale and offset amount is valid for this target. | |||
3917 | AddrMode.BaseOffs += ConstantOffset; | |||
3918 | ||||
3919 | // Match the base operand of the GEP. | |||
3920 | if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { | |||
3921 | // If it couldn't be matched, just stuff the value in a register. | |||
3922 | if (AddrMode.HasBaseReg) { | |||
3923 | AddrMode = BackupAddrMode; | |||
3924 | AddrModeInsts.resize(OldSize); | |||
3925 | return false; | |||
3926 | } | |||
3927 | AddrMode.HasBaseReg = true; | |||
3928 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
3929 | } | |||
3930 | ||||
3931 | // Match the remaining variable portion of the GEP. | |||
3932 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, | |||
3933 | Depth)) { | |||
3934 | // If it couldn't be matched, try stuffing the base into a register | |||
3935 | // instead of matching it, and retrying the match of the scale. | |||
3936 | AddrMode = BackupAddrMode; | |||
3937 | AddrModeInsts.resize(OldSize); | |||
3938 | if (AddrMode.HasBaseReg) | |||
3939 | return false; | |||
3940 | AddrMode.HasBaseReg = true; | |||
3941 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
3942 | AddrMode.BaseOffs += ConstantOffset; | |||
3943 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), | |||
3944 | VariableScale, Depth)) { | |||
3945 | // If even that didn't work, bail. | |||
3946 | AddrMode = BackupAddrMode; | |||
3947 | AddrModeInsts.resize(OldSize); | |||
3948 | return false; | |||
3949 | } | |||
3950 | } | |||
3951 | ||||
3952 | return true; | |||
3953 | } | |||
3954 | case Instruction::SExt: | |||
3955 | case Instruction::ZExt: { | |||
3956 | Instruction *Ext = dyn_cast<Instruction>(AddrInst); | |||
3957 | if (!Ext) | |||
3958 | return false; | |||
3959 | ||||
3960 | // Try to move this ext out of the way of the addressing mode. | |||
3961 | // Ask for a method for doing so. | |||
3962 | TypePromotionHelper::Action TPH = | |||
3963 | TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); | |||
3964 | if (!TPH) | |||
3965 | return false; | |||
3966 | ||||
3967 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
3968 | TPT.getRestorationPoint(); | |||
3969 | unsigned CreatedInstsCost = 0; | |||
3970 | unsigned ExtCost = !TLI.isExtFree(Ext); | |||
3971 | Value *PromotedOperand = | |||
3972 | TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); | |||
3973 | // The ext has been moved away. | |||
3974 | // Thus either it will be rematched later in the recursive calls or it is | |||
3975 | // gone. Anyway, we must not fold it into the addressing mode at this point. | |||
3976 | // E.g., | |||
3977 | // op = add opnd, 1 | |||
3978 | // idx = ext op | |||
3979 | // addr = gep base, idx | |||
3980 | // is now: | |||
3981 | // promotedOpnd = ext opnd <- no match here | |||
3982 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) | |||
3983 | // addr = gep base, op <- match | |||
3984 | if (MovedAway) | |||
3985 | *MovedAway = true; | |||
3986 | ||||
3987 | assert(PromotedOperand && | |||
3988 | "TypePromotionHelper should have filtered out those cases"); | |||
3989 | ||||
3990 | ExtAddrMode BackupAddrMode = AddrMode; | |||
3991 | unsigned OldSize = AddrModeInsts.size(); | |||
3992 | ||||
3993 | if (!matchAddr(PromotedOperand, Depth) || | |||
3994 | // The total of the new cost is equal to the cost of the created | |||
3995 | // instructions. | |||
3996 | // The total of the old cost is equal to the cost of the extension plus | |||
3997 | // what we have saved in the addressing mode. | |||
3998 | !isPromotionProfitable(CreatedInstsCost, | |||
3999 | ExtCost + (AddrModeInsts.size() - OldSize), | |||
4000 | PromotedOperand)) { | |||
4001 | AddrMode = BackupAddrMode; | |||
4002 | AddrModeInsts.resize(OldSize); | |||
4003 | LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); | |||
4004 | TPT.rollback(LastKnownGood); | |||
4005 | return false; | |||
4006 | } | |||
4007 | return true; | |||
4008 | } | |||
4009 | } | |||
4010 | return false; | |||
4011 | } | |||
4012 | ||||
4013 | /// If we can, try to add the value of 'Addr' into the current addressing mode. | |||
4014 | /// If Addr can't be added to AddrMode this returns false and leaves AddrMode | |||
4015 | /// unmodified. This assumes that Addr is either a pointer type or intptr_t | |||
4016 | /// for the target. | |||
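| /// For example (an illustrative outcome, depending on the target), | |||
| /// matching "getelementptr i8, i8* %base, i64 %idx" may leave | |||
| /// AddrMode.BaseReg = %base, AddrMode.ScaledReg = %idx and Scale = 1. | |||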
4017 | /// | |||
4018 | bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { | |||
4019 | // Start a transaction at this point that we will rollback if the matching | |||
4020 | // fails. | |||
4021 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4022 | TPT.getRestorationPoint(); | |||
4023 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { | |||
4024 | // Fold in immediates if legal for the target. | |||
4025 | AddrMode.BaseOffs += CI->getSExtValue(); | |||
4026 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4027 | return true; | |||
4028 | AddrMode.BaseOffs -= CI->getSExtValue(); | |||
4029 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { | |||
4030 | // If this is a global variable, try to fold it into the addressing mode. | |||
4031 | if (!AddrMode.BaseGV) { | |||
4032 | AddrMode.BaseGV = GV; | |||
4033 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4034 | return true; | |||
4035 | AddrMode.BaseGV = nullptr; | |||
4036 | } | |||
4037 | } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { | |||
4038 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4039 | unsigned OldSize = AddrModeInsts.size(); | |||
4040 | ||||
4041 | // Check to see if it is possible to fold this operation. | |||
4042 | bool MovedAway = false; | |||
4043 | if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { | |||
4044 | // This instruction may have been moved away. If so, there is nothing | |||
4045 | // to check here. | |||
4046 | if (MovedAway) | |||
4047 | return true; | |||
4048 | // Okay, it's possible to fold this. Check to see if it is actually | |||
4049 | // *profitable* to do so. We use a simple cost model to avoid increasing | |||
4050 | // register pressure too much. | |||
4051 | if (I->hasOneUse() || | |||
4052 | isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { | |||
4053 | AddrModeInsts.push_back(I); | |||
4054 | return true; | |||
4055 | } | |||
4056 | ||||
4057 | // It isn't profitable to do this, roll back. | |||
4058 | //cerr << "NOT FOLDING: " << *I; | |||
4059 | AddrMode = BackupAddrMode; | |||
4060 | AddrModeInsts.resize(OldSize); | |||
4061 | TPT.rollback(LastKnownGood); | |||
4062 | } | |||
4063 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { | |||
4064 | if (matchOperationAddr(CE, CE->getOpcode(), Depth)) | |||
4065 | return true; | |||
4066 | TPT.rollback(LastKnownGood); | |||
4067 | } else if (isa<ConstantPointerNull>(Addr)) { | |||
4068 | // Null pointer gets folded without affecting the addressing mode. | |||
4069 | return true; | |||
4070 | } | |||
4071 | ||||
4072 | // Worst case, the target should support [reg] addressing modes. :) | |||
4073 | if (!AddrMode.HasBaseReg) { | |||
4074 | AddrMode.HasBaseReg = true; | |||
4075 | AddrMode.BaseReg = Addr; | |||
4076 | // Still check for legality in case the target supports [imm] but not [i+r]. | |||
4077 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4078 | return true; | |||
4079 | AddrMode.HasBaseReg = false; | |||
4080 | AddrMode.BaseReg = nullptr; | |||
4081 | } | |||
4082 | ||||
4083 | // If the base register is already taken, see if we can do [r+r]. | |||
4084 | if (AddrMode.Scale == 0) { | |||
4085 | AddrMode.Scale = 1; | |||
4086 | AddrMode.ScaledReg = Addr; | |||
4087 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4088 | return true; | |||
4089 | AddrMode.Scale = 0; | |||
4090 | AddrMode.ScaledReg = nullptr; | |||
4091 | } | |||
4092 | // Couldn't match. | |||
4093 | TPT.rollback(LastKnownGood); | |||
4094 | return false; | |||
4095 | } | |||
4096 | ||||
4097 | /// Check to see if all uses of OpVal by the specified inline asm call are due | |||
4098 | /// to memory operands. If so, return true, otherwise return false. | |||
4099 | static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, | |||
4100 | const TargetLowering &TLI, | |||
4101 | const TargetRegisterInfo &TRI) { | |||
4102 | const Function *F = CI->getFunction(); | |||
4103 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
4104 | TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, | |||
4105 | ImmutableCallSite(CI)); | |||
4106 | ||||
4107 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
4108 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
4109 | ||||
4110 | // Compute the constraint code and ConstraintType to use. | |||
4111 | TLI.ComputeConstraintToUse(OpInfo, SDValue()); | |||
4112 | ||||
4113 | // If this asm operand is our Value*, and if it isn't an indirect memory | |||
4114 | // operand, we can't fold it! | |||
4115 | if (OpInfo.CallOperandVal == OpVal && | |||
4116 | (OpInfo.ConstraintType != TargetLowering::C_Memory || | |||
4117 | !OpInfo.isIndirect)) | |||
4118 | return false; | |||
4119 | } | |||
4120 | ||||
4121 | return true; | |||
4122 | } | |||
4123 | ||||
4124 | // Max number of memory uses to look at before aborting the search to conserve | |||
4125 | // compile time. | |||
4126 | static constexpr int MaxMemoryUsesToScan = 20; | |||
4127 | ||||
4128 | /// Recursively walk all the uses of I until we find a memory use. | |||
4129 | /// If we find an obviously non-foldable instruction, return true. | |||
4130 | /// Add the ultimately found memory instructions to MemoryUses. | |||
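| /// For instance (illustrative), a value used only as a load pointer ends | |||
| /// up in MemoryUses, whereas a user that can never be folded into an | |||
| /// addressing mode, say an icmp, makes this function return true. | |||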
4131 | static bool FindAllMemoryUses( | |||
4132 | Instruction *I, | |||
4133 | SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, | |||
4134 | SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, | |||
4135 | const TargetRegisterInfo &TRI, int SeenInsts = 0) { | |||
4136 | // If we already considered this instruction, we're done. | |||
4137 | if (!ConsideredInsts.insert(I).second) | |||
4138 | return false; | |||
4139 | ||||
4140 | // If this is an obviously unfoldable instruction, bail out. | |||
4141 | if (!MightBeFoldableInst(I)) | |||
4142 | return true; | |||
4143 | ||||
4144 | const bool OptSize = I->getFunction()->optForSize(); | |||
4145 | ||||
4146 | // Loop over all the uses, recursively processing them. | |||
4147 | for (Use &U : I->uses()) { | |||
4148 | // Conservatively return true if we're seeing a large number or a deep chain | |||
4149 | // of users. This avoids excessive compilation times in pathological cases. | |||
4150 | if (SeenInsts++ >= MaxMemoryUsesToScan) | |||
4151 | return true; | |||
4152 | ||||
4153 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
4154 | if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { | |||
4155 | MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); | |||
4156 | continue; | |||
4157 | } | |||
4158 | ||||
4159 | if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { | |||
4160 | unsigned opNo = U.getOperandNo(); | |||
4161 | if (opNo != StoreInst::getPointerOperandIndex()) | |||
4162 | return true; // Storing addr, not into addr. | |||
4163 | MemoryUses.push_back(std::make_pair(SI, opNo)); | |||
4164 | continue; | |||
4165 | } | |||
4166 | ||||
4167 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { | |||
4168 | unsigned opNo = U.getOperandNo(); | |||
4169 | if (opNo != AtomicRMWInst::getPointerOperandIndex()) | |||
4170 | return true; // Storing addr, not into addr. | |||
4171 | MemoryUses.push_back(std::make_pair(RMW, opNo)); | |||
4172 | continue; | |||
4173 | } | |||
4174 | ||||
4175 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { | |||
4176 | unsigned opNo = U.getOperandNo(); | |||
4177 | if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) | |||
4178 | return true; // Storing addr, not into addr. | |||
4179 | MemoryUses.push_back(std::make_pair(CmpX, opNo)); | |||
4180 | continue; | |||
4181 | } | |||
4182 | ||||
4183 | if (CallInst *CI = dyn_cast<CallInst>(UserI)) { | |||
4184 | // If this is a cold call, we can sink the addressing calculation into | |||
4185 | // the cold path. See optimizeCallInst. | |||
4186 | if (!OptSize && CI->hasFnAttr(Attribute::Cold)) | |||
4187 | continue; | |||
4188 | ||||
4189 | InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); | |||
4190 | if (!IA) return true; | |||
4191 | ||||
4192 | // If this is a memory operand, we're cool, otherwise bail out. | |||
4193 | if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) | |||
4194 | return true; | |||
4195 | continue; | |||
4196 | } | |||
4197 | ||||
4198 | if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, | |||
4199 | SeenInsts)) | |||
4200 | return true; | |||
4201 | } | |||
4202 | ||||
4203 | return false; | |||
4204 | } | |||
4205 | ||||
4206 | /// Return true if Val is already known to be live at the use site that we're | |||
4207 | /// folding it into. If so, there is no cost to include it in the addressing | |||
4208 | /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the | |||
4209 | /// instruction already. | |||
4210 | bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, | |||
4211 | Value *KnownLive2) { | |||
4212 | // If Val is either of the known-live values, we know it is live! | |||
4213 | if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) | |||
4214 | return true; | |||
4215 | ||||
4216 | // All values other than instructions and arguments (e.g. constants) are live. | |||
4217 | if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; | |||
4218 | ||||
4219 | // If Val is a constant sized alloca in the entry block, it is live, this is | |||
4220 | // true because it is just a reference to the stack/frame pointer, which is | |||
4221 | // live for the whole function. | |||
4222 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) | |||
4223 | if (AI->isStaticAlloca()) | |||
4224 | return true; | |||
4225 | ||||
4226 | // Check to see if this value is already used in the memory instruction's | |||
4227 | // block. If so, it's already live into the block at the very least, so we | |||
4228 | // can reasonably fold it. | |||
4229 | return Val->isUsedInBasicBlock(MemoryInst->getParent()); | |||
4230 | } | |||
4231 | ||||
4232 | /// It is possible for the addressing mode of the machine to fold the specified | |||
4233 | /// instruction into a load or store that ultimately uses it. | |||
4234 | /// However, the specified instruction has multiple uses. | |||
4235 | /// Given this, it may actually increase register pressure to fold it | |||
4236 | /// into the load. For example, consider this code: | |||
4237 | /// | |||
4238 | /// X = ... | |||
4239 | /// Y = X+1 | |||
4240 | /// use(Y) -> nonload/store | |||
4241 | /// Z = Y+1 | |||
4242 | /// load Z | |||
4243 | /// | |||
4244 | /// In this case, Y has multiple uses, and can be folded into the load of Z | |||
4245 | /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to | |||
4246 | /// be live at the use(Y) line. If we don't fold Y into load Z, we use one | |||
4247 | /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the | |||
4248 | /// number of computations either. | |||
4249 | /// | |||
4250 | /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If | |||
4251 | /// X was live across 'load Z' for other reasons, we actually *would* want to | |||
4252 | /// fold the addressing mode in the Z case. This would make Y die earlier. | |||
4253 | bool AddressingModeMatcher:: | |||
4254 | isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, | |||
4255 | ExtAddrMode &AMAfter) { | |||
4256 | if (IgnoreProfitability) return true; | |||
4257 | ||||
4258 | // AMBefore is the addressing mode before this instruction was folded into it, | |||
4259 | // and AMAfter is the addressing mode after the instruction was folded. Get | |||
4260 | // the set of registers referenced by AMAfter and subtract out those | |||
4261 | // referenced by AMBefore: this is the set of values which folding in this | |||
4262 | // address extends the lifetime of. | |||
4263 | // | |||
4264 | // Note that there are only two potential values being referenced here, | |||
4265 | // BaseReg and ScaleReg (global addresses are always available, as are any | |||
4266 | // folded immediates). | |||
4267 | Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; | |||
4268 | ||||
4269 | // If the BaseReg or ScaledReg was referenced by the previous addrmode, their | |||
4270 | // lifetime wasn't extended by adding this instruction. | |||
4271 | if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
4272 | BaseReg = nullptr; | |||
4273 | if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
4274 | ScaledReg = nullptr; | |||
4275 | ||||
4276 | // If folding this instruction (and its subexprs) didn't extend any live | |||
4277 | // ranges, we're ok with it. | |||
4278 | if (!BaseReg && !ScaledReg) | |||
4279 | return true; | |||
4280 | ||||
4281 | // If all uses of this instruction can have the address mode sunk into them, | |||
4282 | // we can remove the addressing mode and effectively trade one live register | |||
4283 | // for another (at worst). In this context, folding an addressing mode into | |||
4284 | // the use is just a particularly nice way of sinking it. | |||
4285 | SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; | |||
4286 | SmallPtrSet<Instruction*, 16> ConsideredInsts; | |||
4287 | if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI)) | |||
4288 | return false; // Has a non-memory, non-foldable use! | |||
4289 | ||||
4290 | // Now that we know that all uses of this instruction are part of a chain of | |||
4291 | // computation involving only operations that could theoretically be folded | |||
4292 | // into a memory use, loop over each of these memory operation uses and see | |||
4293 | // if they could *actually* fold the instruction. The assumption is that | |||
4294 | // addressing modes are cheap and that duplicating the computation involved | |||
4295 | // many times is worthwhile, even on a fastpath. For sinking candidates | |||
4296 | // (i.e. cold call sites), this serves as a way to prevent excessive code | |||
4297 | // growth since most architectures have some reasonable small and fast way to | |||
4298 | // compute an effective address. (e.g., LEA on x86) | |||
4299 | SmallVector<Instruction*, 32> MatchedAddrModeInsts; | |||
4300 | for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { | |||
4301 | Instruction *User = MemoryUses[i].first; | |||
4302 | unsigned OpNo = MemoryUses[i].second; | |||
4303 | ||||
4304 | // Get the access type of this use. If the use isn't a pointer, we don't | |||
4305 | // know what it accesses. | |||
4306 | Value *Address = User->getOperand(OpNo); | |||
4307 | PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); | |||
4308 | if (!AddrTy) | |||
4309 | return false; | |||
4310 | Type *AddressAccessTy = AddrTy->getElementType(); | |||
4311 | unsigned AS = AddrTy->getAddressSpace(); | |||
4312 | ||||
4313 | // Do a match against the root of this address, ignoring profitability. This | |||
4314 | // will tell us if the addressing mode for the memory operation will | |||
4315 | // *actually* cover the shared instruction. | |||
4316 | ExtAddrMode Result; | |||
4317 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | |||
4318 | 0); | |||
4319 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4320 | TPT.getRestorationPoint(); | |||
4321 | AddressingModeMatcher Matcher( | |||
4322 | MatchedAddrModeInsts, TLI, TRI, AddressAccessTy, AS, MemoryInst, Result, | |||
4323 | InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); | |||
4324 | Matcher.IgnoreProfitability = true; | |||
4325 | bool Success = Matcher.matchAddr(Address, 0); | |||
4326 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
4327 | ||||
4328 | // The match was to check the profitability, the changes made are not | |||
4329 | // part of the original matcher. Therefore, they should be dropped | |||
4330 | // otherwise the original matcher will not present the right state. | |||
4331 | TPT.rollback(LastKnownGood); | |||
4332 | ||||
4333 | // If the match didn't cover I, then it won't be shared by it. | |||
4334 | if (!is_contained(MatchedAddrModeInsts, I)) | |||
4335 | return false; | |||
4336 | ||||
4337 | MatchedAddrModeInsts.clear(); | |||
4338 | } | |||
4339 | ||||
4340 | return true; | |||
4341 | } | |||
4342 | ||||
4343 | /// Return true if the specified value is defined in a | |||
4344 | /// different basic block than BB. | |||
4345 | static bool IsNonLocalValue(Value *V, BasicBlock *BB) { | |||
4346 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
4347 | return I->getParent() != BB; | |||
4348 | return false; | |||
4349 | } | |||
4350 | ||||
4351 | /// Sink addressing mode computation immediately before MemoryInst if doing so | |||
4352 | /// can be done without increasing register pressure. The need for the | |||
4353 | /// register pressure constraint means this can end up being an all or nothing | |||
4354 | /// decision for all uses of the same addressing computation. | |||
4355 | /// | |||
4356 | /// Load and Store Instructions often have addressing modes that can do | |||
4357 | /// significant amounts of computation. As such, instruction selection will try | |||
4358 | /// to get the load or store to do as much computation as possible for the | |||
4359 | /// program. The problem is that isel can only see within a single block. As | |||
4360 | /// such, we sink as much legal addressing mode work into the block as possible. | |||
4361 | /// | |||
4362 | /// This method is used to optimize both load/store and inline asms with memory | |||
4363 | /// operands. It's also used to sink addressing computations feeding into cold | |||
4364 | /// call sites into their (cold) basic block. | |||
4365 | /// | |||
4366 | /// The motivation for handling sinking into cold blocks is that doing so can | |||
4367 | /// both enable other address mode sinking (by satisfying the register pressure | |||
4368 | /// constraint above), and reduce register pressure globally (by removing the | |||
4369 | /// addressing mode computation from the fast path entirely). | |||
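| /// A minimal sketch of the effect on hypothetical IR: | |||
| /// bb1: %a = getelementptr i8, i8* %p, i64 %o | |||
| /// br label %bb2 | |||
| /// bb2: %v = load i8, i8* %a | |||
| /// becomes, after sinking the computation into the using block: | |||
| /// bb2: %a.sunk = getelementptr i8, i8* %p, i64 %o | |||
| /// %v = load i8, i8* %a.sunk | |||
| /// so that instruction selection can fold the address into the load. | |||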
4370 | bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, | |||
4371 | Type *AccessTy, unsigned AddrSpace) { | |||
4372 | Value *Repl = Addr; | |||
4373 | ||||
4374 | // Try to collapse single-value PHI nodes. This is necessary to undo | |||
4375 | // unprofitable PRE transformations. | |||
4376 | SmallVector<Value*, 8> worklist; | |||
4377 | SmallPtrSet<Value*, 16> Visited; | |||
4378 | worklist.push_back(Addr); | |||
4379 | ||||
4380 | // Use a worklist to iteratively look through PHI and select nodes, and | |||
4381 | // ensure that the addressing mode obtained from the non-PHI/select roots of | |||
4382 | // the graph are compatible. | |||
4383 | bool PhiOrSelectSeen = false; | |||
4384 | SmallVector<Instruction*, 16> AddrModeInsts; | |||
4385 | const SimplifyQuery SQ(*DL, TLInfo); | |||
4386 | AddressingModeCombiner AddrModes(SQ, { Addr, MemoryInst->getParent() }); | |||
4387 | TypePromotionTransaction TPT(RemovedInsts); | |||
4388 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4389 | TPT.getRestorationPoint(); | |||
4390 | while (!worklist.empty()) { | |||
4391 | Value *V = worklist.back(); | |||
4392 | worklist.pop_back(); | |||
4393 | ||||
4394 | // We allow traversing cyclic Phi nodes. | |||
4395 | // In case of success, after this loop we ensure that traversing through | |||
4396 | // Phi nodes ends up computing an address of the form | |||
4397 | // BaseGV + Base + Scale * Index + Offset | |||
4398 | // where Scale and Offset are constants and BaseGV, Base and Index | |||
4399 | // are exactly the same Values in all cases. | |||
4400 | // It means that BaseGV, Scale and Offset dominate our memory instruction | |||
4401 | // and have the same value as they had in address computation represented | |||
4402 | // as Phi. So we can safely sink address computation to memory instruction. | |||
4403 | if (!Visited.insert(V).second) | |||
4404 | continue; | |||
4405 | ||||
4406 | // For a PHI node, push all of its incoming values. | |||
4407 | if (PHINode *P = dyn_cast<PHINode>(V)) { | |||
4408 | for (Value *IncValue : P->incoming_values()) | |||
4409 | worklist.push_back(IncValue); | |||
4410 | PhiOrSelectSeen = true; | |||
4411 | continue; | |||
4412 | } | |||
4413 | // Similar for select. | |||
4414 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { | |||
4415 | worklist.push_back(SI->getFalseValue()); | |||
4416 | worklist.push_back(SI->getTrueValue()); | |||
4417 | PhiOrSelectSeen = true; | |||
4418 | continue; | |||
4419 | } | |||
4420 | ||||
4421 | // For non-PHIs, determine the addressing mode being computed. Note that | |||
4422 | // the result may differ depending on what other uses our candidate | |||
4423 | // addressing instructions might have. | |||
4424 | AddrModeInsts.clear(); | |||
4425 | std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr, | |||
4426 | 0); | |||
4427 | ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( | |||
4428 | V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI, | |||
4429 | InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP); | |||
4430 | ||||
4431 | GetElementPtrInst *GEP = LargeOffsetGEP.first; | |||
4432 | if (GEP && GEP->getParent() != MemoryInst->getParent() && | |||
4433 | !NewGEPBases.count(GEP)) { | |||
4434 | // If splitting the underlying data structure can reduce the offset of a | |||
4435 | // GEP, collect the GEP. Skip the GEPs that are the new bases of | |||
4436 | // previously split data structures. | |||
4437 | LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP); | |||
4438 | if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end()) | |||
4439 | LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size(); | |||
4440 | } | |||
4441 | ||||
4442 | NewAddrMode.OriginalValue = V; | |||
4443 | if (!AddrModes.addNewAddrMode(NewAddrMode)) | |||
4444 | break; | |||
4445 | } | |||
4446 | ||||
4447 | // Try to combine the AddrModes we've collected. If we couldn't collect any, | |||
4448 | // or we have multiple but either couldn't combine them or combining them | |||
4449 | // wouldn't do anything useful, bail out now. | |||
4450 | if (!AddrModes.combineAddrModes()) { | |||
4451 | TPT.rollback(LastKnownGood); | |||
4452 | return false; | |||
4453 | } | |||
4454 | TPT.commit(); | |||
4455 | ||||
4456 | // Get the combined AddrMode (or the only AddrMode, if we only had one). | |||
4457 | ExtAddrMode AddrMode = AddrModes.getAddrMode(); | |||
4458 | ||||
4459 | // If all the instructions matched are already in this BB, don't do anything. | |||
4460 | // If we saw a Phi node then it is definitely not local, and if we saw a select | |||
4461 | // then we want to push the address calculation past it even if it's already | |||
4462 | // in this BB. | |||
4463 | if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { | |||
4464 | return IsNonLocalValue(V, MemoryInst->getParent()); | |||
4465 | })) { | |||
4466 | LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode | |||
4467 | << "\n"); | |||
4468 | return false; | |||
4469 | } | |||
4470 | ||||
4471 | // Insert this computation right after this user. Since our caller is | |||
4472 | // scanning from the top of the BB to the bottom, reuses of the expr are | |||
4473 | // guaranteed to happen later. | |||
4474 | IRBuilder<> Builder(MemoryInst); | |||
4475 | ||||
4476 | // Now that we have determined the addressing expression we want to use and | |||
4477 | // know that we have to sink it into this block, check to see if we have | |||
4478 | // already done this for some other load/store instr in this block. If so, | |||
4479 | // reuse the computation. Before attempting reuse, check if the address is | |||
4480 | // valid as it may have been erased. | |||
4481 | ||||
4482 | WeakTrackingVH SunkAddrVH = SunkAddrs[Addr]; | |||
4483 | ||||
4484 | Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr; | |||
4485 | if (SunkAddr) { | |||
4486 | LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode | |||
4487 | << " for " << *MemoryInst << "\n"); | |||
4488 | if (SunkAddr->getType() != Addr->getType()) | |||
4489 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
4490 | } else if (AddrSinkUsingGEPs || | |||
4491 | (!AddrSinkUsingGEPs.getNumOccurrences() && TM && TTI->useAA())) { | |||
4492 | // By default, we use the GEP-based method when AA is used later. This | |||
4493 | // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. | |||
4494 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | |||
4495 | << " for " << *MemoryInst << "\n"); | |||
4496 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
4497 | Value *ResultPtr = nullptr, *ResultIndex = nullptr; | |||
4498 | ||||
4499 | // First, find the pointer. | |||
4500 | if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { | |||
4501 | ResultPtr = AddrMode.BaseReg; | |||
4502 | AddrMode.BaseReg = nullptr; | |||
4503 | } | |||
4504 | ||||
4505 | if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { | |||
4506 | // We can't add more than one pointer together, nor can we scale a | |||
4507 | // pointer (both of which seem meaningless). | |||
4508 | if (ResultPtr || AddrMode.Scale != 1) | |||
4509 | return false; | |||
4510 | ||||
4511 | ResultPtr = AddrMode.ScaledReg; | |||
4512 | AddrMode.Scale = 0; | |||
4513 | } | |||
4514 | ||||
4515 | // It is only safe to sign extend the BaseReg if we know that the math | |||
4516 | // required to create it did not overflow before we extend it. Since | |||
4517 | // the original IR value was tossed in favor of a constant back when | |||
4518 | // the AddrMode was created we need to bail out gracefully if widths | |||
4519 | // do not match instead of extending it. | |||
4520 | // | |||
4521 | // (See below for code to add the scale.) | |||
4522 | if (AddrMode.Scale) { | |||
4523 | Type *ScaledRegTy = AddrMode.ScaledReg->getType(); | |||
4524 | if (cast<IntegerType>(IntPtrTy)->getBitWidth() > | |||
4525 | cast<IntegerType>(ScaledRegTy)->getBitWidth()) | |||
4526 | return false; | |||
4527 | } | |||
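// Illustrative arithmetic (hypothetical values, not from the original
// source): if an i32 add wrapped to -2147483648, "sext i32 ... to i64"
// yields -2147483648 rather than the 2147483648 that non-wrapping math
// would give, so widening a too-narrow ScaledReg could compute a different
// address; bailing out is the conservative choice.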
4528 | ||||
4529 | if (AddrMode.BaseGV) { | |||
4530 | if (ResultPtr) | |||
4531 | return false; | |||
4532 | ||||
4533 | ResultPtr = AddrMode.BaseGV; | |||
4534 | } | |||
4535 | ||||
4536 | // If the real base value actually came from an inttoptr, then the matcher | |||
4537 | // will look through it and provide only the integer value. In that case, | |||
4538 | // use it here. | |||
4539 | if (!DL->isNonIntegralPointerType(Addr->getType())) { | |||
4540 | if (!ResultPtr && AddrMode.BaseReg) { | |||
4541 | ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), | |||
4542 | "sunkaddr"); | |||
4543 | AddrMode.BaseReg = nullptr; | |||
4544 | } else if (!ResultPtr && AddrMode.Scale == 1) { | |||
4545 | ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), | |||
4546 | "sunkaddr"); | |||
4547 | AddrMode.Scale = 0; | |||
4548 | } | |||
4549 | } | |||
4550 | ||||
4551 | if (!ResultPtr && | |||
4552 | !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { | |||
4553 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
4554 | } else if (!ResultPtr) { | |||
4555 | return false; | |||
4556 | } else { | |||
4557 | Type *I8PtrTy = | |||
4558 | Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); | |||
4559 | Type *I8Ty = Builder.getInt8Ty(); | |||
4560 | ||||
4561 | // Start with the base register. Do this first so that subsequent address | |||
4562 | // matching finds it last, which will prevent it from trying to match it | |||
4563 | // as the scaled value in case it happens to be a mul. That would be | |||
4564 | // problematic if we've sunk a different mul for the scale, because then | |||
4565 | // we'd end up sinking both muls. | |||
4566 | if (AddrMode.BaseReg) { | |||
4567 | Value *V = AddrMode.BaseReg; | |||
4568 | if (V->getType() != IntPtrTy) | |||
4569 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
4570 | ||||
4571 | ResultIndex = V; | |||
4572 | } | |||
4573 | ||||
4574 | // Add the scale value. | |||
4575 | if (AddrMode.Scale) { | |||
4576 | Value *V = AddrMode.ScaledReg; | |||
4577 | if (V->getType() == IntPtrTy) { | |||
4578 | // done. | |||
4579 | } else { | |||
4580 | assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
4581 | cast<IntegerType>(V->getType())->getBitWidth() && | |||
4582 | "We can't transform if ScaledReg is too narrow"); | |||
4583 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
4584 | } | |||
4585 | ||||
4586 | if (AddrMode.Scale != 1) | |||
4587 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
4588 | "sunkaddr"); | |||
4589 | if (ResultIndex) | |||
4590 | ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); | |||
4591 | else | |||
4592 | ResultIndex = V; | |||
4593 | } | |||
4594 | ||||
4595 | // Add in the Base Offset if present. | |||
4596 | if (AddrMode.BaseOffs) { | |||
4597 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
4598 | if (ResultIndex) { | |||
4599 | // We need to add this separately from the scale above to help with | |||
4600 | // SDAG consecutive load/store merging. | |||
4601 | if (ResultPtr->getType() != I8PtrTy) | |||
4602 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
4603 | ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); | |||
4604 | } | |||
4605 | ||||
4606 | ResultIndex = V; | |||
4607 | } | |||
4608 | ||||
4609 | if (!ResultIndex) { | |||
4610 | SunkAddr = ResultPtr; | |||
4611 | } else { | |||
4612 | if (ResultPtr->getType() != I8PtrTy) | |||
4613 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
4614 | SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); | |||
4615 | } | |||
4616 | ||||
4617 | if (SunkAddr->getType() != Addr->getType()) | |||
4618 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
4619 | } | |||
4620 | } else { | |||
4621 | // We'd require a ptrtoint/inttoptr down the line, which we can't do for | |||
4622 | // non-integral pointers, so in that case bail out now. | |||
4623 | Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; | |||
4624 | Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; | |||
4625 | PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); | |||
4626 | PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); | |||
4627 | if (DL->isNonIntegralPointerType(Addr->getType()) || | |||
4628 | (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || | |||
4629 | (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || | |||
4630 | (AddrMode.BaseGV && | |||
4631 | DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) | |||
4632 | return false; | |||
4633 | ||||
4634 | LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode | |||
4635 | << " for " << *MemoryInst << "\n"); | |||
4636 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
4637 | Value *Result = nullptr; | |||
4638 | ||||
4639 | // Start with the base register. Do this first so that subsequent address | |||
4640 | // matching finds it last, which will prevent it from trying to match it | |||
4641 | // as the scaled value in case it happens to be a mul. That would be | |||
4642 | // problematic if we've sunk a different mul for the scale, because then | |||
4643 | // we'd end up sinking both muls. | |||
4644 | if (AddrMode.BaseReg) { | |||
4645 | Value *V = AddrMode.BaseReg; | |||
4646 | if (V->getType()->isPointerTy()) | |||
4647 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
4648 | if (V->getType() != IntPtrTy) | |||
4649 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
4650 | Result = V; | |||
4651 | } | |||
4652 | ||||
4653 | // Add the scale value. | |||
4654 | if (AddrMode.Scale) { | |||
4655 | Value *V = AddrMode.ScaledReg; | |||
4656 | if (V->getType() == IntPtrTy) { | |||
4657 | // done. | |||
4658 | } else if (V->getType()->isPointerTy()) { | |||
4659 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
4660 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
4661 | cast<IntegerType>(V->getType())->getBitWidth()) { | |||
4662 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
4663 | } else { | |||
4664 | // It is only safe to sign extend the BaseReg if we know that the math | |||
4665 | // required to create it did not overflow before we extend it. Since | |||
4666 | // the original IR value was tossed in favor of a constant back when | |||
4667 | // the AddrMode was created we need to bail out gracefully if widths | |||
4668 | // do not match instead of extending it. | |||
4669 | Instruction *I = dyn_cast_or_null<Instruction>(Result); | |||
4670 | if (I && (Result != AddrMode.BaseReg)) | |||
4671 | I->eraseFromParent(); | |||
4672 | return false; | |||
4673 | } | |||
4674 | if (AddrMode.Scale != 1) | |||
4675 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
4676 | "sunkaddr"); | |||
4677 | if (Result) | |||
4678 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
4679 | else | |||
4680 | Result = V; | |||
4681 | } | |||
4682 | ||||
4683 | // Add in the BaseGV if present. | |||
4684 | if (AddrMode.BaseGV) { | |||
4685 | Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); | |||
4686 | if (Result) | |||
4687 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
4688 | else | |||
4689 | Result = V; | |||
4690 | } | |||
4691 | ||||
4692 | // Add in the Base Offset if present. | |||
4693 | if (AddrMode.BaseOffs) { | |||
4694 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
4695 | if (Result) | |||
4696 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
4697 | else | |||
4698 | Result = V; | |||
4699 | } | |||
4700 | ||||
4701 | if (!Result) | |||
4702 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
4703 | else | |||
4704 | SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); | |||
4705 | } | |||
4706 | ||||
4707 | MemoryInst->replaceUsesOfWith(Repl, SunkAddr); | |||
4708 | // Store the newly computed address into the cache. In the case where we | |||
4709 | // reused a value, this should be idempotent. | |||
4710 | SunkAddrs[Addr] = WeakTrackingVH(SunkAddr); | |||
4711 | ||||
4712 | // If we have no uses, recursively delete the value and all dead instructions | |||
4713 | // using it. | |||
4714 | if (Repl->use_empty()) { | |||
4715 | // This can cause recursive deletion, which can invalidate our iterator. | |||
4716 | // Use a WeakTrackingVH to hold onto it in case this happens. | |||
4717 | Value *CurValue = &*CurInstIterator; | |||
4718 | WeakTrackingVH IterHandle(CurValue); | |||
4719 | BasicBlock *BB = CurInstIterator->getParent(); | |||
4720 | ||||
4721 | RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); | |||
4722 | ||||
4723 | if (IterHandle != CurValue) { | |||
4724 | // If the iterator instruction was recursively deleted, start over at the | |||
4725 | // start of the block. | |||
4726 | CurInstIterator = BB->begin(); | |||
4727 | SunkAddrs.clear(); | |||
4728 | } | |||
4729 | } | |||
4730 | ++NumMemoryInsts; | |||
4731 | return true; | |||
4732 | } | |||
4733 | ||||
4734 | /// If there are any memory operands, use optimizeMemoryInst to sink their | |||
4735 | /// address computation into the block when possible / profitable. | |||
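///
/// Illustrative sketch (hypothetical IR, not from the original source): the
/// indirect memory operand %addr below is the kind of operand whose address
/// computation this sinks via optimizeMemoryInst:
///
///   call void asm "movl $$42, $0", "=*m"(i32* %addr)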
4736 | bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { | |||
4737 | bool MadeChange = false; | |||
4738 | ||||
4739 | const TargetRegisterInfo *TRI = | |||
4740 | TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); | |||
4741 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
4742 | TLI->ParseConstraints(*DL, TRI, CS); | |||
4743 | unsigned ArgNo = 0; | |||
4744 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
4745 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
4746 | ||||
4747 | // Compute the constraint code and ConstraintType to use. | |||
4748 | TLI->ComputeConstraintToUse(OpInfo, SDValue()); | |||
4749 | ||||
4750 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && | |||
4751 | OpInfo.isIndirect) { | |||
4752 | Value *OpVal = CS->getArgOperand(ArgNo++); | |||
4753 | MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); | |||
4754 | } else if (OpInfo.Type == InlineAsm::isInput) | |||
4755 | ArgNo++; | |||
4756 | } | |||
4757 | ||||
4758 | return MadeChange; | |||
4759 | } | |||
4760 | ||||
4761 | /// Check if all the uses of \p Val are equivalent (or free) zero or | |||
4762 | /// sign extensions. | |||
4763 | static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { | |||
4764 | assert(!Val->use_empty() && "Input must have at least one use"); | |||
4765 | const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); | |||
4766 | bool IsSExt = isa<SExtInst>(FirstUser); | |||
4767 | Type *ExtTy = FirstUser->getType(); | |||
4768 | for (const User *U : Val->users()) { | |||
4769 | const Instruction *UI = cast<Instruction>(U); | |||
4770 | if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) | |||
4771 | return false; | |||
4772 | Type *CurTy = UI->getType(); | |||
4773 | // Same input and output types: Same instruction after CSE. | |||
4774 | if (CurTy == ExtTy) | |||
4775 | continue; | |||
4776 | ||||
4777 | // If IsSExt is true, we are in this situation: | |||
4778 | // a = Val | |||
4779 | // b = sext ty1 a to ty2 | |||
4780 | // c = sext ty1 a to ty3 | |||
4781 | // Assuming ty2 is shorter than ty3, this could be turned into: | |||
4782 | // a = Val | |||
4783 | // b = sext ty1 a to ty2 | |||
4784 | // c = sext ty2 b to ty3 | |||
4785 | // However, the last sext is not free. | |||
4786 | if (IsSExt) | |||
4787 | return false; | |||
4788 | ||||
4789 | // This is a ZExt; it may be free to extend from one type to another, | |||
4790 | // in which case we would not account for a different use. | |||
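// Illustrative sketch (hypothetical types, not from the original source):
// with users "zext i8 %a to i32" and "zext i8 %a to i64", if
// isZExtFree(i32, i64) holds then the i64 value can be rederived from the
// i32 one for free, so the two uses still count as equivalent.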
4791 | Type *NarrowTy; | |||
4792 | Type *LargeTy; | |||
4793 | if (ExtTy->getScalarType()->getIntegerBitWidth() > | |||
4794 | CurTy->getScalarType()->getIntegerBitWidth()) { | |||
4795 | NarrowTy = CurTy; | |||
4796 | LargeTy = ExtTy; | |||
4797 | } else { | |||
4798 | NarrowTy = ExtTy; | |||
4799 | LargeTy = CurTy; | |||
4800 | } | |||
4801 | ||||
4802 | if (!TLI.isZExtFree(NarrowTy, LargeTy)) | |||
4803 | return false; | |||
4804 | } | |||
4805 | // All uses are the same or can be derived from one another for free. | |||
4806 | return true; | |||
4807 | } | |||
4808 | ||||
4809 | /// Try to speculatively promote extensions in \p Exts and continue | |||
4810 | /// promoting through newly promoted operands recursively as far as doing so is | |||
4811 | /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. | |||
4812 | /// When some promotion happened, \p TPT contains the proper state to revert | |||
4813 | /// them. | |||
4814 | /// | |||
4815 | /// \return true if some promotion happened, false otherwise. | |||
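///
/// Illustrative sketch (hypothetical IR, not from the original source): the
/// kind of rewrite this drives turns
///   %a = add nsw i32 %ld, 1
///   %s = sext i32 %a to i64
/// into
///   %p = sext i32 %ld to i64
///   %a = add nsw i64 %p, 1
/// so the extension now sits directly on the load, where the ext(load)
/// profitability checks below can decide whether it pays off.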
4816 | bool CodeGenPrepare::tryToPromoteExts( | |||
4817 | TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, | |||
4818 | SmallVectorImpl<Instruction *> &ProfitablyMovedExts, | |||
4819 | unsigned CreatedInstsCost) { | |||
4820 | bool Promoted = false; | |||
4821 | ||||
4822 | // Iterate over all the extensions to try to promote them. | |||
4823 | for (auto I : Exts) { | |||
4824 | // Early check if we directly have ext(load). | |||
4825 | if (isa<LoadInst>(I->getOperand(0))) { | |||
4826 | ProfitablyMovedExts.push_back(I); | |||
4827 | continue; | |||
4828 | } | |||
4829 | ||||
4830 | // Check whether or not we want to do any promotion. The reason we have | |||
4831 | // this check inside the for loop is to catch the case where an extension | |||
4832 | // is directly fed by a load, because in such a case the extension can be | |||
4833 | // moved up without any promotion on its operands. | |||
4834 | if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) | |||
4835 | return false; | |||
4836 | ||||
4837 | // Get the action to perform the promotion. | |||
4838 | TypePromotionHelper::Action TPH = | |||
4839 | TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); | |||
4840 | // Check if we can promote. | |||
4841 | if (!TPH) { | |||
4842 | // Save the current extension as we cannot move up through its operand. | |||
4843 | ProfitablyMovedExts.push_back(I); | |||
4844 | continue; | |||
4845 | } | |||
4846 | ||||
4847 | // Save the current state. | |||
4848 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4849 | TPT.getRestorationPoint(); | |||
4850 | SmallVector<Instruction *, 4> NewExts; | |||
4851 | unsigned NewCreatedInstsCost = 0; | |||
4852 | unsigned ExtCost = !TLI->isExtFree(I); | |||
4853 | // Promote. | |||
4854 | Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, | |||
4855 | &NewExts, nullptr, *TLI); | |||
4856 | assert(PromotedVal && | |||
4857 | "TypePromotionHelper should have filtered out those cases"); | |||
4858 | ||||
4859 | // Only one extension can be merged into a load. | |||
4860 | // Therefore, if we have more than 1 new extension we heuristically | |||
4861 | // cut this search path, because it means we degrade the code quality. | |||
4862 | // With exactly 2, the transformation is neutral, because we will merge | |||
4863 | // one extension but leave one. However, we optimistically keep going, | |||
4864 | // because the new extension may be removed too. | |||
4865 | long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; | |||
4866 | // FIXME: It would be possible to propagate a negative value instead of | |||
4867 | // conservatively ceiling it to 0. | |||
4868 | TotalCreatedInstsCost = | |||
4869 | std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); | |||
4870 | if (!StressExtLdPromotion && | |||
4871 | (TotalCreatedInstsCost > 1 || | |||
4872 | !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { | |||
4873 | // This promotion is not profitable, rollback to the previous state, and | |||
4874 | // save the current extension in ProfitablyMovedExts as the latest | |||
4875 | // speculative promotion turned out to be unprofitable. | |||
4876 | TPT.rollback(LastKnownGood); | |||
4877 | ProfitablyMovedExts.push_back(I); | |||
4878 | continue; | |||
4879 | } | |||
4880 | // Continue promoting NewExts as far as doing so is profitable. | |||
4881 | SmallVector<Instruction *, 2> NewlyMovedExts; | |||
4882 | (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); | |||
4883 | bool NewPromoted = false; | |||
4884 | for (auto ExtInst : NewlyMovedExts) { | |||
4885 | Instruction *MovedExt = cast<Instruction>(ExtInst); | |||
4886 | Value *ExtOperand = MovedExt->getOperand(0); | |||
4887 | // If we have reached a load, we need this extra profitability check | |||
4888 | // as it could potentially be merged into an ext(load). | |||
4889 | if (isa<LoadInst>(ExtOperand) && | |||
4890 | !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || | |||
4891 | (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) | |||
4892 | continue; | |||
4893 | ||||
4894 | ProfitablyMovedExts.push_back(MovedExt); | |||
4895 | NewPromoted = true; | |||
4896 | } | |||
4897 | ||||
4898 | // If none of the speculative promotions for NewExts is profitable, roll | |||
4899 | // back and save the current extension (I) as the last profitable extension. | |||
4900 | if (!NewPromoted) { | |||
4901 | TPT.rollback(LastKnownGood); | |||
4902 | ProfitablyMovedExts.push_back(I); | |||
4903 | continue; | |||
4904 | } | |||
4905 | // The promotion is profitable. | |||
4906 | Promoted = true; | |||
4907 | } | |||
4908 | return Promoted; | |||
4909 | } | |||
4910 | ||||
4911 | /// Merge redundant sexts when one dominates the other. | |||
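///
/// Illustrative sketch (hypothetical IR, not from the original source): if
/// "%s1 = sext i32 %v to i64" dominates "%s2 = sext i32 %v to i64", the uses
/// of %s2 are rewritten to %s1 and %s2 is removed; if neither dominates the
/// other, both are kept.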
4912 | bool CodeGenPrepare::mergeSExts(Function &F) { | |||
4913 | DominatorTree DT(F); | |||
4914 | bool Changed = false; | |||
4915 | for (auto &Entry : ValToSExtendedUses) { | |||
4916 | SExts &Insts = Entry.second; | |||
4917 | SExts CurPts; | |||
4918 | for (Instruction *Inst : Insts) { | |||
4919 | if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || | |||
4920 | Inst->getOperand(0) != Entry.first) | |||
4921 | continue; | |||
4922 | bool inserted = false; | |||
4923 | for (auto &Pt : CurPts) { | |||
4924 | if (DT.dominates(Inst, Pt)) { | |||
4925 | Pt->replaceAllUsesWith(Inst); | |||
4926 | RemovedInsts.insert(Pt); | |||
4927 | Pt->removeFromParent(); | |||
4928 | Pt = Inst; | |||
4929 | inserted = true; | |||
4930 | Changed = true; | |||
4931 | break; | |||
4932 | } | |||
4933 | if (!DT.dominates(Pt, Inst)) | |||
4934 | // Give up if we need to merge in a common dominator as the | |||
4935 | // experiments show it is not profitable. | |||
4936 | continue; | |||
4937 | Inst->replaceAllUsesWith(Pt); | |||
4938 | RemovedInsts.insert(Inst); | |||
4939 | Inst->removeFromParent(); | |||
4940 | inserted = true; | |||
4941 | Changed = true; | |||
4942 | break; | |||
4943 | } | |||
4944 | if (!inserted) | |||
4945 | CurPts.push_back(Inst); | |||
4946 | } | |||
4947 | } | |||
4948 | return Changed; | |||
4949 | } | |||
4950 | ||||
4951 | // Split large data structures so that the GEPs accessing them can have | |||
4952 | // smaller offsets, allowing them to be sunk to the same blocks as their | |||
4953 | // users. For example, a large struct starting from %base is split into two | |||
4954 | // parts where the second part starts from %new_base. | |||
4955 | // | |||
4956 | // Before: | |||
4957 | // BB0: | |||
4958 | // %base = | |||
4959 | // | |||
4960 | // BB1: | |||
4961 | // %gep0 = gep %base, off0 | |||
4962 | // %gep1 = gep %base, off1 | |||
4963 | // %gep2 = gep %base, off2 | |||
4964 | // | |||
4965 | // BB2: | |||
4966 | // %load1 = load %gep0 | |||
4967 | // %load2 = load %gep1 | |||
4968 | // %load3 = load %gep2 | |||
4969 | // | |||
4970 | // After: | |||
4971 | // BB0: | |||
4972 | // %base = | |||
4973 | // %new_base = gep %base, off0 | |||
4974 | // | |||
4975 | // BB1: | |||
4976 | // %new_gep0 = %new_base | |||
4977 | // %new_gep1 = gep %new_base, off1 - off0 | |||
4978 | // %new_gep2 = gep %new_base, off2 - off0 | |||
4979 | // | |||
4980 | // BB2: | |||
4981 | // %load1 = load i32, i32* %new_gep0 | |||
4982 | // %load2 = load i32, i32* %new_gep1 | |||
4983 | // %load3 = load i32, i32* %new_gep2 | |||
4984 | // | |||
4985 | // %new_gep1 and %new_gep2 can now be sunk to BB2 after the splitting because | |||
4986 | // their offsets are small enough to fit into the addressing mode. | |||
4987 | bool CodeGenPrepare::splitLargeGEPOffsets() { | |||
4988 | bool Changed = false; | |||
4989 | for (auto &Entry : LargeOffsetGEPMap) { | |||
4990 | Value *OldBase = Entry.first; | |||
4991 | SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>> | |||
4992 | &LargeOffsetGEPs = Entry.second; | |||
4993 | auto compareGEPOffset = | |||
4994 | [&](const std::pair<GetElementPtrInst *, int64_t> &LHS, | |||
4995 | const std::pair<GetElementPtrInst *, int64_t> &RHS) { | |||
4996 | if (LHS.first == RHS.first) | |||
4997 | return false; | |||
4998 | if (LHS.second != RHS.second) | |||
4999 | return LHS.second < RHS.second; | |||
5000 | return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first]; | |||
5001 | }; | |||
5002 | // Sort all the GEPs of the same data structure based on the offsets. | |||
5003 | llvm::sort(LargeOffsetGEPs, compareGEPOffset); | |||
5004 | LargeOffsetGEPs.erase( | |||
5005 | std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()), | |||
5006 | LargeOffsetGEPs.end()); | |||
5007 | // Skip if all the GEPs have the same offsets. | |||
5008 | if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second) | |||
5009 | continue; | |||
5010 | GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first; | |||
5011 | int64_t BaseOffset = LargeOffsetGEPs.begin()->second; | |||
5012 | Value *NewBaseGEP = nullptr; | |||
5013 | ||||
5014 | auto LargeOffsetGEP = LargeOffsetGEPs.begin(); | |||
5015 | while (LargeOffsetGEP != LargeOffsetGEPs.end()) { | |||
5016 | GetElementPtrInst *GEP = LargeOffsetGEP->first; | |||
5017 | int64_t Offset = LargeOffsetGEP->second; | |||
5018 | if (Offset != BaseOffset) { | |||
5019 | TargetLowering::AddrMode AddrMode; | |||
5020 | AddrMode.BaseOffs = Offset - BaseOffset; | |||
5021 | // The result type of the GEP might not be the type of the memory | |||
5022 | // access. | |||
5023 | if (!TLI->isLegalAddressingMode(*DL, AddrMode, | |||
5024 | GEP->getResultElementType(), | |||
5025 | GEP->getAddressSpace())) { | |||
5026 | // We need to create a new base if the offset to the current base is | |||
5027 | // too large to fit into the addressing mode. So, a very large struct | |||
5028 | // may be split into several parts. | |||
5029 | BaseGEP = GEP; | |||
5030 | BaseOffset = Offset; | |||
5031 | NewBaseGEP = nullptr; | |||
5032 | } | |||
5033 | } | |||
5034 | ||||
5035 | // Generate a new GEP to replace the current one. | |||
5036 | IRBuilder<> Builder(GEP); | |||
5037 | Type *IntPtrTy = DL->getIntPtrType(GEP->getType()); | |||
5038 | Type *I8PtrTy = | |||
5039 | Builder.getInt8PtrTy(GEP->getType()->getPointerAddressSpace()); | |||
5040 | Type *I8Ty = Builder.getInt8Ty(); | |||
5041 | ||||
5042 | if (!NewBaseGEP) { | |||
5043 | // Create a new base if we don't have one yet. Find the insertion | |||
5044 | // pointer for the new base first. | |||
5045 | BasicBlock::iterator NewBaseInsertPt; | |||
5046 | BasicBlock *NewBaseInsertBB; | |||
5047 | if (auto *BaseI = dyn_cast<Instruction>(OldBase)) { | |||
5048 | // If the base of the struct is an instruction, the new base will be | |||
5049 | // inserted close to it. | |||
5050 | NewBaseInsertBB = BaseI->getParent(); | |||
5051 | if (isa<PHINode>(BaseI)) | |||
5052 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | |||
5053 | else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) { | |||
5054 | NewBaseInsertBB = | |||
5055 | SplitEdge(NewBaseInsertBB, Invoke->getNormalDest()); | |||
5056 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | |||
5057 | } else | |||
5058 | NewBaseInsertPt = std::next(BaseI->getIterator()); | |||
5059 | } else { | |||
5060 | // If the current base is an argument or global value, the new base | |||
5061 | // will be inserted to the entry block. | |||
5062 | NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock(); | |||
5063 | NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt(); | |||
5064 | } | |||
5065 | IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt); | |||
5066 | // Create a new base. | |||
5067 | Value *BaseIndex = ConstantInt::get(IntPtrTy, BaseOffset); | |||
5068 | NewBaseGEP = OldBase; | |||
5069 | if (NewBaseGEP->getType() != I8PtrTy) | |||
5070 | NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy); | |||
5071 | NewBaseGEP = | |||
5072 | NewBaseBuilder.CreateGEP(I8Ty, NewBaseGEP, BaseIndex, "splitgep"); | |||
5073 | NewGEPBases.insert(NewBaseGEP); | |||
5074 | } | |||
5075 | ||||
5076 | Value *NewGEP = NewBaseGEP; | |||
5077 | if (Offset == BaseOffset) { | |||
5078 | if (GEP->getType() != I8PtrTy) | |||
5079 | NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); | |||
5080 | } else { | |||
5081 | // Calculate the new offset for the new GEP. | |||
5082 | Value *Index = ConstantInt::get(IntPtrTy, Offset - BaseOffset); | |||
5083 | NewGEP = Builder.CreateGEP(I8Ty, NewBaseGEP, Index); | |||
5084 | ||||
5085 | if (GEP->getType() != I8PtrTy) | |||
5086 | NewGEP = Builder.CreatePointerCast(NewGEP, GEP->getType()); | |||
5087 | } | |||
5088 | GEP->replaceAllUsesWith(NewGEP); | |||
5089 | LargeOffsetGEPID.erase(GEP); | |||
5090 | LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP); | |||
5091 | GEP->eraseFromParent(); | |||
5092 | Changed = true; | |||
5093 | } | |||
5094 | } | |||
5095 | return Changed; | |||
5096 | } | |||
5097 | ||||
5098 | /// Return true if an ext(load) can be formed from an extension in | |||
5099 | /// \p MovedExts. | |||
5100 | bool CodeGenPrepare::canFormExtLd( | |||
5101 | const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, | |||
5102 | Instruction *&Inst, bool HasPromoted) { | |||
5103 | for (auto *MovedExtInst : MovedExts) { | |||
5104 | if (isa<LoadInst>(MovedExtInst->getOperand(0))) { | |||
5105 | LI = cast<LoadInst>(MovedExtInst->getOperand(0)); | |||
5106 | Inst = MovedExtInst; | |||
5107 | break; | |||
5108 | } | |||
5109 | } | |||
5110 | if (!LI) | |||
5111 | return false; | |||
5112 | ||||
5113 | // If they're already in the same block, there's nothing to do. | |||
5114 | // Make the cheap checks first if we did not promote. | |||
5115 | // If we promoted, we need to check if it is indeed profitable. | |||
5116 | if (!HasPromoted && LI->getParent() == Inst->getParent()) | |||
5117 | return false; | |||
5118 | ||||
5119 | return TLI->isExtLoad(LI, Inst, *DL); | |||
5120 | } | |||
5121 | ||||
5122 | /// Move a zext or sext fed by a load into the same basic block as the load, | |||
5123 | /// unless conditions are unfavorable. This allows SelectionDAG to fold the | |||
5124 | /// extend into the load. | |||
5125 | /// | |||
5126 | /// E.g., | |||
5127 | /// \code | |||
5128 | /// %ld = load i32* %addr | |||
5129 | /// %add = add nuw i32 %ld, 4 | |||
5130 | /// %zext = zext i32 %add to i64 | |||
5131 | /// \endcode | |||
5132 | /// => | |||
5133 | /// \code | |||
5134 | /// %ld = load i32* %addr | |||
5135 | /// %zext = zext i32 %ld to i64 | |||
5136 | /// %add = add nuw i64 %zext, 4 | |||
5137 | /// \endcode | |||
5138 | /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), | |||
5139 | /// which allows us to match zext(load i32*) to i64. | |||
5140 | /// | |||
5141 | /// Also, try to promote the computations used to obtain a sign extended | |||
5142 | /// value used in memory accesses. | |||
5143 | /// E.g., | |||
5144 | /// \code | |||
5145 | /// a = add nsw i32 b, 3 | |||
5146 | /// d = sext i32 a to i64 | |||
5147 | /// e = getelementptr ..., i64 d | |||
5148 | /// \endcode | |||
5149 | /// => | |||
5150 | /// \code | |||
5151 | /// f = sext i32 b to i64 | |||
5152 | /// a = add nsw i64 f, 3 | |||
5153 | /// e = getelementptr ..., i64 a | |||
5154 | /// \endcode | |||
5155 | /// | |||
5156 | /// \p Inst[in/out] the extension may be modified during the process if some | |||
5157 | /// promotions apply. | |||
5158 | bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { | |||
5159 | // ExtLoad formation and the address type promotion infrastructure require | |||
5160 | // TLI in order to be effective. | |||
5161 | if (!TLI) | |||
5162 | return false; | |||
5163 | ||||
5164 | bool AllowPromotionWithoutCommonHeader = false; | |||
5165 | // See if this is an interesting sext operation for address type promotion | |||
5166 | // before trying to promote it, e.g., one with the right type that is used | |||
5167 | // in memory accesses. | |||
5168 | bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( | |||
5169 | *Inst, AllowPromotionWithoutCommonHeader); | |||
5170 | TypePromotionTransaction TPT(RemovedInsts); | |||
5171 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5172 | TPT.getRestorationPoint(); | |||
5173 | SmallVector<Instruction *, 1> Exts; | |||
5174 | SmallVector<Instruction *, 2> SpeculativelyMovedExts; | |||
5175 | Exts.push_back(Inst); | |||
5176 | ||||
5177 | bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); | |||
5178 | ||||
5179 | // Look for a load being extended. | |||
5180 | LoadInst *LI = nullptr; | |||
5181 | Instruction *ExtFedByLoad; | |||
5182 | ||||
5183 | // Try to promote a chain of computation if it allows to form an extended | |||
5184 | // load. | |||
5185 | if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { | |||
5186 | assert(LI && ExtFedByLoad && "Expect a valid load and extension"); | |||
5187 | TPT.commit(); | |||
5188 | // Move the extend into the same block as the load | |||
5189 | ExtFedByLoad->moveAfter(LI); | |||
5190 | // CGP does not check if the zext would be speculatively executed when moved | |||
5191 | // to the same basic block as the load. Preserving its original location | |||
5192 | // would pessimize the debugging experience, as well as negatively impact | |||
5193 | // the quality of sample pgo. We don't want to use "line 0" as that has a | |||
5194 | // size cost in the line-table section and logically the zext can be seen as | |||
5195 | // part of the load. Therefore we conservatively reuse the same debug | |||
5196 | // location for the load and the zext. | |||
5197 | ExtFedByLoad->setDebugLoc(LI->getDebugLoc()); | |||
5198 | ++NumExtsMoved; | |||
5199 | Inst = ExtFedByLoad; | |||
5200 | return true; | |||
5201 | } | |||
5202 | ||||
5203 | // Continue promoting SExts if known as considerable depending on targets. | |||
5204 | if (ATPConsiderable && | |||
5205 | performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, | |||
5206 | HasPromoted, TPT, SpeculativelyMovedExts)) | |||
5207 | return true; | |||
5208 | ||||
5209 | TPT.rollback(LastKnownGood); | |||
5210 | return false; | |||
5211 | } | |||
5212 | ||||
5213 | // Perform address type promotion if doing so is profitable. | |||
5214 | // If AllowPromotionWithoutCommonHeader == false, we should find other sext | |||
5215 | // instructions that sign extended the same initial value. However, if | |||
5216 | // AllowPromotionWithoutCommonHeader == true, we expect promoting the | |||
5217 | // extension to be profitable on its own. | |||
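//
// Illustrative sketch (hypothetical IR, not from the original source): the
// two chains below share the header %v; the first one visited is deferred
// and recorded, and seeing the second makes both candidates for promotion:
//   %s1 = sext i32 %v to i64   ; first chain, deferred
//   %s2 = sext i32 %v to i64   ; same header, triggers promotion of both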
5218 | bool CodeGenPrepare::performAddressTypePromotion( | |||
5219 | Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, | |||
5220 | bool HasPromoted, TypePromotionTransaction &TPT, | |||
5221 | SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { | |||
5222 | bool Promoted = false; | |||
5223 | SmallPtrSet<Instruction *, 1> UnhandledExts; | |||
5224 | bool AllSeenFirst = true; | |||
5225 | for (auto I : SpeculativelyMovedExts) { | |||
5226 | Value *HeadOfChain = I->getOperand(0); | |||
5227 | DenseMap<Value *, Instruction *>::iterator AlreadySeen = | |||
5228 | SeenChainsForSExt.find(HeadOfChain); | |||
5229 | // If there is an unhandled SExt which has the same header, try to promote | |||
5230 | // it as well. | |||
5231 | if (AlreadySeen != SeenChainsForSExt.end()) { | |||
5232 | if (AlreadySeen->second != nullptr) | |||
5233 | UnhandledExts.insert(AlreadySeen->second); | |||
5234 | AllSeenFirst = false; | |||
5235 | } | |||
5236 | } | |||
5237 | ||||
5238 | if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && | |||
5239 | SpeculativelyMovedExts.size() == 1)) { | |||
5240 | TPT.commit(); | |||
5241 | if (HasPromoted) | |||
5242 | Promoted = true; | |||
5243 | for (auto I : SpeculativelyMovedExts) { | |||
5244 | Value *HeadOfChain = I->getOperand(0); | |||
5245 | SeenChainsForSExt[HeadOfChain] = nullptr; | |||
5246 | ValToSExtendedUses[HeadOfChain].push_back(I); | |||
5247 | } | |||
5248 | // Update Inst as promotion happened. | |||
5249 | Inst = SpeculativelyMovedExts.pop_back_val(); | |||
5250 | } else { | |||
5251 | // This is the first chain visited from this header; keep the current chain | |||
5252 | // as unhandled. Defer promoting it until we encounter another SExt | |||
5253 | // chain derived from the same header. | |||
5254 | for (auto I : SpeculativelyMovedExts) { | |||
5255 | Value *HeadOfChain = I->getOperand(0); | |||
5256 | SeenChainsForSExt[HeadOfChain] = Inst; | |||
5257 | } | |||
5258 | return false; | |||
5259 | } | |||
5260 | ||||
5261 | if (!AllSeenFirst && !UnhandledExts.empty()) | |||
5262 | for (auto VisitedSExt : UnhandledExts) { | |||
5263 | if (RemovedInsts.count(VisitedSExt)) | |||
5264 | continue; | |||
5265 | TypePromotionTransaction TPT(RemovedInsts); | |||
5266 | SmallVector<Instruction *, 1> Exts; | |||
5267 | SmallVector<Instruction *, 2> Chains; | |||
5268 | Exts.push_back(VisitedSExt); | |||
5269 | bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); | |||
5270 | TPT.commit(); | |||
5271 | if (HasPromoted) | |||
5272 | Promoted = true; | |||
5273 | for (auto I : Chains) { | |||
5274 | Value *HeadOfChain = I->getOperand(0); | |||
5275 | // Mark this as handled. | |||
5276 | SeenChainsForSExt[HeadOfChain] = nullptr; | |||
5277 | ValToSExtendedUses[HeadOfChain].push_back(I); | |||
5278 | } | |||
5279 | } | |||
5280 | return Promoted; | |||
5281 | } | |||
5282 | ||||
5283 | bool CodeGenPrepare::optimizeExtUses(Instruction *I) { | |||
5284 | BasicBlock *DefBB = I->getParent(); | |||
5285 | ||||
5286 | // If the result of a {s|z}ext and its source are both live out, rewrite all | |||
5287 | // other uses of the source with result of extension. | |||
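// Illustrative sketch (hypothetical IR, not from the original source),
// assuming truncation is free on the target:
//   DefBB:    %ext = zext i32 %src to i64
//   OtherBB:  %use = add i32 %src, 1
// becomes
//   OtherBB:  %t   = trunc i64 %ext to i32
//             %use = add i32 %t, 1
// so only %ext, rather than both %ext and %src, stays live across blocks.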
5288 | Value *Src = I->getOperand(0); | |||
5289 | if (Src->hasOneUse()) | |||
5290 | return false; | |||
5291 | ||||
5292 | // Only do this xform if truncating is free. | |||
5293 | if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) | |||
5294 | return false; | |||
5295 | ||||
5296 | // Only safe to perform the optimization if the source is also defined in | |||
5297 | // this block. | |||
5298 | if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) | |||
5299 | return false; | |||
5300 | ||||
5301 | bool DefIsLiveOut = false; | |||
5302 | for (User *U : I->users()) { | |||
5303 | Instruction *UI = cast<Instruction>(U); | |||
5304 | ||||
5305 | // Figure out which BB this ext is used in. | |||
5306 | BasicBlock *UserBB = UI->getParent(); | |||
5307 | if (UserBB == DefBB) continue; | |||
5308 | DefIsLiveOut = true; | |||
5309 | break; | |||
5310 | } | |||
5311 | if (!DefIsLiveOut) | |||
5312 | return false; | |||
5313 | ||||
5314 | // Make sure none of the uses are PHI nodes. | |||
5315 | for (User *U : Src->users()) { | |||
5316 | Instruction *UI = cast<Instruction>(U); | |||
5317 | BasicBlock *UserBB = UI->getParent(); | |||
5318 | if (UserBB == DefBB) continue; | |||
5319 | // Be conservative. We don't want this xform to end up introducing | |||
5320 | // reloads just before load / store instructions. | |||
5321 | if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) | |||
5322 | return false; | |||
5323 | } | |||
5324 | ||||
5325 | // InsertedTruncs - Only insert one trunc in each block once. | |||
5326 | DenseMap<BasicBlock*, Instruction*> InsertedTruncs; | |||
5327 | ||||
5328 | bool MadeChange = false; | |||
5329 | for (Use &U : Src->uses()) { | |||
5330 | Instruction *User = cast<Instruction>(U.getUser()); | |||
5331 | ||||
5332 | // Figure out which BB this ext is used in. | |||
5333 | BasicBlock *UserBB = User->getParent(); | |||
5334 | if (UserBB == DefBB) continue; | |||
5335 | ||||
5336 | // Both src and def are live in this block. Rewrite the use. | |||
5337 | Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; | |||
5338 | ||||
5339 | if (!InsertedTrunc) { | |||
5340 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
5341 | assert(InsertPt != UserBB->end()); | |||
5342 | InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); | |||
5343 | InsertedInsts.insert(InsertedTrunc); | |||
5344 | } | |||
5345 | ||||
5346 | // Replace a use of the {s|z}ext source with a use of the result. | |||
5347 | U = InsertedTrunc; | |||
5348 | ++NumExtUses; | |||
5349 | MadeChange = true; | |||
5350 | } | |||
5351 | ||||
5352 | return MadeChange; | |||
5353 | } | |||
5354 | ||||
5355 | // Find loads whose uses only use some of the loaded value's bits. Add an "and" | |||
5356 | // just after the load if the target can fold this into one extload instruction, | |||
5357 | // with the hope of eliminating some of the other later "and" instructions using | |||
5358 | // the loaded value. "and"s that are made trivially redundant by the insertion | |||
5359 | // of the new "and" are removed by this function, while others (e.g. those whose | |||
5360 | // path from the load goes through a phi) are left for isel to potentially | |||
5361 | // remove. | |||
5362 | // | |||
5363 | // For example: | |||
5364 | // | |||
5365 | // b0: | |||
5366 | // x = load i32 | |||
5367 | // ... | |||
5368 | // b1: | |||
5369 | // y = and x, 0xff | |||
5370 | // z = use y | |||
5371 | // | |||
5372 | // becomes: | |||
5373 | // | |||
5374 | // b0: | |||
5375 | // x = load i32 | |||
5376 | // x' = and x, 0xff | |||
5377 | // ... | |||
5378 | // b1: | |||
5379 | // z = use x' | |||
5380 | // | |||
5381 | // whereas: | |||
5382 | // | |||
5383 | // b0: | |||
5384 | // x1 = load i32 | |||
5385 | // ... | |||
5386 | // b1: | |||
5387 | // x2 = load i32 | |||
5388 | // ... | |||
5389 | // b2: | |||
5390 | // x = phi x1, x2 | |||
5391 | // y = and x, 0xff | |||
5392 | // | |||
5393 | // becomes (after a call to optimizeLoadExt for each load): | |||
5394 | // | |||
5395 | // b0: | |||
5396 | // x1 = load i32 | |||
5397 | // x1' = and x1, 0xff | |||
5398 | // ... | |||
5399 | // b1: | |||
5400 | // x2 = load i32 | |||
5401 | // x2' = and x2, 0xff | |||
5402 | // ... | |||
5403 | // b2: | |||
5404 | // x = phi x1', x2' | |||
5405 | // y = and x, 0xff | |||
5406 | bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { | |||
5407 | if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy()) | |||
5408 | return false; | |||
5409 | ||||
5410 | // Skip loads we've already transformed. | |||
5411 | if (Load->hasOneUse() && | |||
5412 | InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) | |||
5413 | return false; | |||
5414 | ||||
5415 | // Look at all uses of Load, looking through phis, to determine how many bits | |||
5416 | // of the loaded value are needed. | |||
5417 | SmallVector<Instruction *, 8> WorkList; | |||
5418 | SmallPtrSet<Instruction *, 16> Visited; | |||
5419 | SmallVector<Instruction *, 8> AndsToMaybeRemove; | |||
5420 | for (auto *U : Load->users()) | |||
5421 | WorkList.push_back(cast<Instruction>(U)); | |||
5422 | ||||
5423 | EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); | |||
5424 | unsigned BitWidth = LoadResultVT.getSizeInBits(); | |||
5425 | APInt DemandBits(BitWidth, 0); | |||
5426 | APInt WidestAndBits(BitWidth, 0); | |||
5427 | ||||
5428 | while (!WorkList.empty()) { | |||
5429 | Instruction *I = WorkList.back(); | |||
5430 | WorkList.pop_back(); | |||
5431 | ||||
5432 | // Break use-def graph loops. | |||
5433 | if (!Visited.insert(I).second) | |||
5434 | continue; | |||
5435 | ||||
5436 | // For a PHI node, push all of its users. | |||
5437 | if (auto *Phi = dyn_cast<PHINode>(I)) { | |||
5438 | for (auto *U : Phi->users()) | |||
5439 | WorkList.push_back(cast<Instruction>(U)); | |||
5440 | continue; | |||
5441 | } | |||
5442 | ||||
5443 | switch (I->getOpcode()) { | |||
5444 | case Instruction::And: { | |||
5445 | auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); | |||
5446 | if (!AndC) | |||
5447 | return false; | |||
5448 | APInt AndBits = AndC->getValue(); | |||
5449 | DemandBits |= AndBits; | |||
5450 | // Keep track of the widest and mask we see. | |||
5451 | if (AndBits.ugt(WidestAndBits)) | |||
5452 | WidestAndBits = AndBits; | |||
5453 | if (AndBits == WidestAndBits && I->getOperand(0) == Load) | |||
5454 | AndsToMaybeRemove.push_back(I); | |||
5455 | break; | |||
5456 | } | |||
5457 | ||||
5458 | case Instruction::Shl: { | |||
5459 | auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); | |||
5460 | if (!ShlC) | |||
5461 | return false; | |||
5462 | uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); | |||
5463 | DemandBits.setLowBits(BitWidth - ShiftAmt); | |||
5464 | break; | |||
5465 | } | |||
5466 | ||||
5467 | case Instruction::Trunc: { | |||
5468 | EVT TruncVT = TLI->getValueType(*DL, I->getType()); | |||
5469 | unsigned TruncBitWidth = TruncVT.getSizeInBits(); | |||
5470 | DemandBits.setLowBits(TruncBitWidth); | |||
5471 | break; | |||
5472 | } | |||
5473 | ||||
5474 | default: | |||
5475 | return false; | |||
5476 | } | |||
5477 | } | |||
5478 | ||||
5479 | uint32_t ActiveBits = DemandBits.getActiveBits(); | |||
5480 | // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the | |||
5481 | // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, | |||
5482 | // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but | |||
5483 | // (and (load x) 1) is not matched as a single instruction, rather as a LDR | |||
5484 | // followed by an AND. | |||
5485 | // TODO: Look into removing this restriction by fixing backends to either | |||
5486 | // return false for isLoadExtLegal for i1 or have them select this pattern to | |||
5487 | // a single instruction. | |||
5488 | // | |||
5489 | // Also avoid hoisting if we didn't see any ands with the exact DemandBits | |||
5490 | // mask, since these are the only ands that will be removed by isel. | |||
5491 | if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || | |||
5492 | WidestAndBits != DemandBits) | |||
5493 | return false; | |||
5494 | ||||
5495 | LLVMContext &Ctx = Load->getType()->getContext(); | |||
5496 | Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); | |||
5497 | EVT TruncVT = TLI->getValueType(*DL, TruncTy); | |||
5498 | ||||
5499 | // Reject cases that won't be matched as extloads. | |||
5500 | if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || | |||
5501 | !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) | |||
5502 | return false; | |||
5503 | ||||
5504 | IRBuilder<> Builder(Load->getNextNode()); | |||
5505 | auto *NewAnd = dyn_cast<Instruction>( | |||
5506 | Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); | |||
5507 | // Mark this instruction as "inserted by CGP", so that other | |||
5508 | // optimizations don't touch it. | |||
5509 | InsertedInsts.insert(NewAnd); | |||
5510 | ||||
5511 | // Replace all uses of load with new and (except for the use of load in the | |||
5512 | // new and itself). | |||
5513 | Load->replaceAllUsesWith(NewAnd); | |||
5514 | NewAnd->setOperand(0, Load); | |||
5515 | ||||
5516 | // Remove any and instructions that are now redundant. | |||
5517 | for (auto *And : AndsToMaybeRemove) | |||
5518 | // Check that the and mask is the same as the one we decided to put on the | |||
5519 | // new and. | |||
5520 | if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { | |||
5521 | And->replaceAllUsesWith(NewAnd); | |||
5522 | if (&*CurInstIterator == And) | |||
5523 | CurInstIterator = std::next(And->getIterator()); | |||
5524 | And->eraseFromParent(); | |||
5525 | ++NumAndUses; | |||
5526 | } | |||
5527 | ||||
5528 | ++NumAndsAdded; | |||
5529 | return true; | |||
5530 | } | |||
5531 | ||||
5532 | /// Check if V (an operand of a select instruction) is an expensive instruction | |||
5533 | /// that is only used once. | |||
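///
/// Illustrative sketch (hypothetical IR, not from the original source): %div
/// below is expensive and has a single use, so it is a candidate for sinking
/// behind a branch and executing only when %c is true:
///
///   %div = sdiv i32 %a, %b
///   %res = select i1 %c, i32 %div, i32 0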
5534 | static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { | |||
5535 | auto *I = dyn_cast<Instruction>(V); | |||
5536 | // If it's safe to speculatively execute, then it should not have side | |||
5537 | // effects; therefore, it's safe to sink and possibly *not* execute. | |||
5538 | return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && | |||
5539 | TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; | |||
5540 | } | |||
5541 | ||||
5542 | /// Returns true if a SelectInst should be turned into an explicit branch. | |||
5543 | static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, | |||
5544 | const TargetLowering *TLI, | |||
5545 | SelectInst *SI) { | |||
5546 | // If even a predictable select is cheap, then a branch can't be cheaper. | |||
5547 | if (!TLI->isPredictableSelectExpensive()) | |||
5548 | return false; | |||
5549 | ||||
5550 | // FIXME: This should use the same heuristics as IfConversion to determine | |||
5551 | // whether a select is better represented as a branch. | |||
5552 | ||||
5553 | // If metadata tells us that the select condition is obviously predictable, | |||
5554 | // then we want to replace the select with a branch. | |||
5555 | uint64_t TrueWeight, FalseWeight; | |||
5556 | if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { | |||
5557 | uint64_t Max = std::max(TrueWeight, FalseWeight); | |||
5558 | uint64_t Sum = TrueWeight + FalseWeight; | |||
5559 | if (Sum != 0) { | |||
5560 | auto Probability = BranchProbability::getBranchProbability(Max, Sum); | |||
5561 | if (Probability > TLI->getPredictableBranchThreshold()) | |||
5562 | return true; | |||
5563 | } | |||
5564 | } | |||
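// Illustrative arithmetic (hypothetical weights, not from the original
// source): TrueWeight = 1000 and FalseWeight = 1 give a probability of
// 1000/1001 (about 99.9%); if that exceeds the target's predictable-branch
// threshold, the select is turned into a branch.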
5565 | ||||
5566 | CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); | |||
5567 | ||||
5568 | // If a branch is predictable, an out-of-order CPU can avoid blocking on its | |||
5569 | // comparison condition. If the compare has more than one use, there's | |||
5570 | // probably another cmov or setcc around, so it's not worth emitting a branch. | |||
5571 | if (!Cmp || !Cmp->hasOneUse()) | |||
5572 | return false; | |||
5573 | ||||
5574 | // If either operand of the select is expensive and only needed on one side | |||
5575 | // of the select, we should form a branch. | |||
5576 | if (sinkSelectOperand(TTI, SI->getTrueValue()) || | |||
5577 | sinkSelectOperand(TTI, SI->getFalseValue())) | |||
5578 | return true; | |||
5579 | ||||
5580 | return false; | |||
5581 | } | |||
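// Worked example of the profile check (assuming the common 99/100 default
// returned by getPredictableBranchThreshold): branch weights of 2000:1 give
// Max/Sum = 2000/2001 > 99/100, so the select is considered branch-worthy;
// weights of 3:1 give 3/4, which falls below the threshold.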
5582 | ||||
5583 | /// If \p isTrue is true, return the true value of \p SI, otherwise return | |||
5584 | /// the false value of \p SI. If the true/false value of \p SI is defined by any | |||
5585 | /// select instructions in \p Selects, look through the defining select | |||
5586 | /// instruction until the true/false value is not defined in \p Selects. | |||
5587 | static Value *getTrueOrFalseValue( | |||
5588 | SelectInst *SI, bool isTrue, | |||
5589 | const SmallPtrSet<const Instruction *, 2> &Selects) { | |||
5590 | Value *V = nullptr; | |||
5591 | ||||
5592 | for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); | |||
5593 | DefSI = dyn_cast<SelectInst>(V)) { | |||
5594 | assert(DefSI->getCondition() == SI->getCondition() && | |||
5595 |        "The condition of DefSI does not match with SI"); | |||
5596 | V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); | |||
5597 | } | |||
5598 | return V; | |||
5599 | } | |||
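// Example: for the chain
//   %s1 = select i1 %c, i32 %a, i32 %b
//   %s2 = select i1 %c, i32 %s1, i32 %d
// with Selects == {%s1, %s2}, getTrueOrFalseValue(%s2, /*isTrue=*/true, ...)
// looks through %s1 and returns %a, while the false value of %s2 is %d.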
5600 | ||||
5601 | /// If we have a SelectInst that will likely profit from branch prediction, | |||
5602 | /// turn it into a branch. | |||
5603 | bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { | |||
5604 | // If branch conversion isn't desirable, exit early. | |||
5605 | if (DisableSelectToBranch || OptSize || !TLI) | |||
5606 | return false; | |||
5607 | ||||
5608 | // Find all consecutive select instructions that share the same condition. | |||
5609 | SmallVector<SelectInst *, 2> ASI; | |||
5610 | ASI.push_back(SI); | |||
5611 | for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); | |||
5612 | It != SI->getParent()->end(); ++It) { | |||
5613 | SelectInst *I = dyn_cast<SelectInst>(&*It); | |||
5614 | if (I && SI->getCondition() == I->getCondition()) { | |||
5615 | ASI.push_back(I); | |||
5616 | } else { | |||
5617 | break; | |||
5618 | } | |||
5619 | } | |||
5620 | ||||
5621 | SelectInst *LastSI = ASI.back(); | |||
5622 | // Increment the current iterator to skip the rest of the select instructions, | |||
5623 | // because they will either all be lowered to branches or none of them will be. | |||
5624 | CurInstIterator = std::next(LastSI->getIterator()); | |||
5625 | ||||
5626 | bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); | |||
5627 | ||||
5628 | // Can we convert the 'select' to a conditional branch? | |||
5629 | if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable)) | |||
5630 | return false; | |||
5631 | ||||
5632 | TargetLowering::SelectSupportKind SelectKind; | |||
5633 | if (VectorCond) | |||
5634 | SelectKind = TargetLowering::VectorMaskSelect; | |||
5635 | else if (SI->getType()->isVectorTy()) | |||
5636 | SelectKind = TargetLowering::ScalarCondVectorVal; | |||
5637 | else | |||
5638 | SelectKind = TargetLowering::ScalarValSelect; | |||
5639 | ||||
5640 | if (TLI->isSelectSupported(SelectKind) && | |||
5641 | !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) | |||
5642 | return false; | |||
5643 | ||||
5644 | ModifiedDT = true; | |||
5645 | ||||
5646 | // Transform a sequence like this: | |||
5647 | // start: | |||
5648 | // %cmp = cmp uge i32 %a, %b | |||
5649 | // %sel = select i1 %cmp, i32 %c, i32 %d | |||
5650 | // | |||
5651 | // Into: | |||
5652 | // start: | |||
5653 | // %cmp = cmp uge i32 %a, %b | |||
5654 | // br i1 %cmp, label %select.true, label %select.false | |||
5655 | // select.true: | |||
5656 | // br label %select.end | |||
5657 | // select.false: | |||
5658 | // br label %select.end | |||
5659 | // select.end: | |||
5660 | // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] | |||
5661 | // | |||
5662 | // In addition, we may sink instructions that produce %c or %d from | |||
5663 | // the entry block into the destination(s) of the new branch. | |||
5664 | // If the true or false blocks do not contain a sunken instruction, that | |||
5665 | // block and its branch may be optimized away. In that case, one side of the | |||
5666 | // first branch will point directly to select.end, and the corresponding PHI | |||
5667 | // predecessor block will be the start block. | |||
5668 | ||||
5669 | // First, we split the block containing the select into 2 blocks. | |||
5670 | BasicBlock *StartBlock = SI->getParent(); | |||
5671 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); | |||
5672 | BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); | |||
5673 | ||||
5674 | // Delete the unconditional branch that was just created by the split. | |||
5675 | StartBlock->getTerminator()->eraseFromParent(); | |||
5676 | ||||
5677 | // These are the new basic blocks for the conditional branch. | |||
5678 | // At least one will become an actual new basic block. | |||
5679 | BasicBlock *TrueBlock = nullptr; | |||
5680 | BasicBlock *FalseBlock = nullptr; | |||
5681 | BranchInst *TrueBranch = nullptr; | |||
5682 | BranchInst *FalseBranch = nullptr; | |||
5683 | ||||
5684 | // Sink expensive instructions into the conditional blocks to avoid executing | |||
5685 | // them speculatively. | |||
5686 | for (SelectInst *SI : ASI) { | |||
5687 | if (sinkSelectOperand(TTI, SI->getTrueValue())) { | |||
5688 | if (TrueBlock == nullptr) { | |||
5689 | TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", | |||
5690 | EndBlock->getParent(), EndBlock); | |||
5691 | TrueBranch = BranchInst::Create(EndBlock, TrueBlock); | |||
5692 | TrueBranch->setDebugLoc(SI->getDebugLoc()); | |||
5693 | } | |||
5694 | auto *TrueInst = cast<Instruction>(SI->getTrueValue()); | |||
5695 | TrueInst->moveBefore(TrueBranch); | |||
5696 | } | |||
5697 | if (sinkSelectOperand(TTI, SI->getFalseValue())) { | |||
5698 | if (FalseBlock == nullptr) { | |||
5699 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", | |||
5700 | EndBlock->getParent(), EndBlock); | |||
5701 | FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | |||
5702 | FalseBranch->setDebugLoc(SI->getDebugLoc()); | |||
5703 | } | |||
5704 | auto *FalseInst = cast<Instruction>(SI->getFalseValue()); | |||
5705 | FalseInst->moveBefore(FalseBranch); | |||
5706 | } | |||
5707 | } | |||
5708 | ||||
5709 | // If there was nothing to sink, then arbitrarily choose the 'false' side | |||
5710 | // for a new input value to the PHI. | |||
5711 | if (TrueBlock == FalseBlock) { | |||
5712 | assert(TrueBlock == nullptr && | |||
5713 |        "Unexpected basic block transform while optimizing select"); | |||
5714 | ||||
5715 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", | |||
5716 | EndBlock->getParent(), EndBlock); | |||
5717 | auto *FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | |||
5718 | FalseBranch->setDebugLoc(SI->getDebugLoc()); | |||
5719 | } | |||
5720 | ||||
5721 | // Insert the real conditional branch based on the original condition. | |||
5722 | // If we did not create a new block for one of the 'true' or 'false' paths | |||
5723 | // of the condition, it means that side of the branch goes to the end block | |||
5724 | // directly and the path originates from the start block from the point of | |||
5725 | // view of the new PHI. | |||
5726 | BasicBlock *TT, *FT; | |||
5727 | if (TrueBlock == nullptr) { | |||
5728 | TT = EndBlock; | |||
5729 | FT = FalseBlock; | |||
5730 | TrueBlock = StartBlock; | |||
5731 | } else if (FalseBlock == nullptr) { | |||
5732 | TT = TrueBlock; | |||
5733 | FT = EndBlock; | |||
5734 | FalseBlock = StartBlock; | |||
5735 | } else { | |||
5736 | TT = TrueBlock; | |||
5737 | FT = FalseBlock; | |||
5738 | } | |||
5739 | IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); | |||
5740 | ||||
5741 | SmallPtrSet<const Instruction *, 2> INS; | |||
5742 | INS.insert(ASI.begin(), ASI.end()); | |||
5743 | // Use a reverse iterator because a later select may use the value of an | |||
5744 | // earlier select, and we need to propagate the value through the earlier | |||
5745 | // select to get the PHI operand. | |||
5746 | for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { | |||
5747 | SelectInst *SI = *It; | |||
5748 | // The select itself is replaced with a PHI Node. | |||
5749 | PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); | |||
5750 | PN->takeName(SI); | |||
5751 | PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); | |||
5752 | PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); | |||
5753 | PN->setDebugLoc(SI->getDebugLoc()); | |||
5754 | ||||
5755 | SI->replaceAllUsesWith(PN); | |||
5756 | SI->eraseFromParent(); | |||
5757 | INS.erase(SI); | |||
5758 | ++NumSelectsExpanded; | |||
5759 | } | |||
5760 | ||||
5761 | // Instruct OptimizeBlock to skip to the next block. | |||
5762 | CurInstIterator = StartBlock->end(); | |||
5763 | return true; | |||
5764 | } | |||
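// Illustrative sinking case for the transform above: given
//   %div = fdiv float %a, %b
//   %sel = select i1 %cmp, float %div, float %c
// the fdiv is moved into the new "select.true.sink" block, so the expensive
// division executes only on the path that uses its result:
//   br i1 %cmp, label %select.true.sink, label %select.end
// select.true.sink:
//   %div = fdiv float %a, %b
//   br label %select.end
// select.end:
//   %sel = phi float [ %div, %select.true.sink ], [ %c, %start ]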
5765 | ||||
5766 | static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { | |||
5767 | SmallVector<int, 16> Mask(SVI->getShuffleMask()); | |||
5768 | int SplatElem = -1; | |||
5769 | for (unsigned i = 0; i < Mask.size(); ++i) { | |||
5770 | if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) | |||
5771 | return false; | |||
5772 | SplatElem = Mask[i]; | |||
5773 | } | |||
5774 | ||||
5775 | return true; | |||
5776 | } | |||
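// Mask examples: <0,0,0,0> and <0,0,undef,0> are accepted as broadcasts
// (an undef lane is encoded as -1 and resets SplatElem), while <0,1,0,1>
// is rejected as soon as the second lane disagrees with the first.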
5777 | ||||
5778 | /// Some targets have expensive vector shifts if the lanes aren't all the same | |||
5779 | /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases | |||
5780 | /// it's often worth sinking a shufflevector splat down to its use so that | |||
5781 | /// codegen can spot all lanes are identical. | |||
5782 | bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { | |||
5783 | BasicBlock *DefBB = SVI->getParent(); | |||
5784 | ||||
5785 | // Only do this xform if variable vector shifts are particularly expensive. | |||
5786 | if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) | |||
5787 | return false; | |||
5788 | ||||
5789 | // We only expect better codegen by sinking a shuffle if we can recognise a | |||
5790 | // constant splat. | |||
5791 | if (!isBroadcastShuffle(SVI)) | |||
5792 | return false; | |||
5793 | ||||
5794 | // InsertedShuffles - Only insert a shuffle in each block once. | |||
5795 | DenseMap<BasicBlock*, Instruction*> InsertedShuffles; | |||
5796 | ||||
5797 | bool MadeChange = false; | |||
5798 | for (User *U : SVI->users()) { | |||
5799 | Instruction *UI = cast<Instruction>(U); | |||
5800 | ||||
5801 | // Figure out which BB this shuffle is used in. | |||
5802 | BasicBlock *UserBB = UI->getParent(); | |||
5803 | if (UserBB == DefBB) continue; | |||
5804 | ||||
5805 | // For now only apply this when the splat is used by a shift instruction. | |||
5806 | if (!UI->isShift()) continue; | |||
5807 | ||||
5808 | // Everything checks out, sink the shuffle if the user's block doesn't | |||
5809 | // already have a copy. | |||
5810 | Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; | |||
5811 | ||||
5812 | if (!InsertedShuffle) { | |||
5813 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
5814 | assert(InsertPt != UserBB->end()); | |||
5815 | InsertedShuffle = | |||
5816 | new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), | |||
5817 | SVI->getOperand(2), "", &*InsertPt); | |||
5818 | } | |||
5819 | ||||
5820 | UI->replaceUsesOfWith(SVI, InsertedShuffle); | |||
5821 | MadeChange = true; | |||
5822 | } | |||
5823 | ||||
5824 | // If we removed all uses, nuke the shuffle. | |||
5825 | if (SVI->use_empty()) { | |||
5826 | SVI->eraseFromParent(); | |||
5827 | MadeChange = true; | |||
5828 | } | |||
5829 | ||||
5830 | return MadeChange; | |||
5831 | } | |||
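// Illustrative sketch of the sinking performed above: a splat defined in one
// block and used by a shift in another,
//   def:  %amt = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
//   use:  %sh = shl <4 x i32> %x, %amt
// gets a per-block copy of the shuffle placed right before its use, so the
// block-at-a-time instruction selector can prove all shift lanes are equal
// and pick a cheap scalar-amount shift.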
5832 | ||||
5833 | bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { | |||
5834 | if (!TLI || !DL) | |||
5835 | return false; | |||
5836 | ||||
5837 | Value *Cond = SI->getCondition(); | |||
5838 | Type *OldType = Cond->getType(); | |||
5839 | LLVMContext &Context = Cond->getContext(); | |||
5840 | MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); | |||
5841 | unsigned RegWidth = RegType.getSizeInBits(); | |||
5842 | ||||
5843 | if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) | |||
5844 | return false; | |||
5845 | ||||
5846 | // If the register width is greater than the type width, expand the condition | |||
5847 | // of the switch instruction and each case constant to the width of the | |||
5848 | // register. By widening the type of the switch condition, subsequent | |||
5849 | // comparisons (for case comparisons) will not need to be extended to the | |||
5850 | // preferred register width, so we will potentially eliminate N-1 extends, | |||
5851 | // where N is the number of cases in the switch. | |||
5852 | auto *NewType = Type::getIntNTy(Context, RegWidth); | |||
5853 | ||||
5854 | // Zero-extend the switch condition and case constants unless the switch | |||
5855 | // condition is a function argument that is already being sign-extended. | |||
5856 | // In that case, we can avoid an unnecessary mask/extension by sign-extending | |||
5857 | // everything instead. | |||
5858 | Instruction::CastOps ExtType = Instruction::ZExt; | |||
5859 | if (auto *Arg = dyn_cast<Argument>(Cond)) | |||
5860 | if (Arg->hasSExtAttr()) | |||
5861 | ExtType = Instruction::SExt; | |||
5862 | ||||
5863 | auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); | |||
5864 | ExtInst->insertBefore(SI); | |||
5865 | ExtInst->setDebugLoc(SI->getDebugLoc()); | |||
5866 | SI->setCondition(ExtInst); | |||
5867 | for (auto Case : SI->cases()) { | |||
5868 | APInt NarrowConst = Case.getCaseValue()->getValue(); | |||
5869 | APInt WideConst = (ExtType == Instruction::ZExt) ? | |||
5870 | NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); | |||
5871 | Case.setValue(ConstantInt::get(Context, WideConst)); | |||
5872 | } | |||
5873 | ||||
5874 | return true; | |||
5875 | } | |||
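// Example of the widening above: on a target with 32-bit registers,
//   switch i8 %c, label %def [ i8 1, label %a
//                              i8 2, label %b ]
// becomes
//   %ext = zext i8 %c to i32
//   switch i32 %ext, label %def [ i32 1, label %a
//                                 i32 2, label %b ]
// so a single zext replaces the per-case extensions of the comparisons.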
5876 | ||||
5877 | ||||
5878 | namespace { | |||
5879 | ||||
5880 | /// Helper class to promote a scalar operation to a vector one. | |||
5881 | /// This class is used to move an extractelement transition downward. | |||
5882 | /// E.g., | |||
5883 | /// a = vector_op <2 x i32> | |||
5884 | /// b = extractelement <2 x i32> a, i32 0 | |||
5885 | /// c = scalar_op b | |||
5886 | /// store c | |||
5887 | /// | |||
5888 | /// => | |||
5889 | /// a = vector_op <2 x i32> | |||
5890 | /// c = vector_op a (equivalent to scalar_op on the related lane) | |||
5891 | /// * d = extractelement <2 x i32> c, i32 0 | |||
5892 | /// * store d | |||
5893 | /// Assuming both the extractelement and the store can be combined, we get | |||
5894 | /// rid of the transition. | |||
5895 | class VectorPromoteHelper { | |||
5896 | /// DataLayout associated with the current module. | |||
5897 | const DataLayout &DL; | |||
5898 | ||||
5899 | /// Used to perform some checks on the legality of vector operations. | |||
5900 | const TargetLowering &TLI; | |||
5901 | ||||
5902 | /// Used to estimate the cost of the promoted chain. | |||
5903 | const TargetTransformInfo &TTI; | |||
5904 | ||||
5905 | /// The transition being moved downwards. | |||
5906 | Instruction *Transition; | |||
5907 | ||||
5908 | /// The sequence of instructions to be promoted. | |||
5909 | SmallVector<Instruction *, 4> InstsToBePromoted; | |||
5910 | ||||
5911 | /// Cost of combining a store and an extract. | |||
5912 | unsigned StoreExtractCombineCost; | |||
5913 | ||||
5914 | /// Instruction that will be combined with the transition. | |||
5915 | Instruction *CombineInst = nullptr; | |||
5916 | ||||
5917 | /// The instruction that represents the current end of the transition. | |||
5918 | /// Since we are faking the promotion until we reach the end of the chain | |||
5919 | /// of computation, we need a way to get the current end of the transition. | |||
5920 | Instruction *getEndOfTransition() const { | |||
5921 | if (InstsToBePromoted.empty()) | |||
5922 | return Transition; | |||
5923 | return InstsToBePromoted.back(); | |||
5924 | } | |||
5925 | ||||
5926 | /// Return the index of the original value in the transition. | |||
5927 | /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, | |||
5928 | /// c, is at index 0. | |||
5929 | unsigned getTransitionOriginalValueIdx() const { | |||
5930 | assert(isa<ExtractElementInst>(Transition) && | |||
5931 |        "Other kind of transitions are not supported yet"); | |||
5932 | return 0; | |||
5933 | } | |||
5934 | ||||
5935 | /// Return the index of the index in the transition. | |||
5936 | /// E.g., for "extractelement <2 x i32> c, i32 0" the index | |||
5937 | /// is at index 1. | |||
5938 | unsigned getTransitionIdx() const { | |||
5939 | assert(isa<ExtractElementInst>(Transition) && | |||
5940 |        "Other kind of transitions are not supported yet"); | |||
5941 | return 1; | |||
5942 | } | |||
5943 | ||||
5944 | /// Get the type of the transition. | |||
5945 | /// This is the type of the original value. | |||
5946 | /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the | |||
5947 | /// transition is <2 x i32>. | |||
5948 | Type *getTransitionType() const { | |||
5949 | return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); | |||
5950 | } | |||
5951 | ||||
5952 | /// Promote \p ToBePromoted by moving \p Def downward past it. | |||
5953 | /// I.e., we have the following sequence: | |||
5954 | /// Def = Transition <ty1> a to <ty2> | |||
5955 | /// b = ToBePromoted <ty2> Def, ... | |||
5956 | /// => | |||
5957 | /// b = ToBePromoted <ty1> a, ... | |||
5958 | /// Def = Transition <ty1> ToBePromoted to <ty2> | |||
5959 | void promoteImpl(Instruction *ToBePromoted); | |||
5960 | ||||
5961 | /// Check whether or not it is profitable to promote all the | |||
5962 | /// instructions enqueued to be promoted. | |||
5963 | bool isProfitableToPromote() { | |||
5964 | Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
5965 | unsigned Index = isa<ConstantInt>(ValIdx) | |||
5966 | ? cast<ConstantInt>(ValIdx)->getZExtValue() | |||
5967 | : -1; | |||
5968 | Type *PromotedType = getTransitionType(); | |||
5969 | ||||
5970 | StoreInst *ST = cast<StoreInst>(CombineInst); | |||
5971 | unsigned AS = ST->getPointerAddressSpace(); | |||
5972 | unsigned Align = ST->getAlignment(); | |||
5973 | // Check if this store is supported. | |||
5974 | if (!TLI.allowsMisalignedMemoryAccesses( | |||
5975 | TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, | |||
5976 | Align)) { | |||
5977 | // If this is not supported, there is no way we can combine | |||
5978 | // the extract with the store. | |||
5979 | return false; | |||
5980 | } | |||
5981 | ||||
5982 | // The scalar chain of computation has to pay for the transition | |||
5983 | // scalar to vector. | |||
5984 | // The vector chain has to account for the combining cost. | |||
5985 | uint64_t ScalarCost = | |||
5986 | TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); | |||
5987 | uint64_t VectorCost = StoreExtractCombineCost; | |||
5988 | for (const auto &Inst : InstsToBePromoted) { | |||
5989 | // Compute the cost. | |||
5990 | // By construction, all instructions being promoted are arithmetic ones. | |||
5991 | // Moreover, one argument is a constant that can be viewed as a splat | |||
5992 | // constant. | |||
5993 | Value *Arg0 = Inst->getOperand(0); | |||
5994 | bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || | |||
5995 | isa<ConstantFP>(Arg0); | |||
5996 | TargetTransformInfo::OperandValueKind Arg0OVK = | |||
5997 | IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue | |||
5998 | : TargetTransformInfo::OK_AnyValue; | |||
5999 | TargetTransformInfo::OperandValueKind Arg1OVK = | |||
6000 | !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue | |||
6001 | : TargetTransformInfo::OK_AnyValue; | |||
6002 | ScalarCost += TTI.getArithmeticInstrCost( | |||
6003 | Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); | |||
6004 | VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, | |||
6005 | Arg0OVK, Arg1OVK); | |||
6006 | } | |||
6007 | LLVM_DEBUG( | |||
6008 |     dbgs() << "Estimated cost of computation to be promoted:\nScalar: " | |||
6009 |            << ScalarCost << "\nVector: " << VectorCost << '\n'); | |||
6010 | return ScalarCost > VectorCost; | |||
6011 | } | |||
6012 | ||||
6013 | /// Generate a constant vector with \p Val with the same | |||
6014 | /// number of elements as the transition. | |||
6015 | /// \p UseSplat defines whether or not \p Val should be replicated | |||
6016 | /// across the whole vector. | |||
6017 | /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, | |||
6018 | /// otherwise we generate a vector with as many undef as possible: | |||
6019 | /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only | |||
6020 | /// used at the index of the extract. | |||
6021 | Value *getConstantVector(Constant *Val, bool UseSplat) const { | |||
6022 | unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); | |||
6023 | if (!UseSplat) { | |||
6024 | // If we cannot determine where the constant must be, we have to | |||
6025 | // use a splat constant. | |||
6026 | Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); | |||
6027 | if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) | |||
6028 | ExtractIdx = CstVal->getSExtValue(); | |||
6029 | else | |||
6030 | UseSplat = true; | |||
6031 | } | |||
6032 | ||||
6033 | unsigned End = getTransitionType()->getVectorNumElements(); | |||
6034 | if (UseSplat) | |||
6035 | return ConstantVector::getSplat(End, Val); | |||
6036 | ||||
6037 | SmallVector<Constant *, 4> ConstVec; | |||
6038 | UndefValue *UndefVal = UndefValue::get(Val->getType()); | |||
6039 | for (unsigned Idx = 0; Idx != End; ++Idx) { | |||
6040 | if (Idx == ExtractIdx) | |||
6041 | ConstVec.push_back(Val); | |||
6042 | else | |||
6043 | ConstVec.push_back(UndefVal); | |||
6044 | } | |||
6045 | return ConstantVector::get(ConstVec); | |||
6046 | } | |||
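// Example: for "extractelement <4 x i32> %v, i32 2" and Val == 7, UseSplat
// yields <i32 7, i32 7, i32 7, i32 7>; otherwise only the extracted lane
// needs a defined value and we get <i32 undef, i32 undef, i32 7, i32 undef>.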
6047 | ||||
6048 | /// Check if promoting the operand at \p OperandIdx of \p Use to a vector | |||
6049 | /// type can trigger undefined behavior. | |||
6050 | static bool canCauseUndefinedBehavior(const Instruction *Use, | |||
6051 | unsigned OperandIdx) { | |||
6052 | // It is not safe to introduce undef when the operand is on the | |||
6053 | // right-hand side of a division-like instruction. | |||
6054 | if (OperandIdx != 1) | |||
6055 | return false; | |||
6056 | switch (Use->getOpcode()) { | |||
6057 | default: | |||
6058 | return false; | |||
6059 | case Instruction::SDiv: | |||
6060 | case Instruction::UDiv: | |||
6061 | case Instruction::SRem: | |||
6062 | case Instruction::URem: | |||
6063 | return true; | |||
6064 | case Instruction::FDiv: | |||
6065 | case Instruction::FRem: | |||
6066 | return !Use->hasNoNaNs(); | |||
6067 | } | |||
6068 | llvm_unreachable(nullptr); | |||
6069 | } | |||
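// Example: if the promoted value ends up as the divisor of
//   %r = udiv i32 %x, %e
// (OperandIdx == 1), padding the other lanes with undef could introduce a
// divide-by-zero lane; the same value as the dividend (OperandIdx == 0) is
// harmless, which is why only operand index 1 is checked above.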
6070 | ||||
6071 | public: | |||
6072 | VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, | |||
6073 | const TargetTransformInfo &TTI, Instruction *Transition, | |||
6074 | unsigned CombineCost) | |||
6075 | : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), | |||
6076 | StoreExtractCombineCost(CombineCost) { | |||
6077 | assert(Transition && "Do not know how to promote null"); | |||
6078 | } | |||
6079 | ||||
6080 | /// Check if we can promote \p ToBePromoted. | |||
6081 | bool canPromote(const Instruction *ToBePromoted) const { | |||
6082 | // We could support CastInst too. | |||
6083 | return isa<BinaryOperator>(ToBePromoted); | |||
6084 | } | |||
6085 | ||||
6086 | /// Check if it is profitable to promote \p ToBePromoted | |||
6087 | /// by moving the transition downward through it. | |||
6088 | bool shouldPromote(const Instruction *ToBePromoted) const { | |||
6089 | // Promote only if all the operands can be statically expanded. | |||
6090 | // Indeed, we do not want to introduce any new kind of transitions. | |||
6091 | for (const Use &U : ToBePromoted->operands()) { | |||
6092 | const Value *Val = U.get(); | |||
6093 | if (Val == getEndOfTransition()) { | |||
6094 | // If the use is a division and the transition is on the rhs, | |||
6095 | // we cannot promote the operation, otherwise we may create a | |||
6096 | // division by zero. | |||
6097 | if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) | |||
6098 | return false; | |||
6099 | continue; | |||
6100 | } | |||
6101 | if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && | |||
6102 | !isa<ConstantFP>(Val)) | |||
6103 | return false; | |||
6104 | } | |||
6105 | // Check that the resulting operation is legal. | |||
6106 | int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); | |||
6107 | if (!ISDOpcode) | |||
6108 | return false; | |||
6109 | return StressStoreExtract || | |||
6110 | TLI.isOperationLegalOrCustom( | |||
6111 | ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); | |||
6112 | } | |||
6113 | ||||
6114 | /// Check whether or not \p Use can be combined | |||
6115 | /// with the transition. | |||
6116 | /// I.e., is it possible to do Use(Transition) => AnotherUse? | |||
6117 | bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } | |||
6118 | ||||
6119 | /// Record \p ToBePromoted as part of the chain to be promoted. | |||
6120 | void enqueueForPromotion(Instruction *ToBePromoted) { | |||
6121 | InstsToBePromoted.push_back(ToBePromoted); | |||
6122 | } | |||
6123 | ||||
6124 | /// Set the instruction that will be combined with the transition. | |||
6125 | void recordCombineInstruction(Instruction *ToBeCombined) { | |||
6126 | assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); | |||
6127 | CombineInst = ToBeCombined; | |||
6128 | } | |||
6129 | ||||
6130 | /// Promote all the instructions enqueued for promotion if doing so | |||
6131 | /// is profitable. | |||
6132 | /// \return True if the promotion happened, false otherwise. | |||
6133 | bool promote() { | |||
6134 | // Check if there is something to promote. | |||
6135 | // Right now, if we do not have anything to combine with, | |||
6136 | // we assume the promotion is not profitable. | |||
6137 | if (InstsToBePromoted.empty() || !CombineInst) | |||
6138 | return false; | |||
6139 | ||||
6140 | // Check cost. | |||
6141 | if (!StressStoreExtract && !isProfitableToPromote()) | |||
6142 | return false; | |||
6143 | ||||
6144 | // Promote. | |||
6145 | for (auto &ToBePromoted : InstsToBePromoted) | |||
6146 | promoteImpl(ToBePromoted); | |||
6147 | InstsToBePromoted.clear(); | |||
6148 | return true; | |||
6149 | } | |||
6150 | }; | |||
6151 | ||||
6152 | } // end anonymous namespace | |||
6153 | ||||
6154 | void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { | |||
6155 | // At this point, we know that all the operands of ToBePromoted but Def | |||
6156 | // can be statically promoted. | |||
6157 | // For Def, we need to use its parameter in ToBePromoted: | |||
6158 | // b = ToBePromoted ty1 a | |||
6159 | // Def = Transition ty1 b to ty2 | |||
6160 | // Move the transition down. | |||
6161 | // 1. Replace all uses of the promoted operation by the transition. | |||
6162 | // = ... b => = ... Def. | |||
6163 | assert(ToBePromoted->getType() == Transition->getType() && | |||
6164 |        "The type of the result of the transition does not match " | |||
6165 |        "the final type"); | |||
6166 | ToBePromoted->replaceAllUsesWith(Transition); | |||
6167 | // 2. Update the type of the uses. | |||
6168 | // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. | |||
6169 | Type *TransitionTy = getTransitionType(); | |||
6170 | ToBePromoted->mutateType(TransitionTy); | |||
6171 | // 3. Update all the operands of the promoted operation with promoted | |||
6172 | // operands. | |||
6173 | // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. | |||
6174 | for (Use &U : ToBePromoted->operands()) { | |||
6175 | Value *Val = U.get(); | |||
6176 | Value *NewVal = nullptr; | |||
6177 | if (Val == Transition) | |||
6178 | NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
6179 | else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || | |||
6180 | isa<ConstantFP>(Val)) { | |||
6181 | // Use a splat constant if it is not safe to use undef. | |||
6182 | NewVal = getConstantVector( | |||
6183 | cast<Constant>(Val), | |||
6184 | isa<UndefValue>(Val) || | |||
6185 | canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); | |||
6186 | } else | |||
6187 | llvm_unreachable("Did you modified shouldPromote and forgot to update "::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/CodeGenPrepare.cpp" , 6188) | |||
6188 | "this?")::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "/build/llvm-toolchain-snapshot-8~svn345461/lib/CodeGen/CodeGenPrepare.cpp" , 6188); | |||
6189 | ToBePromoted->setOperand(U.getOperandNo(), NewVal); | |||
6190 | } | |||
6191 | Transition->moveAfter(ToBePromoted); | |||
6192 | Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); | |||
6193 | } | |||
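// Illustrative before/after for promoteImpl, where Transition is the
// extractelement and ToBePromoted is the add:
//   %e = extractelement <2 x i32> %v, i32 1
//   %b = add i32 %e, 7
// =>
//   %b = add <2 x i32> %v, <i32 undef, i32 7>
//   %e = extractelement <2 x i32> %b, i32 1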
6194 | ||||
6195 | /// Some targets can do store(extractelement) with one instruction. | |||
6196 | /// Try to push the extractelement towards the stores when the target | |||
6197 | /// has this feature and this is profitable. | |||
6198 | bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { | |||
6199 | unsigned CombineCost = std::numeric_limits<unsigned>::max(); | |||
6200 | if (DisableStoreExtract || !TLI || | |||
6201 | (!StressStoreExtract && | |||
6202 | !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), | |||
6203 | Inst->getOperand(1), CombineCost))) | |||
6204 | return false; | |||
6205 | ||||
6206 | // At this point we know that Inst is a vector to scalar transition. | |||
6207 | // Try to move it down the def-use chain, until: | |||
6208 | // - We can combine the transition with its single use | |||
6209 | // => we got rid of the transition. | |||
6210 | // - We escape the current basic block | |||
6211 | // => we would need to check that we are moving it at a cheaper place and | |||
6212 | // we do not do that for now. | |||
6213 | BasicBlock *Parent = Inst->getParent(); | |||
6214 | LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); | |||
6215 | VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); | |||
6216 | // If the transition has more than one use, assume this is not going to be | |||
6217 | // beneficial. | |||
6218 | while (Inst->hasOneUse()) { | |||
6219 | Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); | |||
6220 | LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); | |||
6221 | ||||
6222 | if (ToBePromoted->getParent() != Parent) { | |||
6223 | LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block (" | |||
6224 |                   << ToBePromoted->getParent()->getName() | |||
6225 |                   << ") than the transition (" << Parent->getName() | |||
6226 |                   << ").\n"); | |||
6227 | return false; | |||
6228 | } | |||
6229 | ||||
6230 | if (VPH.canCombine(ToBePromoted)) { | |||
6231 | LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n' | |||
6232 |                   << "will be combined with: " << *ToBePromoted << '\n'); | |||
6233 | VPH.recordCombineInstruction(ToBePromoted); | |||
6234 | bool Changed = VPH.promote(); | |||
6235 | NumStoreExtractExposed += Changed; | |||
6236 | return Changed; | |||
6237 | } | |||
6238 | ||||
6239 | LLVM_DEBUG(dbgs() << "Try promoting.\n"); | |||
6240 | if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) | |||
6241 | return false; | |||
6242 | ||||
6243 | LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); | |||
6244 | ||||
6245 | VPH.enqueueForPromotion(ToBePromoted); | |||
6246 | Inst = ToBePromoted; | |||
6247 | } | |||
6248 | return false; | |||
6249 | } | |||
6250 | ||||
6251 | /// For the instruction sequence of store below, F and I values | |||
6252 | /// are bundled together as an i64 value before being stored into memory. | |||
6253 | /// Sometimes it is more efficient to generate separate stores for F and I, | |||
6254 | /// which can remove the bitwise instructions or sink them to colder places. | |||
6255 | /// | |||
6256 | /// (store (or (zext (bitcast F to i32) to i64), | |||
6257 | /// (shl (zext I to i64), 32)), addr) --> | |||
6258 | /// (store F, addr) and (store I, addr+4) | |||
6259 | /// | |||
6260 | /// Similarly, splitting other merged stores can also be beneficial, like: | |||
6261 | /// For pair of {i32, i32}, i64 store --> two i32 stores. | |||
6262 | /// For pair of {i32, i16}, i64 store --> two i32 stores. | |||
6263 | /// For pair of {i16, i16}, i32 store --> two i16 stores. | |||
6264 | /// For pair of {i16, i8}, i32 store --> two i16 stores. | |||
6265 | /// For pair of {i8, i8}, i16 store --> two i8 stores. | |||
6266 | /// | |||
6267 | /// We allow each target to determine specifically which kind of splitting is | |||
6268 | /// supported. | |||
6269 | /// | |||
6270 | /// The store patterns are commonly seen from the simple code snippet below | |||
6271 | /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo. | |||
6272 | /// void goo(const std::pair<int, float> &); | |||
6273 | /// hoo() { | |||
6274 | /// ... | |||
6275 | /// goo(std::make_pair(tmp, ftmp)); | |||
6276 | /// ... | |||
6277 | /// } | |||
6278 | /// | |||
6279 | /// Although we already have similar splitting in DAG Combine, we duplicate | |||
6280 | /// it in CodeGenPrepare to catch the case in which the pattern spans | |||
6281 | /// multiple BBs. The logic in DAG Combine is kept to catch cases generated | |||
6282 | /// during code expansion. | |||
6283 | static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, | |||
6284 | const TargetLowering &TLI) { | |||
6285 | // Handle simple but common cases only. | |||
6286 | Type *StoreType = SI.getValueOperand()->getType(); | |||
6287 | if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) || | |||
6288 | DL.getTypeSizeInBits(StoreType) == 0) | |||
6289 | return false; | |||
6290 | ||||
6291 | unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; | |||
6292 | Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); | |||
6293 | if (DL.getTypeStoreSizeInBits(SplitStoreType) != | |||
6294 | DL.getTypeSizeInBits(SplitStoreType)) | |||
6295 | return false; | |||
6296 | ||||
6297 | // Match the following patterns: | |||
6298 | // (store (or (zext LValue to i64), | |||
6299 | // (shl (zext HValue to i64), 32)), HalfValBitSize) | |||
6300 | // or | |||
6301 | // (store (or (shl (zext HValue to i64), 32), | |||
6302 | //            (zext LValue to i64)), HalfValBitSize) | |||
6303 | // Expect both operands of OR and the first operand of SHL to have only | |||
6304 | // one use. | |||
6305 | Value *LValue, *HValue; | |||
6306 | if (!match(SI.getValueOperand(), | |||
6307 | m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), | |||
6308 | m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), | |||
6309 | m_SpecificInt(HalfValBitSize)))))) | |||
6310 | return false; | |||
6311 | ||||
6312 | // Check that LValue and HValue are integers no wider than HalfValBitSize. | |||
6313 | if (!LValue->getType()->isIntegerTy() || | |||
6314 | DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || | |||
6315 | !HValue->getType()->isIntegerTy() || | |||
6316 | DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) | |||
6317 | return false; | |||
6318 | ||||
6319 | // If LValue/HValue is a bitcast instruction, use the EVT before bitcast | |||
6320 | // as the input of target query. | |||
6321 | auto *LBC = dyn_cast<BitCastInst>(LValue); | |||
6322 | auto *HBC = dyn_cast<BitCastInst>(HValue); | |||
6323 | EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) | |||
6324 | : EVT::getEVT(LValue->getType()); | |||
6325 | EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) | |||
6326 | : EVT::getEVT(HValue->getType()); | |||
6327 | if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) | |||
6328 | return false; | |||
6329 | ||||
6330 | // Start to split store. | |||
6331 | IRBuilder<> Builder(SI.getContext()); | |||
6332 | Builder.SetInsertPoint(&SI); | |||
6333 | ||||
6334 | // If LValue/HValue is a bitcast in another BB, create a new one in the | |||
6335 | // current BB so it may be merged with the split stores by the DAG combiner. | |||
6336 | if (LBC && LBC->getParent() != SI.getParent()) | |||
6337 | LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); | |||
6338 | if (HBC && HBC->getParent() != SI.getParent()) | |||
6339 | HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); | |||
6340 | ||||
6341 | bool IsLE = SI.getModule()->getDataLayout().isLittleEndian(); | |||
6342 | auto CreateSplitStore = [&](Value *V, bool Upper) { | |||
6343 | V = Builder.CreateZExtOrBitCast(V, SplitStoreType); | |||
6344 | Value *Addr = Builder.CreateBitCast( | |||
6345 | SI.getOperand(1), | |||
6346 | SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); | |||
6347 | if ((IsLE && Upper) || (!IsLE && !Upper)) | |||
6348 | Addr = Builder.CreateGEP( | |||
6349 | SplitStoreType, Addr, | |||
6350 | ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); | |||
6351 | Builder.CreateAlignedStore( | |||
6352 | V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment()); | |||
6353 | }; | |||
6354 | ||||
6355 | CreateSplitStore(LValue, false); | |||
6356 | CreateSplitStore(HValue, true); | |||
6357 | ||||
6358 | // Delete the old store. | |||
6359 | SI.eraseFromParent(); | |||
6360 | return true; | |||
6361 | } | |||
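// Illustrative result on a little-endian target with HalfValBitSize == 32:
//   store i64 %merged, i64* %p
// becomes
//   %p0 = bitcast i64* %p to i32*
//   store i32 %low, i32* %p0                      ; Upper == false
//   %p1 = getelementptr i32, i32* %p0, i32 1
//   store i32 %high, i32* %p1                     ; Upper == true, half align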
6362 | ||||
6363 | // Return true if the GEP has two operands, indexes into a sequential type, | |||
6364 | // and the single index is a constant. | |||
6365 | static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { | |||
6366 | gep_type_iterator I = gep_type_begin(*GEP); | |||
6367 | return GEP->getNumOperands() == 2 && | |||
6368 | I.isSequential() && | |||
6369 | isa<ConstantInt>(GEP->getOperand(1)); | |||
6370 | } | |||
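// Example: "%g = getelementptr i32, i32* %p, i64 4" qualifies (two operands,
// sequential indexed type, constant index), whereas a struct GEP such as
// "getelementptr %struct.S, %struct.S* %p, i64 0, i32 2" does not.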
6371 | ||||
6372 | // Try unmerging GEPs to reduce liveness interference (register pressure) across | |||
6373 | // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, | |||
6374 | // reducing liveness interference across those edges benefits global register | |||
6375 | // allocation. Currently handles only certain cases. | |||
6376 | // | |||
6377 | // For example, unmerge %GEPI and %UGEPI as below. | |||
6378 | // | |||
6379 | // ---------- BEFORE ---------- | |||
6380 | // SrcBlock: | |||
6381 | // ... | |||
6382 | // %GEPIOp = ... | |||
6383 | // ... | |||
6384 | // %GEPI = gep %GEPIOp, Idx | |||
6385 | // ... | |||
6386 | // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] | |||
6387 | // (* %GEPI is alive on the indirectbr edges due to other uses ahead) | |||
6388 | // (* %GEPIOp is alive on the indirectbr edges only because it is used by | |||
6389 | // %UGEPI) | |||
6390 | // | |||
6391 | // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) | |||
6392 | // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) | |||
6393 | // ... | |||
6394 | // | |||
6395 | // DstBi: | |||
6396 | // ... | |||
6397 | // %UGEPI = gep %GEPIOp, UIdx | |||
6398 | // ... | |||
6399 | // --------------------------- | |||
6400 | // | |||
6401 | // ---------- AFTER ---------- | |||
6402 | // SrcBlock: | |||
6403 | // ... (same as above) | |||
6404 | // (* %GEPI is still alive on the indirectbr edges) | |||
6405 | // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the | |||
6406 | // unmerging) | |||
6407 | // ... | |||
6408 | // | |||
6409 | // DstBi: | |||
6410 | // ... | |||
6411 | // %UGEPI = gep %GEPI, (UIdx-Idx) | |||
6412 | // ... | |||
6413 | // --------------------------- | |||
6414 | // | |||
6415 | // The register pressure on the IndirectBr edges is reduced because %GEPIOp is | |||
6416 | // no longer alive on them. | |||
6417 | // | |||
6418 | // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging | |||
6419 | // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as | |||
6420 | // not to disable further simplifications and optimizations as a result of GEP | |||
6421 | // merging. | |||
6422 | // | |||
6423 | // Note this unmerging may increase the length of the data-flow critical path | |||
6424 | // (the path from %GEPIOp to %UGEPI would go through %GEPI); it is a tradeoff | |||
6425 | // between register pressure and the length of the data-flow critical | |||
6426 | // path. Restricting this to the uncommon IndirectBr case minimizes the | |||
6427 | // impact of a potentially longer critical path, if any, and the impact on | |||
6428 | // compile time. | |||
6429 | static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, | |||
6430 | const TargetTransformInfo *TTI) { | |||
6431 | BasicBlock *SrcBlock = GEPI->getParent(); | |||
6432 | // Check that SrcBlock ends with an IndirectBr. If not, give up. The common | |||
6433 | // (non-IndirectBr) cases exit early here. | |||
6434 | if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) | |||
6435 | return false; | |||
6436 | // Check that GEPI is a simple gep with a single constant index. | |||
6437 | if (!GEPSequentialConstIndexed(GEPI)) | |||
6438 | return false; | |||
6439 | ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); | |||
6440 | // Check that GEPI is a cheap one. | |||
6441 | if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType()) | |||
6442 | > TargetTransformInfo::TCC_Basic) | |||
6443 | return false; | |||
6444 | Value *GEPIOp = GEPI->getOperand(0); | |||
6445 | // Check that GEPIOp is an instruction that's also defined in SrcBlock. | |||
6446 | if (!isa<Instruction>(GEPIOp)) | |||
6447 | return false; | |||
6448 | auto *GEPIOpI = cast<Instruction>(GEPIOp); | |||
6449 | if (GEPIOpI->getParent() != SrcBlock) | |||
6450 | return false; | |||
6451 | // Check that GEPI is used outside the block, meaning it's alive on the | |||
6452 | // IndirectBr edge(s). | |||
6453 | if (find_if(GEPI->users(), [&](User *Usr) { | |||
6454 | if (auto *I = dyn_cast<Instruction>(Usr)) { | |||
6455 | if (I->getParent() != SrcBlock) { | |||
6456 | return true; | |||
6457 | } | |||
6458 | } | |||
6459 | return false; | |||
6460 | }) == GEPI->users().end()) | |||
6461 | return false; | |||
6462 | // The second elements of the GEP chains to be unmerged. | |||
6463 | std::vector<GetElementPtrInst *> UGEPIs; | |||
6464 | // Check each user of GEPIOp to see whether unmerging would make GEPIOp no | |||
6465 | // longer alive on the IndirectBr edges. | |||
6466 | for (User *Usr : GEPIOp->users()) { | |||
6467 | if (Usr == GEPI) continue; | |||
6468 | // Check if Usr is an Instruction. If not, give up. | |||
6469 | if (!isa<Instruction>(Usr)) | |||
6470 | return false; | |||
6471 | auto *UI = cast<Instruction>(Usr); | |||
6472 | // If Usr is in the same block as GEPIOp, that is fine; skip it. | |||
6473 | if (UI->getParent() == SrcBlock) | |||
6474 | continue; | |||
6475 | // Check if Usr is a GEP. If not, give up. | |||
6476 | if (!isa<GetElementPtrInst>(Usr)) | |||
6477 | return false; | |||
6478 | auto *UGEPI = cast<GetElementPtrInst>(Usr); | |||
6479 | // Check if UGEPI is a simple gep with a single constant index and GEPIOp is | |||
6480 | // the pointer operand to it. If so, record it in the vector. If not, give | |||
6481 | // up. | |||
6482 | if (!GEPSequentialConstIndexed(UGEPI)) | |||
6483 | return false; | |||
6484 | if (UGEPI->getOperand(0) != GEPIOp) | |||
6485 | return false; | |||
6486 | if (GEPIIdx->getType() != | |||
6487 | cast<ConstantInt>(UGEPI->getOperand(1))->getType()) | |||
6488 | return false; | |||
6489 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
6490 | if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType()) | |||
6491 | > TargetTransformInfo::TCC_Basic) | |||
6492 | return false; | |||
6493 | UGEPIs.push_back(UGEPI); | |||
6494 | } | |||
6495 | if (UGEPIs.empty()) | |||
6496 | return false; | |||
6497 | // Check the materialization cost of (UIdx-Idx). | |||
6498 | for (GetElementPtrInst *UGEPI : UGEPIs) { | |||
6499 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
6500 | APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); | |||
6501 | unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType()); | |||
6502 | if (ImmCost > TargetTransformInfo::TCC_Basic) | |||
6503 | return false; | |||
6504 | } | |||
6505 | // Now unmerge between GEPI and UGEPIs. | |||
6506 | for (GetElementPtrInst *UGEPI : UGEPIs) { | |||
6507 | UGEPI->setOperand(0, GEPI); | |||
6508 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
6509 | Constant *NewUGEPIIdx = | |||
6510 | ConstantInt::get(GEPIIdx->getType(), | |||
6511 | UGEPIIdx->getValue() - GEPIIdx->getValue()); | |||
6512 | UGEPI->setOperand(1, NewUGEPIIdx); | |||
6513 | // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not | |||
6514 | // inbounds to avoid UB. | |||
6515 | if (!GEPI->isInBounds()) { | |||
6516 | UGEPI->setIsInBounds(false); | |||
6517 | } | |||
6518 | } | |||
6519 | // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not | |||
6520 | // alive on IndirectBr edges). | |||
6521 | assert(find_if(GEPIOp->users(), [&](User *Usr) { | |||
6522 |          return cast<Instruction>(Usr)->getParent() != SrcBlock; | |||
6523 |        }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock"); | |||
6524 | return true; | |||
6525 | } | |||
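// Worked example of the unmerge arithmetic: with %GEPI = gep %GEPIOp, 8 and
// %UGEPI = gep %GEPIOp, 12, the loop above rewrites the latter to
// %UGEPI = gep %GEPI, 4 (i.e. 12 - 8), provided 8, 12, and 4 each cost no
// more than TCC_Basic to materialize as immediates.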
6526 | ||||
6527 | bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) { | |||
6528 | // Bail out if we inserted the instruction to prevent optimizations from | |||
6529 | // stepping on each other's toes. | |||
6530 | if (InsertedInsts.count(I)) | |||
6531 | return false; | |||
6532 | ||||
6533 | if (PHINode *P = dyn_cast<PHINode>(I)) { | |||
6534 | // It is possible for very late stage optimizations (such as SimplifyCFG) | |||
6535 | // to introduce PHI nodes too late to be cleaned up. If we detect such a | |||
6536 | // trivial PHI, go ahead and zap it here. | |||
6537 | if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) { | |||
6538 | P->replaceAllUsesWith(V); | |||
6539 | P->eraseFromParent(); | |||
6540 | ++NumPHIsElim; | |||
6541 | return true; | |||
6542 | } | |||
6543 | return false; | |||
6544 | } | |||
6545 | ||||
6546 | if (CastInst *CI = dyn_cast<CastInst>(I)) { | |||
6547 | // If the source of the cast is a constant, then this should have | |||
6548 | // already been constant folded. The only reason NOT to constant fold | |||
6549 | // it is if something (e.g. LSR) was careful to place the constant | |||
6550 | // evaluation in a block other than the one that uses it (e.g. to hoist | |||
6551 | // the address of globals out of a loop). If this is the case, we don't | |||
6552 | // want to forward-subst the cast. | |||
6553 | if (isa<Constant>(CI->getOperand(0))) | |||
6554 | return false; | |||
6555 | ||||
6556 | if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) | |||
6557 | return true; | |||
6558 | ||||
6559 | if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { | |||
6560 | // Sink a zext or sext into its user blocks if the target type doesn't | |||
6561 | // fit in one register. | |||
6562 | if (TLI && | |||
6563 | TLI->getTypeAction(CI->getContext(), | |||
6564 | TLI->getValueType(*DL, CI->getType())) == | |||
6565 | TargetLowering::TypeExpandInteger) { | |||
6566 | return SinkCast(CI); | |||
6567 | } else { | |||
6568 | bool MadeChange = optimizeExt(I); | |||
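        // Note the deliberate non-short-circuiting '|' below: optimizeExtUses
        // must run even when optimizeExt already made a change.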
6569 | return MadeChange | optimizeExtUses(I); | |||
6570 | } | |||
6571 | } | |||
6572 | return false; | |||
6573 | } | |||
6574 | ||||
6575 | if (CmpInst *CI = dyn_cast<CmpInst>(I)) | |||
6576 | if (!TLI || !TLI->hasMultipleConditionRegisters()) | |||
6577 | return OptimizeCmpExpression(CI, TLI); | |||
6578 | ||||
6579 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | |||
6580 | LI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | |||
6581 | if (TLI) { | |||
6582 | bool Modified = optimizeLoadExt(LI); | |||
6583 | unsigned AS = LI->getPointerAddressSpace(); | |||
6584 | Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); | |||
6585 | return Modified; | |||
6586 | } | |||
6587 | return false; | |||
6588 | } | |||
6589 | ||||
6590 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) { | |||
6591 | if (TLI && splitMergedValStore(*SI, *DL, *TLI)) | |||
6592 | return true; | |||
6593 | SI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | |||
6594 | if (TLI) { | |||
6595 | unsigned AS = SI->getPointerAddressSpace(); | |||
6596 | return optimizeMemoryInst(I, SI->getOperand(1), | |||
6597 | SI->getOperand(0)->getType(), AS); | |||
6598 | } | |||
6599 | return false; | |||
6600 | } | |||
6601 | ||||
6602 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { | |||
6603 | unsigned AS = RMW->getPointerAddressSpace(); | |||
6604 | return optimizeMemoryInst(I, RMW->getPointerOperand(), | |||
6605 | RMW->getType(), AS); | |||
6606 | } | |||
6607 | ||||
6608 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) { | |||
6609 | unsigned AS = CmpX->getPointerAddressSpace(); | |||
6610 | return optimizeMemoryInst(I, CmpX->getPointerOperand(), | |||
6611 | CmpX->getCompareOperand()->getType(), AS); | |||
6612 | } | |||
6613 | ||||
6614 | BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); | |||
6615 | ||||
6616 | if (BinOp && (BinOp->getOpcode() == Instruction::And) && | |||
6617 | EnableAndCmpSinking && TLI) | |||
6618 | return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts); | |||
6619 | ||||
6620 | if (BinOp && (BinOp->getOpcode() == Instruction::AShr || | |||
6621 | BinOp->getOpcode() == Instruction::LShr)) { | |||
6622 | ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); | |||
6623 | if (TLI && CI && TLI->hasExtractBitsInsn()) | |||
6624 | return OptimizeExtractBits(BinOp, CI, *TLI, *DL); | |||
6625 | ||||
6626 | return false; | |||
6627 | } | |||
6628 | ||||
6629 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { | |||
6630 | if (GEPI->hasAllZeroIndices()) { | |||
6631 |       // The GEP operand must be a pointer, so its result is one too; replace it with a BitCast. | |||
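      // For example (a sketch with hypothetical IR), a GEP with all-zero
      // indices computes the same address as its pointer operand, so
      //   %f = getelementptr %struct.S, %struct.S* %p, i64 0, i32 0
      // becomes
      //   %f = bitcast %struct.S* %p to i32*
      // assuming the first field of %struct.S is an i32.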
6632 | Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), | |||
6633 | GEPI->getName(), GEPI); | |||
6634 | NC->setDebugLoc(GEPI->getDebugLoc()); | |||
6635 | GEPI->replaceAllUsesWith(NC); | |||
6636 | GEPI->eraseFromParent(); | |||
6637 | ++NumGEPsElim; | |||
6638 | optimizeInst(NC, ModifiedDT); | |||
6639 | return true; | |||
6640 | } | |||
6641 | if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { | |||
6642 | return true; | |||
6643 | } | |||
6644 | return false; | |||
6645 | } | |||
6646 | ||||
6647 | if (CallInst *CI = dyn_cast<CallInst>(I)) | |||
6648 | return optimizeCallInst(CI, ModifiedDT); | |||
6649 | ||||
6650 | if (SelectInst *SI = dyn_cast<SelectInst>(I)) | |||
6651 | return optimizeSelectInst(SI); | |||
6652 | ||||
6653 | if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) | |||
6654 | return optimizeShuffleVectorInst(SVI); | |||
6655 | ||||
6656 | if (auto *Switch = dyn_cast<SwitchInst>(I)) | |||
6657 | return optimizeSwitchInst(Switch); | |||
6658 | ||||
6659 | if (isa<ExtractElementInst>(I)) | |||
6660 | return optimizeExtractElementInst(I); | |||
6661 | ||||
6662 | return false; | |||
6663 | } | |||
6664 | ||||
6665 | /// Given an OR instruction, check to see if this is a bitreverse | |||
6666 | /// idiom. If so, insert the new intrinsic and return true. | |||
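/// For example (a sketch with hypothetical IR), a chain of shl/lshr/and/or
/// instructions that reverses the bits of %x, ending in
///   %rev = or i32 %hi, %lo
/// is rewritten to a single intrinsic call
///   %rev = call i32 @llvm.bitreverse.i32(i32 %x)
/// provided the target reports ISD::BITREVERSE as legal or custom for i32.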
6667 | static bool makeBitReverse(Instruction &I, const DataLayout &DL, | |||
6668 | const TargetLowering &TLI) { | |||
6669 | if (!I.getType()->isIntegerTy() || | |||
6670 | !TLI.isOperationLegalOrCustom(ISD::BITREVERSE, | |||
6671 | TLI.getValueType(DL, I.getType(), true))) | |||
6672 | return false; | |||
6673 | ||||
6674 | SmallVector<Instruction*, 4> Insts; | |||
6675 | if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts)) | |||
6676 | return false; | |||
6677 | Instruction *LastInst = Insts.back(); | |||
6678 | I.replaceAllUsesWith(LastInst); | |||
6679 | RecursivelyDeleteTriviallyDeadInstructions(&I); | |||
6680 | return true; | |||
6681 | } | |||
6682 | ||||
6683 | // In this pass we look for GEP and cast instructions that are used | |||
6684 | // across basic blocks and rewrite them to improve basic-block-at-a-time | |||
6685 | // selection. | |||
6686 | bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) { | |||
6687 | SunkAddrs.clear(); | |||
6688 | bool MadeChange = false; | |||
6689 | ||||
6690 | CurInstIterator = BB.begin(); | |||
6691 | while (CurInstIterator != BB.end()) { | |||
6692 | MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT); | |||
6693 | if (ModifiedDT) | |||
6694 | return true; | |||
6695 | } | |||
6696 | ||||
6697 | bool MadeBitReverse = true; | |||
6698 | while (TLI && MadeBitReverse) { | |||
6699 | MadeBitReverse = false; | |||
6700 | for (auto &I : reverse(BB)) { | |||
6701 | if (makeBitReverse(I, *DL, *TLI)) { | |||
6702 | MadeBitReverse = MadeChange = true; | |||
6703 | ModifiedDT = true; | |||
6704 | break; | |||
6705 | } | |||
6706 | } | |||
6707 | } | |||
6708 | MadeChange |= dupRetToEnableTailCallOpts(&BB); | |||
6709 | ||||
6710 | return MadeChange; | |||
6711 | } | |||
6712 | ||||
6713 | // If llvm.dbg.value is far away from the value it describes, then iSel may | |||
6714 | // not be able to handle it properly. iSel will drop the llvm.dbg.value if it | |||
6715 | // cannot find a node corresponding to the value. | |||
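//
// For example (a sketch with hypothetical IR), given
//   %mul = mul i32 %a, %b
//   ... many unrelated instructions ...
//   call void @llvm.dbg.value(metadata i32 %mul, ...)
// this pass moves the dbg.value up to immediately follow %mul, so that
// instruction selection still sees the two together.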
6716 | bool CodeGenPrepare::placeDbgValues(Function &F) { | |||
6717 | bool MadeChange = false; | |||
6718 | for (BasicBlock &BB : F) { | |||
6719 | Instruction *PrevNonDbgInst = nullptr; | |||
6720 | for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { | |||
6721 | Instruction *Insn = &*BI++; | |||
6722 | DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn); | |||
6723 | // Leave dbg.values that refer to an alloca alone. These | |||
6724 | // intrinsics describe the address of a variable (= the alloca) | |||
6725 | // being taken. They should not be moved next to the alloca | |||
6726 | // (and to the beginning of the scope), but rather stay close to | |||
6727 | // where said address is used. | |||
6728 | if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) { | |||
6729 | PrevNonDbgInst = Insn; | |||
6730 | continue; | |||
6731 | } | |||
6732 | ||||
6733 | Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue()); | |||
6734 | if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) { | |||
6735 | // If VI is a phi in a block with an EHPad terminator, we can't insert | |||
6736 | // after it. | |||
6737 | if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad()) | |||
6738 | continue; | |||
6739 | LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI; } } while (false) | |||
6740 | << *DVI << ' ' << *VI)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI; } } while (false); | |||
6741 | DVI->removeFromParent(); | |||
6742 | if (isa<PHINode>(VI)) | |||
6743 | DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt()); | |||
6744 | else | |||
6745 | DVI->insertAfter(VI); | |||
6746 | MadeChange = true; | |||
6747 | ++NumDbgValueMoved; | |||
6748 | } | |||
6749 | } | |||
6750 | } | |||
6751 | return MadeChange; | |||
6752 | } | |||
6753 | ||||
6754 | /// Scale down both weights to fit into uint32_t. | |||
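/// Worked example (hypothetical numbers): NewTrue = 2^33 and NewFalse = 2^31
/// give NewMax = 2^33, so Scale = 2^33 / (2^32 - 1) + 1 = 3 and the weights
/// become 2^33 / 3 and 2^31 / 3, both of which now fit in uint32_t while
/// preserving the original 4:1 ratio.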
6755 | static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { | |||
6756 | uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; | |||
6757 | uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1; | |||
6758 | NewTrue = NewTrue / Scale; | |||
6759 | NewFalse = NewFalse / Scale; | |||
6760 | } | |||
6761 | ||||
6762 | /// Some targets prefer to split a conditional branch like: | |||
6763 | /// \code | |||
6764 | /// %0 = icmp ne i32 %a, 0 | |||
6765 | /// %1 = icmp ne i32 %b, 0 | |||
6766 | /// %or.cond = or i1 %0, %1 | |||
6767 | /// br i1 %or.cond, label %TrueBB, label %FalseBB | |||
6768 | /// \endcode | |||
6769 | /// into multiple branch instructions like: | |||
6770 | /// \code | |||
6771 | /// bb1: | |||
6772 | /// %0 = icmp ne i32 %a, 0 | |||
6773 | /// br i1 %0, label %TrueBB, label %bb2 | |||
6774 | /// bb2: | |||
6775 | /// %1 = icmp ne i32 %b, 0 | |||
6776 | /// br i1 %1, label %TrueBB, label %FalseBB | |||
6777 | /// \endcode | |||
6778 | /// This usually allows instruction selection to do even further optimizations | |||
6779 | /// and combine the compare with the branch instruction. Currently this is | |||
6780 | /// applied for targets which have "cheap" jump instructions. | |||
6781 | /// | |||
6782 | /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. | |||
6783 | /// | |||
6784 | bool CodeGenPrepare::splitBranchCondition(Function &F) { | |||
6785 | if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive()) | |||
6786 | return false; | |||
6787 | ||||
6788 | bool MadeChange = false; | |||
6789 | for (auto &BB : F) { | |||
6790 | // Does this BB end with the following? | |||
6791 | // %cond1 = icmp|fcmp|binary instruction ... | |||
6792 | // %cond2 = icmp|fcmp|binary instruction ... | |||
6793 |     // %cond.or = or|and i1 %cond1, %cond2 | |||
6794 |     // br i1 %cond.or, label %dest1, label %dest2 | |||
6795 | BinaryOperator *LogicOp; | |||
6796 | BasicBlock *TBB, *FBB; | |||
6797 | if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB))) | |||
6798 | continue; | |||
6799 | ||||
6800 | auto *Br1 = cast<BranchInst>(BB.getTerminator()); | |||
6801 | if (Br1->getMetadata(LLVMContext::MD_unpredictable)) | |||
6802 | continue; | |||
6803 | ||||
6804 | unsigned Opc; | |||
6805 | Value *Cond1, *Cond2; | |||
6806 | if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)), | |||
6807 | m_OneUse(m_Value(Cond2))))) | |||
6808 | Opc = Instruction::And; | |||
6809 | else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)), | |||
6810 | m_OneUse(m_Value(Cond2))))) | |||
6811 | Opc = Instruction::Or; | |||
6812 | else | |||
6813 | continue; | |||
6814 | ||||
6815 | if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) || | |||
6816 | !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) ) | |||
6817 | continue; | |||
6818 | ||||
6819 | LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Before branch condition splitting\n" ; BB.dump(); } } while (false); | |||
6820 | ||||
6821 | // Create a new BB. | |||
6822 | auto TmpBB = | |||
6823 | BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split", | |||
6824 | BB.getParent(), BB.getNextNode()); | |||
6825 | ||||
6826 | // Update original basic block by using the first condition directly by the | |||
6827 | // branch instruction and removing the no longer needed and/or instruction. | |||
6828 | Br1->setCondition(Cond1); | |||
6829 | LogicOp->eraseFromParent(); | |||
6830 | ||||
6831 | // Depending on the condition we have to either replace the true or the | |||
6832 | // false successor of the original branch instruction. | |||
6833 | if (Opc == Instruction::And) | |||
6834 | Br1->setSuccessor(0, TmpBB); | |||
6835 | else | |||
6836 | Br1->setSuccessor(1, TmpBB); | |||
6837 | ||||
6838 | // Fill in the new basic block. | |||
6839 | auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); | |||
6840 | if (auto *I = dyn_cast<Instruction>(Cond2)) { | |||
6841 | I->removeFromParent(); | |||
6842 | I->insertBefore(Br2); | |||
6843 | } | |||
6844 | ||||
6845 | // Update PHI nodes in both successors. The original BB needs to be | |||
6846 |     // replaced in one successor's PHI nodes, because the branch now comes from | |||
6847 |     // the newly generated BB (TmpBB). In the other successor we need to add one | |||
6848 |     // incoming edge to the PHI nodes, because both branch instructions now | |||
6849 |     // target the same successor. Depending on the original branch condition | |||
6850 | // (and/or) we have to swap the successors (TrueDest, FalseDest), so that | |||
6851 | // we perform the correct update for the PHI nodes. | |||
6852 | // This doesn't change the successor order of the just created branch | |||
6853 | // instruction (or any other instruction). | |||
6854 | if (Opc == Instruction::Or) | |||
6855 | std::swap(TBB, FBB); | |||
6856 | ||||
6857 | // Replace the old BB with the new BB. | |||
6858 | for (PHINode &PN : TBB->phis()) { | |||
6859 | int i; | |||
6860 | while ((i = PN.getBasicBlockIndex(&BB)) >= 0) | |||
6861 | PN.setIncomingBlock(i, TmpBB); | |||
6862 | } | |||
6863 | ||||
6864 |     // Add another incoming edge from the new BB. | |||
6865 | for (PHINode &PN : FBB->phis()) { | |||
6866 | auto *Val = PN.getIncomingValueForBlock(&BB); | |||
6867 | PN.addIncoming(Val, TmpBB); | |||
6868 | } | |||
6869 | ||||
6870 | // Update the branch weights (from SelectionDAGBuilder:: | |||
6871 | // FindMergedConditions). | |||
6872 | if (Opc == Instruction::Or) { | |||
6873 | // Codegen X | Y as: | |||
6874 | // BB1: | |||
6875 | // jmp_if_X TBB | |||
6876 | // jmp TmpBB | |||
6877 | // TmpBB: | |||
6878 | // jmp_if_Y TBB | |||
6879 | // jmp FBB | |||
6880 | // | |||
6881 | ||||
6882 |       // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | |||
6883 | // The requirement is that | |||
6884 | // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) | |||
6885 | // = TrueProb for original BB. | |||
6886 | // Assuming the original weights are A and B, one choice is to set BB1's | |||
6887 | // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice | |||
6888 | // assumes that | |||
6889 | // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. | |||
6890 |       // Another choice is to assume TrueProb for BB1 equals TrueProb for | |||
6891 |       // TmpBB, but the math is more complicated. | |||
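      // Worked example (hypothetical numbers): with A = 1 and B = 3, BB1 gets
      // weights 1 and 1 + 2*3 = 7 (TrueProb 1/8) and TmpBB gets weights 1 and
      // 6 (TrueProb 1/7), so the combined TrueProb is
      // 1/8 + (7/8)*(1/7) = 1/4, matching the original A/(A+B) = 1/4.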
6892 | uint64_t TrueWeight, FalseWeight; | |||
6893 | if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) { | |||
6894 | uint64_t NewTrueWeight = TrueWeight; | |||
6895 | uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; | |||
6896 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
6897 |         Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) | |||
6898 |             .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
6899 | ||||
6900 | NewTrueWeight = TrueWeight; | |||
6901 | NewFalseWeight = 2 * FalseWeight; | |||
6902 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
6903 |         Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) | |||
6904 |             .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
6905 | } | |||
6906 | } else { | |||
6907 | // Codegen X & Y as: | |||
6908 | // BB1: | |||
6909 | // jmp_if_X TmpBB | |||
6910 | // jmp FBB | |||
6911 | // TmpBB: | |||
6912 | // jmp_if_Y TBB | |||
6913 | // jmp FBB | |||
6914 | // | |||
6915 | // This requires creation of TmpBB after CurBB. | |||
6916 | ||||
6917 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | |||
6918 | // The requirement is that | |||
6919 | // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) | |||
6920 | // = FalseProb for original BB. | |||
6921 | // Assuming the original weights are A and B, one choice is to set BB1's | |||
6922 | // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice | |||
6923 | // assumes that | |||
6924 | // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. | |||
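      // Worked example (hypothetical numbers): with A = 3 and B = 1, BB1 gets
      // weights 2*3 + 1 = 7 and 1 (FalseProb 1/8) and TmpBB gets weights 6
      // and 1 (FalseProb 1/7), so the combined FalseProb is
      // 1/8 + (7/8)*(1/7) = 1/4, matching the original B/(A+B) = 1/4.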
6925 | uint64_t TrueWeight, FalseWeight; | |||
6926 | if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) { | |||
6927 | uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; | |||
6928 | uint64_t NewFalseWeight = FalseWeight; | |||
6929 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
6930 |         Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) | |||
6931 |             .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
6932 | ||||
6933 | NewTrueWeight = 2 * TrueWeight; | |||
6934 | NewFalseWeight = FalseWeight; | |||
6935 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
6936 |         Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) | |||
6937 |             .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
6938 | } | |||
6939 | } | |||
6940 | ||||
6941 | // Note: No point in getting fancy here, since the DT info is never | |||
6942 | // available to CodeGenPrepare. | |||
6943 | ModifiedDT = true; | |||
6944 | ||||
6945 | MadeChange = true; | |||
6946 | ||||
6947 | LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "After branch condition splitting\n" ; BB.dump(); TmpBB->dump(); } } while (false) | |||
6948 | TmpBB->dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "After branch condition splitting\n" ; BB.dump(); TmpBB->dump(); } } while (false); | |||
6949 | } | |||
6950 | return MadeChange; | |||
6951 | } |