File: lib/CodeGen/CodeGenPrepare.cpp
Location: line 4223, column 27
Description: Called C++ object pointer is null
1 | //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===// | |||
2 | // | |||
3 | // The LLVM Compiler Infrastructure | |||
4 | // | |||
5 | // This file is distributed under the University of Illinois Open Source | |||
6 | // License. See LICENSE.TXT for details. | |||
7 | // | |||
8 | //===----------------------------------------------------------------------===// | |||
9 | // | |||
10 | // This pass munges the code in the input function to better prepare it for | |||
11 | // SelectionDAG-based code generation. This works around limitations in its | |||
12 | // basic-block-at-a-time approach. It should eventually be removed. | |||
13 | // | |||
14 | //===----------------------------------------------------------------------===// | |||
15 | ||||
16 | #include "llvm/CodeGen/Passes.h" | |||
17 | #include "llvm/ADT/DenseMap.h" | |||
18 | #include "llvm/ADT/SmallSet.h" | |||
19 | #include "llvm/ADT/Statistic.h" | |||
20 | #include "llvm/Analysis/InstructionSimplify.h" | |||
21 | #include "llvm/Analysis/TargetLibraryInfo.h" | |||
22 | #include "llvm/Analysis/TargetTransformInfo.h" | |||
23 | #include "llvm/IR/CallSite.h" | |||
24 | #include "llvm/IR/Constants.h" | |||
25 | #include "llvm/IR/DataLayout.h" | |||
26 | #include "llvm/IR/DerivedTypes.h" | |||
27 | #include "llvm/IR/Dominators.h" | |||
28 | #include "llvm/IR/Function.h" | |||
29 | #include "llvm/IR/GetElementPtrTypeIterator.h" | |||
30 | #include "llvm/IR/IRBuilder.h" | |||
31 | #include "llvm/IR/InlineAsm.h" | |||
32 | #include "llvm/IR/Instructions.h" | |||
33 | #include "llvm/IR/IntrinsicInst.h" | |||
34 | #include "llvm/IR/MDBuilder.h" | |||
35 | #include "llvm/IR/PatternMatch.h" | |||
36 | #include "llvm/IR/Statepoint.h" | |||
37 | #include "llvm/IR/ValueHandle.h" | |||
38 | #include "llvm/IR/ValueMap.h" | |||
39 | #include "llvm/Pass.h" | |||
40 | #include "llvm/Support/CommandLine.h" | |||
41 | #include "llvm/Support/Debug.h" | |||
42 | #include "llvm/Support/raw_ostream.h" | |||
43 | #include "llvm/Target/TargetLowering.h" | |||
44 | #include "llvm/Target/TargetSubtargetInfo.h" | |||
45 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
46 | #include "llvm/Transforms/Utils/BuildLibCalls.h" | |||
47 | #include "llvm/Transforms/Utils/BypassSlowDivision.h" | |||
48 | #include "llvm/Transforms/Utils/Local.h" | |||
49 | #include "llvm/Transforms/Utils/SimplifyLibCalls.h" | |||
50 | using namespace llvm; | |||
51 | using namespace llvm::PatternMatch; | |||
52 | ||||
53 | #define DEBUG_TYPE "codegenprepare" | |||
54 | ||||
55 | STATISTIC(NumBlocksElim, "Number of blocks eliminated"); | |||
56 | STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated"); | |||
57 | STATISTIC(NumGEPsElim, "Number of GEPs converted to casts"); | |||
58 | STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of " | |||
59 | "sunken Cmps"); | |||
60 | STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses " | |||
61 | "of sunken Casts"); | |||
62 | STATISTIC(NumMemoryInsts, "Number of memory instructions whose address " | |||
63 | "computations were sunk"); | |||
64 | STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads"); | |||
65 | STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized"); | |||
66 | STATISTIC(NumRetsDup, "Number of return instructions duplicated"); | |||
67 | STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved"); | |||
68 | STATISTIC(NumSelectsExpanded, "Number of selects turned into branches"); | |||
69 | STATISTIC(NumAndCmpsMoved, "Number of and/cmp's pushed into branches"); | |||
70 | STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed"); | |||
71 | ||||
72 | static cl::opt<bool> DisableBranchOpts( | |||
73 | "disable-cgp-branch-opts", cl::Hidden, cl::init(false), | |||
74 | cl::desc("Disable branch optimizations in CodeGenPrepare")); | |||
75 | ||||
76 | static cl::opt<bool> | |||
77 | DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false), | |||
78 | cl::desc("Disable GC optimizations in CodeGenPrepare")); | |||
79 | ||||
80 | static cl::opt<bool> DisableSelectToBranch( | |||
81 | "disable-cgp-select2branch", cl::Hidden, cl::init(false), | |||
82 | cl::desc("Disable select to branch conversion.")); | |||
83 | ||||
84 | static cl::opt<bool> AddrSinkUsingGEPs( | |||
85 | "addr-sink-using-gep", cl::Hidden, cl::init(false), | |||
86 | cl::desc("Address sinking in CGP using GEPs.")); | |||
87 | ||||
88 | static cl::opt<bool> EnableAndCmpSinking( | |||
89 | "enable-andcmp-sinking", cl::Hidden, cl::init(true), | |||
90 | cl::desc("Enable sinkinig and/cmp into branches.")); | |||
91 | ||||
92 | static cl::opt<bool> DisableStoreExtract( | |||
93 | "disable-cgp-store-extract", cl::Hidden, cl::init(false), | |||
94 | cl::desc("Disable store(extract) optimizations in CodeGenPrepare")); | |||
95 | ||||
96 | static cl::opt<bool> StressStoreExtract( | |||
97 | "stress-cgp-store-extract", cl::Hidden, cl::init(false), | |||
98 | cl::desc("Stress test store(extract) optimizations in CodeGenPrepare")); | |||
99 | ||||
100 | static cl::opt<bool> DisableExtLdPromotion( | |||
101 | "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), | |||
102 | cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in " | |||
103 | "CodeGenPrepare")); | |||
104 | ||||
105 | static cl::opt<bool> StressExtLdPromotion( | |||
106 | "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false), | |||
107 | cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) " | |||
108 | "optimization in CodeGenPrepare")); | |||
109 | ||||
110 | namespace { | |||
111 | typedef SmallPtrSet<Instruction *, 16> SetOfInstrs; | |||
112 | struct TypeIsSExt { | |||
113 | Type *Ty; | |||
114 | bool IsSExt; | |||
115 | TypeIsSExt(Type *Ty, bool IsSExt) : Ty(Ty), IsSExt(IsSExt) {} | |||
116 | }; | |||
117 | typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy; | |||
118 | class TypePromotionTransaction; | |||
119 | ||||
120 | class CodeGenPrepare : public FunctionPass { | |||
121 | /// TLI - Keep a pointer to a TargetLowering to consult for determining | |||
122 | /// transformation profitability. | |||
123 | const TargetMachine *TM; | |||
124 | const TargetLowering *TLI; | |||
125 | const TargetTransformInfo *TTI; | |||
126 | const TargetLibraryInfo *TLInfo; | |||
127 | DominatorTree *DT; | |||
128 | ||||
129 | /// CurInstIterator - As we scan instructions optimizing them, this is the | |||
130 | /// next instruction to optimize. Xforms that can invalidate this should | |||
131 | /// update it. | |||
132 | BasicBlock::iterator CurInstIterator; | |||
133 | ||||
134 | /// Keeps track of non-local addresses that have been sunk into a block. | |||
135 | /// This allows us to avoid inserting duplicate code for blocks with | |||
136 | /// multiple load/stores of the same address. | |||
137 | ValueMap<Value*, Value*> SunkAddrs; | |||
138 | ||||
139 | /// Keeps track of all truncates inserted for the current function. | |||
140 | SetOfInstrs InsertedTruncsSet; | |||
141 | /// Keeps track of the type of each related instruction before its | |||
142 | /// promotion for the current function. | |||
143 | InstrToOrigTy PromotedInsts; | |||
144 | ||||
145 | /// ModifiedDT - If the CFG is modified in any way, the dominator tree may need to | |||
146 | /// be updated. | |||
147 | bool ModifiedDT; | |||
148 | ||||
149 | /// OptSize - True if optimizing for size. | |||
150 | bool OptSize; | |||
151 | ||||
152 | public: | |||
153 | static char ID; // Pass identification, replacement for typeid | |||
154 | explicit CodeGenPrepare(const TargetMachine *TM = nullptr) | |||
155 | : FunctionPass(ID), TM(TM), TLI(nullptr), TTI(nullptr) { | |||
156 | initializeCodeGenPreparePass(*PassRegistry::getPassRegistry()); | |||
157 | } | |||
158 | bool runOnFunction(Function &F) override; | |||
159 | ||||
160 | const char *getPassName() const override { return "CodeGen Prepare"; } | |||
161 | ||||
162 | void getAnalysisUsage(AnalysisUsage &AU) const override { | |||
163 | AU.addPreserved<DominatorTreeWrapperPass>(); | |||
164 | AU.addRequired<TargetLibraryInfoWrapperPass>(); | |||
165 | AU.addRequired<TargetTransformInfoWrapperPass>(); | |||
166 | } | |||
167 | ||||
168 | private: | |||
169 | bool EliminateFallThrough(Function &F); | |||
170 | bool EliminateMostlyEmptyBlocks(Function &F); | |||
171 | bool CanMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const; | |||
172 | void EliminateMostlyEmptyBlock(BasicBlock *BB); | |||
173 | bool OptimizeBlock(BasicBlock &BB, bool& ModifiedDT); | |||
174 | bool OptimizeInst(Instruction *I, bool& ModifiedDT); | |||
175 | bool OptimizeMemoryInst(Instruction *I, Value *Addr, Type *AccessTy); | |||
176 | bool OptimizeInlineAsmInst(CallInst *CS); | |||
177 | bool OptimizeCallInst(CallInst *CI, bool& ModifiedDT); | |||
178 | bool MoveExtToFormExtLoad(Instruction *&I); | |||
179 | bool OptimizeExtUses(Instruction *I); | |||
180 | bool OptimizeSelectInst(SelectInst *SI); | |||
181 | bool OptimizeShuffleVectorInst(ShuffleVectorInst *SI); | |||
182 | bool OptimizeExtractElementInst(Instruction *Inst); | |||
183 | bool DupRetToEnableTailCallOpts(BasicBlock *BB); | |||
184 | bool PlaceDbgValues(Function &F); | |||
185 | bool sinkAndCmp(Function &F); | |||
186 | bool ExtLdPromotion(TypePromotionTransaction &TPT, LoadInst *&LI, | |||
187 | Instruction *&Inst, | |||
188 | const SmallVectorImpl<Instruction *> &Exts, | |||
189 | unsigned CreatedInst); | |||
190 | bool splitBranchCondition(Function &F); | |||
191 | bool simplifyOffsetableRelocate(Instruction &I); | |||
192 | }; | |||
193 | } | |||
194 | ||||
195 | char CodeGenPrepare::ID = 0; | |||
196 | INITIALIZE_TM_PASS(CodeGenPrepare, "codegenprepare", | |||
197 | "Optimize for code generation", false, false) | |||
198 | ||||
199 | FunctionPass *llvm::createCodeGenPreparePass(const TargetMachine *TM) { | |||
200 | return new CodeGenPrepare(TM); | |||
201 | } | |||
202 | ||||
203 | bool CodeGenPrepare::runOnFunction(Function &F) { | |||
204 | if (skipOptnoneFunction(F)) | |||
205 | return false; | |||
206 | ||||
207 | bool EverMadeChange = false; | |||
208 | // Clear per function information. | |||
209 | InsertedTruncsSet.clear(); | |||
210 | PromotedInsts.clear(); | |||
211 | ||||
212 | ModifiedDT = false; | |||
213 | if (TM) | |||
214 | TLI = TM->getSubtargetImpl(F)->getTargetLowering(); | |||
215 | TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); | |||
216 | TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); | |||
217 | DominatorTreeWrapperPass *DTWP = | |||
218 | getAnalysisIfAvailable<DominatorTreeWrapperPass>(); | |||
219 | DT = DTWP ? &DTWP->getDomTree() : nullptr; | |||
220 | OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, | |||
221 | Attribute::OptimizeForSize); | |||
222 | ||||
223 | /// This optimization identifies DIV instructions that can be | |||
224 | /// profitably bypassed and carried out with a shorter, faster divide. | |||
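| /// For example (an illustrative sketch; the actual widths come from | |||
| /// TLI->getBypassSlowDivWidths() and are target-dependent), with a 64->32 | |||
| /// bypass width, a 64-bit udiv is rewritten to test its operands at run time | |||
| /// and use a 32-bit divide when both fit: | |||
| ///   %r = udiv i64 %a, %b | |||
| /// becomes, roughly: | |||
| ///   if (((%a | %b) >> 32) == 0)  %r = zext(udiv i32 (trunc %a), (trunc %b)) | |||
| ///   else                         %r = udiv i64 %a, %b | |||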
225 | if (!OptSize && TLI && TLI->isSlowDivBypassed()) { | |||
226 | const DenseMap<unsigned int, unsigned int> &BypassWidths = | |||
227 | TLI->getBypassSlowDivWidths(); | |||
228 | for (Function::iterator I = F.begin(); I != F.end(); I++) | |||
229 | EverMadeChange |= bypassSlowDivision(F, I, BypassWidths); | |||
230 | } | |||
231 | ||||
232 | // Eliminate blocks that contain only PHI nodes and an | |||
233 | // unconditional branch. | |||
234 | EverMadeChange |= EliminateMostlyEmptyBlocks(F); | |||
235 | ||||
236 | // If llvm.dbg.value is far away from the value, iSel may not be able to | |||
237 | // handle it properly. iSel will drop llvm.dbg.value if it cannot find a | |||
238 | // node corresponding to the value. | |||
239 | EverMadeChange |= PlaceDbgValues(F); | |||
240 | ||||
241 | // If there is a mask, compare against zero, and branch that can be combined | |||
242 | // into a single target instruction, push the mask and compare into branch | |||
243 | // users. Do this before OptimizeBlock -> OptimizeInst -> | |||
244 | // OptimizeCmpExpression, which perturbs the pattern being searched for. | |||
245 | if (!DisableBranchOpts) { | |||
246 | EverMadeChange |= sinkAndCmp(F); | |||
247 | EverMadeChange |= splitBranchCondition(F); | |||
248 | } | |||
249 | ||||
250 | bool MadeChange = true; | |||
251 | while (MadeChange) { | |||
252 | MadeChange = false; | |||
253 | for (Function::iterator I = F.begin(); I != F.end(); ) { | |||
254 | BasicBlock *BB = I++; | |||
255 | bool ModifiedDTOnIteration = false; | |||
256 | MadeChange |= OptimizeBlock(*BB, ModifiedDTOnIteration); | |||
257 | ||||
258 | // Restart BB iteration if the dominator tree of the Function was changed | |||
259 | ModifiedDT |= ModifiedDTOnIteration; | |||
260 | if (ModifiedDTOnIteration) | |||
261 | break; | |||
262 | } | |||
263 | EverMadeChange |= MadeChange; | |||
264 | } | |||
265 | ||||
266 | SunkAddrs.clear(); | |||
267 | ||||
268 | if (!DisableBranchOpts) { | |||
269 | MadeChange = false; | |||
270 | SmallPtrSet<BasicBlock*, 8> WorkList; | |||
271 | for (BasicBlock &BB : F) { | |||
272 | SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB)); | |||
273 | MadeChange |= ConstantFoldTerminator(&BB, true); | |||
274 | if (!MadeChange) continue; | |||
275 | ||||
276 | for (SmallVectorImpl<BasicBlock*>::iterator | |||
277 | II = Successors.begin(), IE = Successors.end(); II != IE; ++II) | |||
278 | if (pred_begin(*II) == pred_end(*II)) | |||
279 | WorkList.insert(*II); | |||
280 | } | |||
281 | ||||
282 | // Delete the dead blocks and any of their dead successors. | |||
283 | MadeChange |= !WorkList.empty(); | |||
284 | while (!WorkList.empty()) { | |||
285 | BasicBlock *BB = *WorkList.begin(); | |||
286 | WorkList.erase(BB); | |||
287 | SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB)); | |||
288 | ||||
289 | DeleteDeadBlock(BB); | |||
290 | ||||
291 | for (SmallVectorImpl<BasicBlock*>::iterator | |||
292 | II = Successors.begin(), IE = Successors.end(); II != IE; ++II) | |||
293 | if (pred_begin(*II) == pred_end(*II)) | |||
294 | WorkList.insert(*II); | |||
295 | } | |||
296 | ||||
297 | // Merge pairs of basic blocks with unconditional branches, connected by | |||
298 | // a single edge. | |||
299 | if (EverMadeChange || MadeChange) | |||
300 | MadeChange |= EliminateFallThrough(F); | |||
301 | ||||
302 | if (MadeChange) | |||
303 | ModifiedDT = true; | |||
304 | EverMadeChange |= MadeChange; | |||
305 | } | |||
306 | ||||
307 | if (!DisableGCOpts) { | |||
308 | SmallVector<Instruction *, 2> Statepoints; | |||
309 | for (BasicBlock &BB : F) | |||
310 | for (Instruction &I : BB) | |||
311 | if (isStatepoint(I)) | |||
312 | Statepoints.push_back(&I); | |||
313 | for (auto &I : Statepoints) | |||
314 | EverMadeChange |= simplifyOffsetableRelocate(*I); | |||
315 | } | |||
316 | ||||
317 | if (ModifiedDT && DT) | |||
318 | DT->recalculate(F); | |||
319 | ||||
320 | return EverMadeChange; | |||
321 | } | |||
322 | ||||
323 | /// EliminateFallThrough - Merge basic blocks which are connected | |||
324 | /// by a single edge, where one of the basic blocks has a single successor | |||
325 | /// pointing to the other basic block, which has a single predecessor. | |||
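| /// For example (sketch): | |||
| ///   BB1: | |||
| ///     br label %BB2        ; BB1's only successor | |||
| ///   BB2:                   ; BB1 is BB2's single predecessor | |||
| ///     ... | |||
| /// is collapsed into a single block holding BB2's instructions. | |||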
326 | bool CodeGenPrepare::EliminateFallThrough(Function &F) { | |||
327 | bool Changed = false; | |||
328 | // Scan all of the blocks in the function, except for the entry block. | |||
329 | for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { | |||
330 | BasicBlock *BB = I++; | |||
331 | // If the destination block has a single pred, then this is a trivial | |||
332 | // edge, just collapse it. | |||
333 | BasicBlock *SinglePred = BB->getSinglePredecessor(); | |||
334 | ||||
335 | // Don't merge if BB's address is taken. | |||
336 | if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; | |||
337 | ||||
338 | BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator()); | |||
339 | if (Term && !Term->isConditional()) { | |||
340 | Changed = true; | |||
341 | DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "To merge:\n"<< * SinglePred << "\n\n\n"; } } while (0); | |||
342 | // Remember if SinglePred was the entry block of the function. | |||
343 | // If so, we will need to move BB back to the entry position. | |||
344 | bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); | |||
345 | MergeBasicBlockIntoOnlyPred(BB, DT); | |||
346 | ||||
347 | if (isEntry && BB != &BB->getParent()->getEntryBlock()) | |||
348 | BB->moveBefore(&BB->getParent()->getEntryBlock()); | |||
349 | ||||
350 | // We have erased a block. Update the iterator. | |||
351 | I = BB; | |||
352 | } | |||
353 | } | |||
354 | return Changed; | |||
355 | } | |||
356 | ||||
357 | /// EliminateMostlyEmptyBlocks - eliminate blocks that contain only PHI nodes, | |||
358 | /// debug info directives, and an unconditional branch. Passes before isel | |||
359 | /// (e.g. LSR/loopsimplify) often split edges in ways that are non-optimal for | |||
360 | /// isel. Start by eliminating these blocks so we can split them the way we | |||
361 | /// want them. | |||
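| /// A "mostly empty" block looks like this (sketch): | |||
| ///   BB: | |||
| ///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ] | |||
| ///     br label %DestBB | |||
| /// Such a block is folded into DestBB when CanMergeBlocks() allows it. | |||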
362 | bool CodeGenPrepare::EliminateMostlyEmptyBlocks(Function &F) { | |||
363 | bool MadeChange = false; | |||
364 | // Note that this intentionally skips the entry block. | |||
365 | for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) { | |||
366 | BasicBlock *BB = I++; | |||
367 | ||||
368 | // If this block doesn't end with an uncond branch, ignore it. | |||
369 | BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()); | |||
370 | if (!BI || !BI->isUnconditional()) | |||
371 | continue; | |||
372 | ||||
373 | // If the instruction before the branch (skipping debug info) isn't a phi | |||
374 | // node, then other stuff is happening here. | |||
375 | BasicBlock::iterator BBI = BI; | |||
376 | if (BBI != BB->begin()) { | |||
377 | --BBI; | |||
378 | while (isa<DbgInfoIntrinsic>(BBI)) { | |||
379 | if (BBI == BB->begin()) | |||
380 | break; | |||
381 | --BBI; | |||
382 | } | |||
383 | if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI)) | |||
384 | continue; | |||
385 | } | |||
386 | ||||
387 | // Do not break infinite loops. | |||
388 | BasicBlock *DestBB = BI->getSuccessor(0); | |||
389 | if (DestBB == BB) | |||
390 | continue; | |||
391 | ||||
392 | if (!CanMergeBlocks(BB, DestBB)) | |||
393 | continue; | |||
394 | ||||
395 | EliminateMostlyEmptyBlock(BB); | |||
396 | MadeChange = true; | |||
397 | } | |||
398 | return MadeChange; | |||
399 | } | |||
400 | ||||
401 | /// CanMergeBlocks - Return true if we can merge BB into DestBB if there is a | |||
402 | /// single uncond branch between them, and BB contains no other non-phi | |||
403 | /// instructions. | |||
404 | bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB, | |||
405 | const BasicBlock *DestBB) const { | |||
406 | // We only want to eliminate blocks whose phi nodes are used by phi nodes in | |||
407 | // the successor. If there are more complex conditions (e.g. preheaders), | |||
408 | // don't mess around with them. | |||
409 | BasicBlock::const_iterator BBI = BB->begin(); | |||
410 | while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { | |||
411 | for (const User *U : PN->users()) { | |||
412 | const Instruction *UI = cast<Instruction>(U); | |||
413 | if (UI->getParent() != DestBB || !isa<PHINode>(UI)) | |||
414 | return false; | |||
415 | // If User is inside DestBB block and it is a PHINode then check | |||
416 | // incoming value. If incoming value is not from BB then this is | |||
417 | // a complex condition (e.g. preheaders) we want to avoid here. | |||
418 | if (UI->getParent() == DestBB) { | |||
419 | if (const PHINode *UPN = dyn_cast<PHINode>(UI)) | |||
420 | for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) { | |||
421 | Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I)); | |||
422 | if (Insn && Insn->getParent() == BB && | |||
423 | Insn->getParent() != UPN->getIncomingBlock(I)) | |||
424 | return false; | |||
425 | } | |||
426 | } | |||
427 | } | |||
428 | } | |||
429 | ||||
430 | // If BB and DestBB contain any common predecessors, then the phi nodes in BB | |||
431 | // and DestBB may have conflicting incoming values for the block. If so, we | |||
432 | // can't merge the block. | |||
433 | const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin()); | |||
434 | if (!DestBBPN) return true; // no conflict. | |||
435 | ||||
436 | // Collect the preds of BB. | |||
437 | SmallPtrSet<const BasicBlock*, 16> BBPreds; | |||
438 | if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { | |||
439 | // It is faster to get preds from a PHI than with pred_iterator. | |||
440 | for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) | |||
441 | BBPreds.insert(BBPN->getIncomingBlock(i)); | |||
442 | } else { | |||
443 | BBPreds.insert(pred_begin(BB), pred_end(BB)); | |||
444 | } | |||
445 | ||||
446 | // Walk the preds of DestBB. | |||
447 | for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) { | |||
448 | BasicBlock *Pred = DestBBPN->getIncomingBlock(i); | |||
449 | if (BBPreds.count(Pred)) { // Common predecessor? | |||
450 | BBI = DestBB->begin(); | |||
451 | while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) { | |||
452 | const Value *V1 = PN->getIncomingValueForBlock(Pred); | |||
453 | const Value *V2 = PN->getIncomingValueForBlock(BB); | |||
454 | ||||
455 | // If V2 is a phi node in BB, look up what the mapped value will be. | |||
456 | if (const PHINode *V2PN = dyn_cast<PHINode>(V2)) | |||
457 | if (V2PN->getParent() == BB) | |||
458 | V2 = V2PN->getIncomingValueForBlock(Pred); | |||
459 | ||||
460 | // If there is a conflict, bail out. | |||
461 | if (V1 != V2) return false; | |||
462 | } | |||
463 | } | |||
464 | } | |||
465 | ||||
466 | return true; | |||
467 | } | |||
468 | ||||
469 | ||||
470 | /// EliminateMostlyEmptyBlock - Eliminate a basic block that has only phi's and | |||
471 | /// an unconditional branch in it. | |||
472 | void CodeGenPrepare::EliminateMostlyEmptyBlock(BasicBlock *BB) { | |||
473 | BranchInst *BI = cast<BranchInst>(BB->getTerminator()); | |||
474 | BasicBlock *DestBB = BI->getSuccessor(0); | |||
475 | ||||
476 | DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB)do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB; } } while (0); | |||
477 | ||||
478 | // If the destination block has a single pred, then this is a trivial edge, | |||
479 | // just collapse it. | |||
480 | if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) { | |||
481 | if (SinglePred != DestBB) { | |||
482 | // Remember if SinglePred was the entry block of the function. If so, we | |||
483 | // will need to move BB back to the entry position. | |||
484 | bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); | |||
485 | MergeBasicBlockIntoOnlyPred(DestBB, DT); | |||
486 | ||||
487 | if (isEntry && BB != &BB->getParent()->getEntryBlock()) | |||
488 | BB->moveBefore(&BB->getParent()->getEntryBlock()); | |||
489 | ||||
490 | DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"; } } while (0); | |||
491 | return; | |||
492 | } | |||
493 | } | |||
494 | ||||
495 | // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB | |||
496 | // to handle the new incoming edges it is about to have. | |||
497 | PHINode *PN; | |||
498 | for (BasicBlock::iterator BBI = DestBB->begin(); | |||
499 | (PN = dyn_cast<PHINode>(BBI)); ++BBI) { | |||
500 | // Remove the incoming value for BB, and remember it. | |||
501 | Value *InVal = PN->removeIncomingValue(BB, false); | |||
502 | ||||
503 | // Two options: either the InVal is a phi node defined in BB or it is some | |||
504 | // value that dominates BB. | |||
505 | PHINode *InValPhi = dyn_cast<PHINode>(InVal); | |||
506 | if (InValPhi && InValPhi->getParent() == BB) { | |||
507 | // Add all of the input values of the input PHI as inputs of this phi. | |||
508 | for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i) | |||
509 | PN->addIncoming(InValPhi->getIncomingValue(i), | |||
510 | InValPhi->getIncomingBlock(i)); | |||
511 | } else { | |||
512 | // Otherwise, add one instance of the dominating value for each edge that | |||
513 | // we will be adding. | |||
514 | if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) { | |||
515 | for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i) | |||
516 | PN->addIncoming(InVal, BBPN->getIncomingBlock(i)); | |||
517 | } else { | |||
518 | for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) | |||
519 | PN->addIncoming(InVal, *PI); | |||
520 | } | |||
521 | } | |||
522 | } | |||
523 | ||||
524 | // The PHIs are now updated, change everything that refers to BB to use | |||
525 | // DestBB and remove BB. | |||
526 | BB->replaceAllUsesWith(DestBB); | |||
527 | if (DT && !ModifiedDT) { | |||
528 | BasicBlock *BBIDom = DT->getNode(BB)->getIDom()->getBlock(); | |||
529 | BasicBlock *DestBBIDom = DT->getNode(DestBB)->getIDom()->getBlock(); | |||
530 | BasicBlock *NewIDom = DT->findNearestCommonDominator(BBIDom, DestBBIDom); | |||
531 | DT->changeImmediateDominator(DestBB, NewIDom); | |||
532 | DT->eraseNode(BB); | |||
533 | } | |||
534 | BB->eraseFromParent(); | |||
535 | ++NumBlocksElim; | |||
536 | ||||
537 | DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "AFTER:\n" << *DestBB << "\n\n\n"; } } while (0); | |||
538 | } | |||
539 | ||||
540 | // Computes a map of base pointer relocation instructions to corresponding | |||
541 | // derived pointer relocation instructions given a vector of all relocate calls | |||
542 | static void computeBaseDerivedRelocateMap( | |||
543 | const SmallVectorImpl<User *> &AllRelocateCalls, | |||
544 | DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> & | |||
545 | RelocateInstMap) { | |||
546 | // Collect information in two maps: one primarily for locating the base object | |||
547 | // while filling the second map; the second map is the final structure holding | |||
548 | // a mapping between Base and corresponding Derived relocate calls | |||
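| // For example (using the IR sketch that precedes simplifyOffsetableRelocate | |||
| // below): %base' = relocate(%tok, i32 4, i32 4) is a base relocation and is | |||
| // keyed as (4, 4), while %ptr' = relocate(%tok, i32 4, i32 5) is a derived | |||
| // relocation keyed as (4, 5) that ends up in RelocateInstMap[%base']. | |||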
549 | DenseMap<std::pair<unsigned, unsigned>, IntrinsicInst *> RelocateIdxMap; | |||
550 | for (auto &U : AllRelocateCalls) { | |||
551 | GCRelocateOperands ThisRelocate(U); | |||
552 | IntrinsicInst *I = cast<IntrinsicInst>(U); | |||
553 | auto K = std::make_pair(ThisRelocate.basePtrIndex(), | |||
554 | ThisRelocate.derivedPtrIndex()); | |||
555 | RelocateIdxMap.insert(std::make_pair(K, I)); | |||
556 | } | |||
557 | for (auto &Item : RelocateIdxMap) { | |||
558 | std::pair<unsigned, unsigned> Key = Item.first; | |||
559 | if (Key.first == Key.second) | |||
560 | // Base relocation: nothing to insert | |||
561 | continue; | |||
562 | ||||
563 | IntrinsicInst *I = Item.second; | |||
564 | auto BaseKey = std::make_pair(Key.first, Key.first); | |||
565 | IntrinsicInst *Base = RelocateIdxMap[BaseKey]; | |||
566 | if (!Base) | |||
567 | // TODO: We might want to insert a new base object relocate and gep off | |||
568 | // that, if there are enough derived object relocates. | |||
569 | continue; | |||
570 | RelocateInstMap[Base].push_back(I); | |||
571 | } | |||
572 | } | |||
573 | ||||
574 | // Accepts a GEP and extracts the operands into a vector provided they're all | |||
575 | // small integer constants | |||
576 | static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP, | |||
577 | SmallVectorImpl<Value *> &OffsetV) { | |||
578 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) { | |||
579 | // Only accept small constant integer operands | |||
580 | auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i)); | |||
581 | if (!Op || Op->getZExtValue() > 20) | |||
582 | return false; | |||
583 | } | |||
584 | ||||
585 | for (unsigned i = 1; i < GEP->getNumOperands(); i++) | |||
586 | OffsetV.push_back(GEP->getOperand(i)); | |||
587 | return true; | |||
588 | } | |||
589 | ||||
590 | // Takes a RelocatedBase (base pointer relocation instruction) and Targets to | |||
591 | // replace, computes a replacement, and effects it. | |||
592 | static bool | |||
593 | simplifyRelocatesOffABase(IntrinsicInst *RelocatedBase, | |||
594 | const SmallVectorImpl<IntrinsicInst *> &Targets) { | |||
595 | bool MadeChange = false; | |||
596 | for (auto &ToReplace : Targets) { | |||
597 | GCRelocateOperands MasterRelocate(RelocatedBase); | |||
598 | GCRelocateOperands ThisRelocate(ToReplace); | |||
599 | ||||
600 | assert(ThisRelocate.basePtrIndex() == MasterRelocate.basePtrIndex() && | |||
601 | "Not relocating a derived object of the original base object"); | |||
602 | if (ThisRelocate.basePtrIndex() == ThisRelocate.derivedPtrIndex()) { | |||
603 | // A duplicate relocate call. TODO: coalesce duplicates. | |||
604 | continue; | |||
605 | } | |||
606 | ||||
607 | Value *Base = ThisRelocate.basePtr(); | |||
608 | auto Derived = dyn_cast<GetElementPtrInst>(ThisRelocate.derivedPtr()); | |||
609 | if (!Derived || Derived->getPointerOperand() != Base) | |||
610 | continue; | |||
611 | ||||
612 | SmallVector<Value *, 2> OffsetV; | |||
613 | if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV)) | |||
614 | continue; | |||
615 | ||||
616 | // Create a Builder and replace the target callsite with a gep | |||
617 | IRBuilder<> Builder(ToReplace); | |||
618 | Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc()); | |||
619 | Value *Replacement = | |||
620 | Builder.CreateGEP(RelocatedBase, makeArrayRef(OffsetV)); | |||
621 | Instruction *ReplacementInst = cast<Instruction>(Replacement); | |||
622 | ReplacementInst->removeFromParent(); | |||
623 | ReplacementInst->insertAfter(RelocatedBase); | |||
624 | Replacement->takeName(ToReplace); | |||
625 | ToReplace->replaceAllUsesWith(Replacement); | |||
626 | ToReplace->eraseFromParent(); | |||
627 | ||||
628 | MadeChange = true; | |||
629 | } | |||
630 | return MadeChange; | |||
631 | } | |||
632 | ||||
633 | // Turns this: | |||
634 | // | |||
635 | // %base = ... | |||
636 | // %ptr = gep %base + 15 | |||
637 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
638 | // %base' = relocate(%tok, i32 4, i32 4) | |||
639 | // %ptr' = relocate(%tok, i32 4, i32 5) | |||
640 | // %val = load %ptr' | |||
641 | // | |||
642 | // into this: | |||
643 | // | |||
644 | // %base = ... | |||
645 | // %ptr = gep %base + 15 | |||
646 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
647 | // %base' = gc.relocate(%tok, i32 4, i32 4) | |||
648 | // %ptr' = gep %base' + 15 | |||
649 | // %val = load %ptr' | |||
650 | bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { | |||
651 | bool MadeChange = false; | |||
652 | SmallVector<User *, 2> AllRelocateCalls; | |||
653 | ||||
654 | for (auto *U : I.users()) | |||
655 | if (isGCRelocate(dyn_cast<Instruction>(U))) | |||
656 | // Collect all the relocate calls associated with a statepoint | |||
657 | AllRelocateCalls.push_back(U); | |||
658 | ||||
659 | // We need at least one base pointer relocation + one derived pointer | |||
660 | // relocation to mangle | |||
661 | if (AllRelocateCalls.size() < 2) | |||
662 | return false; | |||
663 | ||||
664 | // RelocateInstMap is a mapping from the base relocate instruction to the | |||
665 | // corresponding derived relocate instructions | |||
666 | DenseMap<IntrinsicInst *, SmallVector<IntrinsicInst *, 2>> RelocateInstMap; | |||
667 | computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); | |||
668 | if (RelocateInstMap.empty()) | |||
669 | return false; | |||
670 | ||||
671 | for (auto &Item : RelocateInstMap) | |||
672 | // Item.first is the RelocatedBase to offset against | |||
673 | // Item.second is the vector of Targets to replace | |||
674 | MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second); | |||
675 | return MadeChange; | |||
676 | } | |||
677 | ||||
678 | /// SinkCast - Sink the specified cast instruction into its user blocks | |||
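| /// For example (sketch; the sunk cast is created unnamed, names here are | |||
| /// illustrative): | |||
| ///   DefBB:  %c = zext i32 %x to i64 | |||
| ///   UseBB:  ... use of %c ... | |||
| /// becomes | |||
| ///   UseBB:  %c.sunk = zext i32 %x to i64 | |||
| ///           ... use of %c.sunk ... | |||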
679 | static bool SinkCast(CastInst *CI) { | |||
680 | BasicBlock *DefBB = CI->getParent(); | |||
681 | ||||
682 | /// InsertedCasts - Only insert a cast in each block once. | |||
683 | DenseMap<BasicBlock*, CastInst*> InsertedCasts; | |||
684 | ||||
685 | bool MadeChange = false; | |||
686 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
687 | UI != E; ) { | |||
688 | Use &TheUse = UI.getUse(); | |||
689 | Instruction *User = cast<Instruction>(*UI); | |||
690 | ||||
691 | // Figure out which BB this cast is used in. For PHI's this is the | |||
692 | // appropriate predecessor block. | |||
693 | BasicBlock *UserBB = User->getParent(); | |||
694 | if (PHINode *PN = dyn_cast<PHINode>(User)) { | |||
695 | UserBB = PN->getIncomingBlock(TheUse); | |||
696 | } | |||
697 | ||||
698 | // Preincrement use iterator so we don't invalidate it. | |||
699 | ++UI; | |||
700 | ||||
701 | // If this user is in the same block as the cast, don't change the cast. | |||
702 | if (UserBB == DefBB) continue; | |||
703 | ||||
704 | // If we have already inserted a cast into this block, use it. | |||
705 | CastInst *&InsertedCast = InsertedCasts[UserBB]; | |||
706 | ||||
707 | if (!InsertedCast) { | |||
708 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
709 | InsertedCast = | |||
710 | CastInst::Create(CI->getOpcode(), CI->getOperand(0), CI->getType(), "", | |||
711 | InsertPt); | |||
712 | MadeChange = true; | |||
713 | } | |||
714 | ||||
715 | // Replace a use of the cast with a use of the new cast. | |||
716 | TheUse = InsertedCast; | |||
717 | ++NumCastUses; | |||
718 | } | |||
719 | ||||
720 | // If we removed all uses, nuke the cast. | |||
721 | if (CI->use_empty()) { | |||
722 | CI->eraseFromParent(); | |||
723 | MadeChange = true; | |||
724 | } | |||
725 | ||||
726 | return MadeChange; | |||
727 | } | |||
728 | ||||
729 | /// OptimizeNoopCopyExpression - If the specified cast instruction is a noop | |||
730 | /// copy (e.g. it's casting from one pointer type to another, i32->i8 on PPC), | |||
731 | /// sink it into user blocks to reduce the number of virtual | |||
732 | /// registers that must be created and coalesced. | |||
733 | /// | |||
734 | /// Return true if any changes are made. | |||
735 | /// | |||
736 | static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI){ | |||
737 | // If this is a noop copy, | |||
738 | EVT SrcVT = TLI.getValueType(CI->getOperand(0)->getType()); | |||
739 | EVT DstVT = TLI.getValueType(CI->getType()); | |||
740 | ||||
741 | // Is this an fp<->int conversion? If so, it is not a noop copy. | |||
742 | if (SrcVT.isInteger() != DstVT.isInteger()) | |||
743 | return false; | |||
744 | ||||
745 | // If this is an extension, it will be a zero or sign extension, which | |||
746 | // isn't a noop. | |||
747 | if (SrcVT.bitsLT(DstVT)) return false; | |||
748 | ||||
749 | // If these values will be promoted, find out what they will be promoted | |||
750 | // to. This helps us consider truncates on PPC as noop copies when they | |||
751 | // are. | |||
752 | if (TLI.getTypeAction(CI->getContext(), SrcVT) == | |||
753 | TargetLowering::TypePromoteInteger) | |||
754 | SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); | |||
755 | if (TLI.getTypeAction(CI->getContext(), DstVT) == | |||
756 | TargetLowering::TypePromoteInteger) | |||
757 | DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); | |||
758 | ||||
759 | // If, after promotion, these are the same types, this is a noop copy. | |||
760 | if (SrcVT != DstVT) | |||
761 | return false; | |||
762 | ||||
763 | return SinkCast(CI); | |||
764 | } | |||
765 | ||||
766 | /// OptimizeCmpExpression - sink the given CmpInst into user blocks to reduce | |||
767 | /// the number of virtual registers that must be created and coalesced. This is | |||
768 | /// a clear win except on targets with multiple condition code registers | |||
769 | /// (PowerPC), where it might lose; some adjustment may be wanted there. | |||
770 | /// | |||
771 | /// Return true if any changes are made. | |||
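| /// For example (sketch; names illustrative): | |||
| ///   DefBB:  %c = icmp ult i32 %x, %y | |||
| ///   UseBB:  br i1 %c, label %t, label %f | |||
| /// becomes | |||
| ///   UseBB:  %c.sunk = icmp ult i32 %x, %y | |||
| ///           br i1 %c.sunk, label %t, label %f | |||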
772 | static bool OptimizeCmpExpression(CmpInst *CI) { | |||
773 | BasicBlock *DefBB = CI->getParent(); | |||
774 | ||||
775 | /// InsertedCmp - Only insert a cmp in each block once. | |||
776 | DenseMap<BasicBlock*, CmpInst*> InsertedCmps; | |||
777 | ||||
778 | bool MadeChange = false; | |||
779 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
780 | UI != E; ) { | |||
781 | Use &TheUse = UI.getUse(); | |||
782 | Instruction *User = cast<Instruction>(*UI); | |||
783 | ||||
784 | // Preincrement use iterator so we don't invalidate it. | |||
785 | ++UI; | |||
786 | ||||
787 | // Don't bother for PHI nodes. | |||
788 | if (isa<PHINode>(User)) | |||
789 | continue; | |||
790 | ||||
791 | // Figure out which BB this cmp is used in. | |||
792 | BasicBlock *UserBB = User->getParent(); | |||
793 | ||||
794 | // If this user is in the same block as the cmp, don't change the cmp. | |||
795 | if (UserBB == DefBB) continue; | |||
796 | ||||
797 | // If we have already inserted a cmp into this block, use it. | |||
798 | CmpInst *&InsertedCmp = InsertedCmps[UserBB]; | |||
799 | ||||
800 | if (!InsertedCmp) { | |||
801 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
802 | InsertedCmp = | |||
803 | CmpInst::Create(CI->getOpcode(), | |||
804 | CI->getPredicate(), CI->getOperand(0), | |||
805 | CI->getOperand(1), "", InsertPt); | |||
806 | MadeChange = true; | |||
807 | } | |||
808 | ||||
809 | // Replace a use of the cmp with a use of the new cmp. | |||
810 | TheUse = InsertedCmp; | |||
811 | ++NumCmpUses; | |||
812 | } | |||
813 | ||||
814 | // If we removed all uses, nuke the cmp. | |||
815 | if (CI->use_empty()) | |||
816 | CI->eraseFromParent(); | |||
817 | ||||
818 | return MadeChange; | |||
819 | } | |||
820 | ||||
821 | /// isExtractBitsCandidateUse - Check if the candidates could | |||
822 | /// be combined with a shift instruction, which includes: | |||
823 | /// 1. Truncate instruction | |||
824 | /// 2. And instruction and the imm is a mask of the low bits: | |||
825 | /// imm & (imm+1) == 0 | |||
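| /// For example, 0xFFFF qualifies (0xFFFF & 0x10000 == 0, a mask of the low | |||
| /// 16 bits), while 0x00F0 does not (0x00F0 & 0x00F1 == 0x00F0 != 0). | |||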
826 | static bool isExtractBitsCandidateUse(Instruction *User) { | |||
827 | if (!isa<TruncInst>(User)) { | |||
828 | if (User->getOpcode() != Instruction::And || | |||
829 | !isa<ConstantInt>(User->getOperand(1))) | |||
830 | return false; | |||
831 | ||||
832 | const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); | |||
833 | ||||
834 | if ((Cimm & (Cimm + 1)).getBoolValue()) | |||
835 | return false; | |||
836 | } | |||
837 | return true; | |||
838 | } | |||
839 | ||||
840 | /// SinkShiftAndTruncate - sink both the shift and the truncate instruction | |||
841 | /// into the BB of the truncate's user. | |||
842 | static bool | |||
843 | SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, | |||
844 | DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, | |||
845 | const TargetLowering &TLI) { | |||
846 | BasicBlock *UserBB = User->getParent(); | |||
847 | DenseMap<BasicBlock *, CastInst *> InsertedTruncs; | |||
848 | TruncInst *TruncI = dyn_cast<TruncInst>(User); | |||
849 | bool MadeChange = false; | |||
850 | ||||
851 | for (Value::user_iterator TruncUI = TruncI->user_begin(), | |||
852 | TruncE = TruncI->user_end(); | |||
853 | TruncUI != TruncE;) { | |||
854 | ||||
855 | Use &TruncTheUse = TruncUI.getUse(); | |||
856 | Instruction *TruncUser = cast<Instruction>(*TruncUI); | |||
857 | // Preincrement use iterator so we don't invalidate it. | |||
858 | ||||
859 | ++TruncUI; | |||
860 | ||||
861 | int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); | |||
862 | if (!ISDOpcode) | |||
863 | continue; | |||
864 | ||||
865 | // If the use is actually a legal node, there will not be an | |||
866 | // implicit truncate. | |||
867 | // FIXME: always querying the result type is just an | |||
868 | // approximation; some nodes' legality is determined by the | |||
869 | // operand or other means. There's no good way to find out though. | |||
870 | if (TLI.isOperationLegalOrCustom( | |||
871 | ISDOpcode, TLI.getValueType(TruncUser->getType(), true))) | |||
872 | continue; | |||
873 | ||||
874 | // Don't bother for PHI nodes. | |||
875 | if (isa<PHINode>(TruncUser)) | |||
876 | continue; | |||
877 | ||||
878 | BasicBlock *TruncUserBB = TruncUser->getParent(); | |||
879 | ||||
880 | if (UserBB == TruncUserBB) | |||
881 | continue; | |||
882 | ||||
883 | BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; | |||
884 | CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; | |||
885 | ||||
886 | if (!InsertedShift && !InsertedTrunc) { | |||
887 | BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); | |||
888 | // Sink the shift | |||
889 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
890 | InsertedShift = | |||
891 | BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt); | |||
892 | else | |||
893 | InsertedShift = | |||
894 | BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt); | |||
895 | ||||
896 | // Sink the trunc | |||
897 | BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); | |||
898 | TruncInsertPt++; | |||
899 | ||||
900 | InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, | |||
901 | TruncI->getType(), "", TruncInsertPt); | |||
902 | ||||
903 | MadeChange = true; | |||
904 | ||||
905 | TruncTheUse = InsertedTrunc; | |||
906 | } | |||
907 | } | |||
908 | return MadeChange; | |||
909 | } | |||
910 | ||||
911 | /// OptimizeExtractBits - sink the shift-*right* instruction into user blocks if | |||
912 | /// its uses could potentially be combined with the shift to form a BitExtract | |||
913 | /// instruction. This is applied only if the architecture supports a BitExtract | |||
914 | /// instruction. Here is an example: | |||
915 | /// BB1: | |||
916 | /// %x.extract.shift = lshr i64 %arg1, 32 | |||
917 | /// BB2: | |||
918 | /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 | |||
919 | /// ==> | |||
920 | /// | |||
921 | /// BB2: | |||
922 | /// %x.extract.shift.1 = lshr i64 %arg1, 32 | |||
923 | /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 | |||
924 | /// | |||
925 | /// CodeGen will recognize the pattern in BB2 and generate a BitExtract | |||
926 | /// instruction. | |||
927 | /// Return true if any changes are made. | |||
928 | static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, | |||
929 | const TargetLowering &TLI) { | |||
930 | BasicBlock *DefBB = ShiftI->getParent(); | |||
931 | ||||
932 | /// Only insert instructions in each block once. | |||
933 | DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; | |||
934 | ||||
935 | bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(ShiftI->getType())); | |||
936 | ||||
937 | bool MadeChange = false; | |||
938 | for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); | |||
939 | UI != E;) { | |||
940 | Use &TheUse = UI.getUse(); | |||
941 | Instruction *User = cast<Instruction>(*UI); | |||
942 | // Preincrement use iterator so we don't invalidate it. | |||
943 | ++UI; | |||
944 | ||||
945 | // Don't bother for PHI nodes. | |||
946 | if (isa<PHINode>(User)) | |||
947 | continue; | |||
948 | ||||
949 | if (!isExtractBitsCandidateUse(User)) | |||
950 | continue; | |||
951 | ||||
952 | BasicBlock *UserBB = User->getParent(); | |||
953 | ||||
954 | if (UserBB == DefBB) { | |||
955 | // If the shift and truncate instructions are in the same BB, the use of | |||
956 | // the truncate (TruncUse) may still introduce another truncate if it is | |||
957 | // not legal. In this case, we would like to sink both the shift and the | |||
958 | // truncate instruction to the BB of TruncUse. | |||
959 | // for example: | |||
960 | // BB1: | |||
961 | // i64 shift.result = lshr i64 opnd, imm | |||
962 | // trunc.result = trunc shift.result to i16 | |||
963 | // | |||
964 | // BB2: | |||
965 | // ----> We will have an implicit truncate here if the architecture does | |||
966 | // not have i16 compare. | |||
967 | // cmp i16 trunc.result, opnd2 | |||
968 | // | |||
969 | if (isa<TruncInst>(User) && shiftIsLegal | |||
970 | // If the type of the truncate is legal, no truncate will be | |||
971 | // introduced in other basic blocks. | |||
972 | && (!TLI.isTypeLegal(TLI.getValueType(User->getType())))) | |||
973 | MadeChange = | |||
974 | SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI); | |||
975 | ||||
976 | continue; | |||
977 | } | |||
978 | // If we have already inserted a shift into this block, use it. | |||
979 | BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; | |||
980 | ||||
981 | if (!InsertedShift) { | |||
982 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
983 | ||||
984 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
985 | InsertedShift = | |||
986 | BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "", InsertPt); | |||
987 | else | |||
988 | InsertedShift = | |||
989 | BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "", InsertPt); | |||
990 | ||||
991 | MadeChange = true; | |||
992 | } | |||
993 | ||||
994 | // Replace a use of the shift with a use of the new shift. | |||
995 | TheUse = InsertedShift; | |||
996 | } | |||
997 | ||||
998 | // If we removed all uses, nuke the shift. | |||
999 | if (ShiftI->use_empty()) | |||
1000 | ShiftI->eraseFromParent(); | |||
1001 | ||||
1002 | return MadeChange; | |||
1003 | } | |||
1004 | ||||
1005 | // ScalarizeMaskedLoad() translates masked load intrinsic, like | |||
1006 | // <16 x i32 > @llvm.masked.load( <16 x i32>* %addr, i32 align, | |||
1007 | // <16 x i1> %mask, <16 x i32> %passthru) | |||
1008 | // to a chain of basic blocks that load the elements one by one if | |||
1009 | // the appropriate mask bit is set | |||
1010 | // | |||
1011 | // %1 = bitcast i8* %addr to i32* | |||
1012 | // %2 = extractelement <16 x i1> %mask, i32 0 | |||
1013 | // %3 = icmp eq i1 %2, true | |||
1014 | // br i1 %3, label %cond.load, label %else | |||
1015 | // | |||
1016 | //cond.load: ; preds = %0 | |||
1017 | // %4 = getelementptr i32* %1, i32 0 | |||
1018 | // %5 = load i32* %4 | |||
1019 | // %6 = insertelement <16 x i32> undef, i32 %5, i32 0 | |||
1020 | // br label %else | |||
1021 | // | |||
1022 | //else: ; preds = %0, %cond.load | |||
1023 | // %res.phi.else = phi <16 x i32> [ %6, %cond.load ], [ undef, %0 ] | |||
1024 | // %7 = extractelement <16 x i1> %mask, i32 1 | |||
1025 | // %8 = icmp eq i1 %7, true | |||
1026 | // br i1 %8, label %cond.load1, label %else2 | |||
1027 | // | |||
1028 | //cond.load1: ; preds = %else | |||
1029 | // %9 = getelementptr i32* %1, i32 1 | |||
1030 | // %10 = load i32* %9 | |||
1031 | // %11 = insertelement <16 x i32> %res.phi.else, i32 %10, i32 1 | |||
1032 | // br label %else2 | |||
1033 | // | |||
1034 | //else2: ; preds = %else, %cond.load1 | |||
1035 | // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ] | |||
1036 | // %12 = extractelement <16 x i1> %mask, i32 2 | |||
1037 | // %13 = icmp eq i1 %12, true | |||
1038 | // br i1 %13, label %cond.load4, label %else5 | |||
1039 | // | |||
1040 | static void ScalarizeMaskedLoad(CallInst *CI) { | |||
1041 | Value *Ptr = CI->getArgOperand(0); | |||
1042 | Value *Src0 = CI->getArgOperand(3); | |||
1043 | Value *Mask = CI->getArgOperand(2); | |||
1044 | VectorType *VecType = dyn_cast<VectorType>(CI->getType()); | |||
1045 | assert(VecType && "Unexpected return type of masked load intrinsic"); | |||
1046 | ||||
1047 | Type *EltTy = VecType->getElementType(); | |||
1048 | ||||
1049 | IRBuilder<> Builder(CI->getContext()); | |||
1050 | Instruction *InsertPt = CI; | |||
1051 | BasicBlock *IfBlock = CI->getParent(); | |||
1052 | BasicBlock *CondBlock = nullptr; | |||
1053 | BasicBlock *PrevIfBlock = CI->getParent(); | |||
1054 | Builder.SetInsertPoint(InsertPt); | |||
1055 | ||||
1056 | Builder.SetCurrentDebugLocation(CI->getDebugLoc()); | |||
1057 | ||||
1058 | // Bitcast %addr from i8* to EltTy* | |||
1059 | Type *NewPtrType = | |||
1060 | EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace()); | |||
1061 | Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType); | |||
1062 | Value *UndefVal = UndefValue::get(VecType); | |||
1063 | ||||
1064 | // The result vector | |||
1065 | Value *VResult = UndefVal; | |||
1066 | ||||
1067 | PHINode *Phi = nullptr; | |||
1068 | Value *PrevPhi = UndefVal; | |||
1069 | ||||
1070 | unsigned VectorWidth = VecType->getNumElements(); | |||
1071 | for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { | |||
1072 | ||||
1073 | // Fill the "else" block, created in the previous iteration | |||
1074 | // | |||
1075 | // %res.phi.else3 = phi <16 x i32> [ %11, %cond.load1 ], [ %res.phi.else, %else ] | |||
1076 | // %mask_1 = extractelement <16 x i1> %mask, i32 Idx | |||
1077 | // %to_load = icmp eq i1 %mask_1, true | |||
1078 | // br i1 %to_load, label %cond.load, label %else | |||
1079 | // | |||
1080 | if (Idx > 0) { | |||
1081 | Phi = Builder.CreatePHI(VecType, 2, "res.phi.else"); | |||
1082 | Phi->addIncoming(VResult, CondBlock); | |||
1083 | Phi->addIncoming(PrevPhi, PrevIfBlock); | |||
1084 | PrevPhi = Phi; | |||
1085 | VResult = Phi; | |||
1086 | } | |||
1087 | ||||
1088 | Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx)); | |||
1089 | Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, | |||
1090 | ConstantInt::get(Predicate->getType(), 1)); | |||
1091 | ||||
1092 | // Create "cond" block | |||
1093 | // | |||
1094 | // %EltAddr = getelementptr i32* %1, i32 0 | |||
1095 | // %Elt = load i32* %EltAddr | |||
1096 | // VResult = insertelement <16 x i32> VResult, i32 %Elt, i32 Idx | |||
1097 | // | |||
1098 | CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.load"); | |||
1099 | Builder.SetInsertPoint(InsertPt); | |||
1100 | ||||
1101 | Value* Gep = Builder.CreateInBoundsGEP(FirstEltPtr, Builder.getInt32(Idx)); | |||
1102 | LoadInst* Load = Builder.CreateLoad(Gep, false); | |||
1103 | VResult = Builder.CreateInsertElement(VResult, Load, Builder.getInt32(Idx)); | |||
1104 | ||||
1105 | // Create "else" block, fill it in the next iteration | |||
1106 | BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); | |||
1107 | Builder.SetInsertPoint(InsertPt); | |||
1108 | Instruction *OldBr = IfBlock->getTerminator(); | |||
1109 | BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); | |||
1110 | OldBr->eraseFromParent(); | |||
1111 | PrevIfBlock = IfBlock; | |||
1112 | IfBlock = NewIfBlock; | |||
1113 | } | |||
1114 | ||||
1115 | Phi = Builder.CreatePHI(VecType, 2, "res.phi.select"); | |||
1116 | Phi->addIncoming(VResult, CondBlock); | |||
1117 | Phi->addIncoming(PrevPhi, PrevIfBlock); | |||
1118 | Value *NewI = Builder.CreateSelect(Mask, Phi, Src0); | |||
1119 | CI->replaceAllUsesWith(NewI); | |||
1120 | CI->eraseFromParent(); | |||
1121 | } | |||
1122 | ||||
1123 | // ScalarizeMaskedStore() translates masked store intrinsic, like | |||
1124 | // void @llvm.masked.store(<16 x i32> %src, <16 x i32>* %addr, i32 align, | |||
1125 | // <16 x i1> %mask) | |||
1126 | // to a chain of basic blocks that load the elements one-by-one if | |||
1127 | // the appropriate mask bit is set. | |||
1128 | // | |||
1129 | // %1 = bitcast i8* %addr to i32* | |||
1130 | // %2 = extractelement <16 x i1> %mask, i32 0 | |||
1131 | // %3 = icmp eq i1 %2, true | |||
1132 | // br i1 %3, label %cond.store, label %else | |||
1133 | // | |||
1134 | // cond.store: ; preds = %0 | |||
1135 | // %4 = extractelement <16 x i32> %val, i32 0 | |||
1136 | // %5 = getelementptr i32* %1, i32 0 | |||
1137 | // store i32 %4, i32* %5 | |||
1138 | // br label %else | |||
1139 | // | |||
1140 | // else: ; preds = %0, %cond.store | |||
1141 | // %6 = extractelement <16 x i1> %mask, i32 1 | |||
1142 | // %7 = icmp eq i1 %6, true | |||
1143 | // br i1 %7, label %cond.store1, label %else2 | |||
1144 | // | |||
1145 | // cond.store1: ; preds = %else | |||
1146 | // %8 = extractelement <16 x i32> %val, i32 1 | |||
1147 | // %9 = getelementptr i32* %1, i32 1 | |||
1148 | // store i32 %8, i32* %9 | |||
1149 | // br label %else2 | |||
1150 | // . . . | |||
1151 | static void ScalarizeMaskedStore(CallInst *CI) { | |||
1152 | Value *Ptr = CI->getArgOperand(1); | |||
1153 | Value *Src = CI->getArgOperand(0); | |||
1154 | Value *Mask = CI->getArgOperand(3); | |||
1155 | ||||
1156 | VectorType *VecType = dyn_cast<VectorType>(Src->getType()); | |||
1157 | assert(VecType && "Unexpected data type in masked store intrinsic"); | |||
1158 | ||||
1159 | Type *EltTy = VecType->getElementType(); | |||
1160 | ||||
1161 | IRBuilder<> Builder(CI->getContext()); | |||
1162 | Instruction *InsertPt = CI; | |||
1163 | BasicBlock *IfBlock = CI->getParent(); | |||
1164 | Builder.SetInsertPoint(InsertPt); | |||
1165 | Builder.SetCurrentDebugLocation(CI->getDebugLoc()); | |||
1166 | ||||
1167 | // Bitcast %addr from i8* to EltTy* | |||
1168 | Type *NewPtrType = | |||
1169 | EltTy->getPointerTo(cast<PointerType>(Ptr->getType())->getAddressSpace()); | |||
1170 | Value *FirstEltPtr = Builder.CreateBitCast(Ptr, NewPtrType); | |||
1171 | ||||
1172 | unsigned VectorWidth = VecType->getNumElements(); | |||
1173 | for (unsigned Idx = 0; Idx < VectorWidth; ++Idx) { | |||
1174 | ||||
1175 | // Fill the "else" block, created in the previous iteration | |||
1176 | // | |||
1177 | // %mask_1 = extractelement <16 x i1> %mask, i32 Idx | |||
1178 | // %to_store = icmp eq i1 %mask_1, true | |||
1179 | // br i1 %to_store, label %cond.store, label %else | |||
1180 | // | |||
1181 | Value *Predicate = Builder.CreateExtractElement(Mask, Builder.getInt32(Idx)); | |||
1182 | Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Predicate, | |||
1183 | ConstantInt::get(Predicate->getType(), 1)); | |||
1184 | ||||
1185 | // Create "cond" block | |||
1186 | // | |||
1187 | // %OneElt = extractelement <16 x i32> %Src, i32 Idx | |||
1188 | // %EltAddr = getelementptr i32* %1, i32 0 | |||
1189 | // store i32 %OneElt, i32* %EltAddr | |||
1190 | // | |||
1191 | BasicBlock *CondBlock = IfBlock->splitBasicBlock(InsertPt, "cond.store"); | |||
1192 | Builder.SetInsertPoint(InsertPt); | |||
1193 | ||||
1194 | Value *OneElt = Builder.CreateExtractElement(Src, Builder.getInt32(Idx)); | |||
1195 | Value* Gep = Builder.CreateInBoundsGEP(FirstEltPtr, Builder.getInt32(Idx)); | |||
1196 | Builder.CreateStore(OneElt, Gep); | |||
1197 | ||||
1198 | // Create "else" block, fill it in the next iteration | |||
1199 | BasicBlock *NewIfBlock = CondBlock->splitBasicBlock(InsertPt, "else"); | |||
1200 | Builder.SetInsertPoint(InsertPt); | |||
1201 | Instruction *OldBr = IfBlock->getTerminator(); | |||
1202 | BranchInst::Create(CondBlock, NewIfBlock, Cmp, OldBr); | |||
1203 | OldBr->eraseFromParent(); | |||
1204 | IfBlock = NewIfBlock; | |||
1205 | } | |||
1206 | CI->eraseFromParent(); | |||
1207 | } | |||
1208 | ||||
1209 | bool CodeGenPrepare::OptimizeCallInst(CallInst *CI, bool& ModifiedDT) { | |||
1210 | BasicBlock *BB = CI->getParent(); | |||
1211 | ||||
1212 | // Lower inline assembly if we can. | |||
1213 | // If we found an inline asm expression, and if the target knows how to | |||
1214 | // lower it to normal LLVM code, do so now. | |||
1215 | if (TLI && isa<InlineAsm>(CI->getCalledValue())) { | |||
1216 | if (TLI->ExpandInlineAsm(CI)) { | |||
1217 | // Avoid invalidating the iterator. | |||
1218 | CurInstIterator = BB->begin(); | |||
1219 | // Avoid processing instructions out of order, which could cause | |||
1220 | // reuse before a value is defined. | |||
1221 | SunkAddrs.clear(); | |||
1222 | return true; | |||
1223 | } | |||
1224 | // Sink address computing for memory operands into the block. | |||
1225 | if (OptimizeInlineAsmInst(CI)) | |||
1226 | return true; | |||
1227 | } | |||
1228 | ||||
1229 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | |||
1230 | if (II) { | |||
1231 | switch (II->getIntrinsicID()) { | |||
1232 | default: break; | |||
1233 | case Intrinsic::objectsize: { | |||
1234 | // Lower all uses of llvm.objectsize.* | |||
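1234a | // For illustration: when the object size is unknown, the intrinsic folds | |||
1234b | // to -1 if its second argument (min) is false and to 0 if it is true, | |||
1234c | // which is exactly the Min ? 0 : -1ULL constant built below. | |||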
1235 | bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1); | |||
1236 | Type *ReturnTy = CI->getType(); | |||
1237 | Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL); | |||
1238 | ||||
1239 | // Substituting this can cause recursive simplifications, which can | |||
1240 | // invalidate our iterator. Use a WeakVH to hold onto it in case this | |||
1241 | // happens. | |||
1242 | WeakVH IterHandle(CurInstIterator); | |||
1243 | ||||
1244 | replaceAndRecursivelySimplify(CI, RetVal, | |||
1245 | TLI ? TLI->getDataLayout() : nullptr, | |||
1246 | TLInfo, ModifiedDT ? nullptr : DT); | |||
1247 | ||||
1248 | // If the iterator instruction was recursively deleted, start over at the | |||
1249 | // start of the block. | |||
1250 | if (IterHandle != CurInstIterator) { | |||
1251 | CurInstIterator = BB->begin(); | |||
1252 | SunkAddrs.clear(); | |||
1253 | } | |||
1254 | return true; | |||
1255 | } | |||
1256 | case Intrinsic::masked_load: { | |||
1257 | // Scalarize unsupported vector masked load | |||
1258 | if (!TTI->isLegalMaskedLoad(CI->getType(), 1)) { | |||
1259 | ScalarizeMaskedLoad(CI); | |||
1260 | ModifiedDT = true; | |||
1261 | return true; | |||
1262 | } | |||
1263 | return false; | |||
1264 | } | |||
1265 | case Intrinsic::masked_store: { | |||
1266 | if (!TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(), 1)) { | |||
1267 | ScalarizeMaskedStore(CI); | |||
1268 | ModifiedDT = true; | |||
1269 | return true; | |||
1270 | } | |||
1271 | return false; | |||
1272 | } | |||
1273 | } | |||
1274 | ||||
1275 | if (TLI) { | |||
1276 | SmallVector<Value*, 2> PtrOps; | |||
1277 | Type *AccessTy; | |||
1278 | if (TLI->GetAddrModeArguments(II, PtrOps, AccessTy)) | |||
1279 | while (!PtrOps.empty()) | |||
1280 | if (OptimizeMemoryInst(II, PtrOps.pop_back_val(), AccessTy)) | |||
1281 | return true; | |||
1282 | } | |||
1283 | } | |||
1284 | ||||
1285 | // From here on out we're working with named functions. | |||
1286 | if (!CI->getCalledFunction()) return false; | |||
1287 | ||||
1288 | // We'll need DataLayout from here on out. | |||
1289 | const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr; | |||
1290 | if (!TD) return false; | |||
1291 | ||||
1292 | // Lower all default uses of _chk calls. This is very similar | |||
1293 | // to what InstCombineCalls does, but here we are only lowering calls | |||
1294 | // to fortified library functions (e.g. __memcpy_chk) that have the default | |||
1295 | // "don't know" as the objectsize. Anything else should be left alone. | |||
1296 | FortifiedLibCallSimplifier Simplifier(TD, TLInfo, true); | |||
1297 | if (Value *V = Simplifier.optimizeCall(CI)) { | |||
1298 | CI->replaceAllUsesWith(V); | |||
1299 | CI->eraseFromParent(); | |||
1300 | return true; | |||
1301 | } | |||
1302 | return false; | |||
1303 | } | |||
1304 | ||||
1305 | /// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return | |||
1306 | /// instructions to the predecessor to enable tail call optimizations. The | |||
1307 | /// case it is currently looking for is: | |||
1308 | /// @code | |||
1309 | /// bb0: | |||
1310 | /// %tmp0 = tail call i32 @f0() | |||
1311 | /// br label %return | |||
1312 | /// bb1: | |||
1313 | /// %tmp1 = tail call i32 @f1() | |||
1314 | /// br label %return | |||
1315 | /// bb2: | |||
1316 | /// %tmp2 = tail call i32 @f2() | |||
1317 | /// br label %return | |||
1318 | /// return: | |||
1319 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] | |||
1320 | /// ret i32 %retval | |||
1321 | /// @endcode | |||
1322 | /// | |||
1323 | /// => | |||
1324 | /// | |||
1325 | /// @code | |||
1326 | /// bb0: | |||
1327 | /// %tmp0 = tail call i32 @f0() | |||
1328 | /// ret i32 %tmp0 | |||
1329 | /// bb1: | |||
1330 | /// %tmp1 = tail call i32 @f1() | |||
1331 | /// ret i32 %tmp1 | |||
1332 | /// bb2: | |||
1333 | /// %tmp2 = tail call i32 @f2() | |||
1334 | /// ret i32 %tmp2 | |||
1335 | /// @endcode | |||
1336 | bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) { | |||
1337 | if (!TLI) | |||
1338 | return false; | |||
1339 | ||||
1340 | ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator()); | |||
1341 | if (!RI) | |||
1342 | return false; | |||
1343 | ||||
1344 | PHINode *PN = nullptr; | |||
1345 | BitCastInst *BCI = nullptr; | |||
1346 | Value *V = RI->getReturnValue(); | |||
1347 | if (V) { | |||
1348 | BCI = dyn_cast<BitCastInst>(V); | |||
1349 | if (BCI) | |||
1350 | V = BCI->getOperand(0); | |||
1351 | ||||
1352 | PN = dyn_cast<PHINode>(V); | |||
1353 | if (!PN) | |||
1354 | return false; | |||
1355 | } | |||
1356 | ||||
1357 | if (PN && PN->getParent() != BB) | |||
1358 | return false; | |||
1359 | ||||
1360 | // It's not safe to eliminate the sign / zero extension of the return value. | |||
1361 | // See llvm::isInTailCallPosition(). | |||
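1361a | // (For example, a caller whose return value is marked zeroext would have | |||
1361b | // to zero-extend the callee's result after the call, so such a call could | |||
1361c | // not be emitted as a tail call.) | |||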
1362 | const Function *F = BB->getParent(); | |||
1363 | AttributeSet CallerAttrs = F->getAttributes(); | |||
1364 | if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) || | |||
1365 | CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt)) | |||
1366 | return false; | |||
1367 | ||||
1368 | // Make sure there are no instructions between the PHI and return, or that the | |||
1369 | // return is the first instruction in the block. | |||
1370 | if (PN) { | |||
1371 | BasicBlock::iterator BI = BB->begin(); | |||
1372 | do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); | |||
1373 | if (&*BI == BCI) | |||
1374 | // Also skip over the bitcast. | |||
1375 | ++BI; | |||
1376 | if (&*BI != RI) | |||
1377 | return false; | |||
1378 | } else { | |||
1379 | BasicBlock::iterator BI = BB->begin(); | |||
1380 | while (isa<DbgInfoIntrinsic>(BI)) ++BI; | |||
1381 | if (&*BI != RI) | |||
1382 | return false; | |||
1383 | } | |||
1384 | ||||
1385 | /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail | |||
1386 | /// call. | |||
1387 | SmallVector<CallInst*, 4> TailCalls; | |||
1388 | if (PN) { | |||
1389 | for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { | |||
1390 | CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); | |||
1391 | // Make sure the phi value is indeed produced by the tail call. | |||
1392 | if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && | |||
1393 | TLI->mayBeEmittedAsTailCall(CI)) | |||
1394 | TailCalls.push_back(CI); | |||
1395 | } | |||
1396 | } else { | |||
1397 | SmallPtrSet<BasicBlock*, 4> VisitedBBs; | |||
1398 | for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { | |||
1399 | if (!VisitedBBs.insert(*PI).second) | |||
1400 | continue; | |||
1401 | ||||
1402 | BasicBlock::InstListType &InstList = (*PI)->getInstList(); | |||
1403 | BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); | |||
1404 | BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); | |||
1405 | do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); | |||
1406 | if (RI == RE) | |||
1407 | continue; | |||
1408 | ||||
1409 | CallInst *CI = dyn_cast<CallInst>(&*RI); | |||
1410 | if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI)) | |||
1411 | TailCalls.push_back(CI); | |||
1412 | } | |||
1413 | } | |||
1414 | ||||
1415 | bool Changed = false; | |||
1416 | for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { | |||
1417 | CallInst *CI = TailCalls[i]; | |||
1418 | CallSite CS(CI); | |||
1419 | ||||
1420 | // Conservatively require the attributes of the call to match those of the | |||
1421 | // return. Ignore noalias because it doesn't affect the call sequence. | |||
1422 | AttributeSet CalleeAttrs = CS.getAttributes(); | |||
1423 | if (AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex). | |||
1424 | removeAttribute(Attribute::NoAlias) != | |||
1425 | AttrBuilder(CalleeAttrs, AttributeSet::ReturnIndex). | |||
1426 | removeAttribute(Attribute::NoAlias)) | |||
1427 | continue; | |||
1428 | ||||
1429 | // Make sure the call instruction is followed by an unconditional branch to | |||
1430 | // the return block. | |||
1431 | BasicBlock *CallBB = CI->getParent(); | |||
1432 | BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); | |||
1433 | if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) | |||
1434 | continue; | |||
1435 | ||||
1436 | // Duplicate the return into CallBB. | |||
1437 | (void)FoldReturnIntoUncondBranch(RI, BB, CallBB); | |||
1438 | ModifiedDT = Changed = true; | |||
1439 | ++NumRetsDup; | |||
1440 | } | |||
1441 | ||||
1442 | // If we eliminated all predecessors of the block, delete the block now. | |||
1443 | if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) | |||
1444 | BB->eraseFromParent(); | |||
1445 | ||||
1446 | return Changed; | |||
1447 | } | |||
1448 | ||||
1449 | //===----------------------------------------------------------------------===// | |||
1450 | // Memory Optimization | |||
1451 | //===----------------------------------------------------------------------===// | |||
1452 | ||||
1453 | namespace { | |||
1454 | ||||
1455 | /// ExtAddrMode - This is an extended version of TargetLowering::AddrMode | |||
1456 | /// which holds actual Value*'s for register values. | |||
1457 | struct ExtAddrMode : public TargetLowering::AddrMode { | |||
1458 | Value *BaseReg; | |||
1459 | Value *ScaledReg; | |||
1460 | ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {} | |||
1461 | void print(raw_ostream &OS) const; | |||
1462 | void dump() const; | |||
1463 | ||||
1464 | bool operator==(const ExtAddrMode& O) const { | |||
1465 | return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) && | |||
1466 | (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) && | |||
1467 | (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale); | |||
1468 | } | |||
1469 | }; | |||
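1469a | // As an illustrative instance (hypothetical values), an address computed | |||
1469b | // as @g + 4 + %base + 2*%idx would be held as BaseGV = @g, BaseOffs = 4, | |||
1469c | // BaseReg = %base, Scale = 2 and ScaledReg = %idx; print() below renders | |||
1469d | // it as [GV:@g + 4 + Base:%base + 2*%idx]. | |||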
1470 | ||||
1471 | #ifndef NDEBUG | |||
1472 | static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { | |||
1473 | AM.print(OS); | |||
1474 | return OS; | |||
1475 | } | |||
1476 | #endif | |||
1477 | ||||
1478 | void ExtAddrMode::print(raw_ostream &OS) const { | |||
1479 | bool NeedPlus = false; | |||
1480 | OS << "["; | |||
1481 | if (BaseGV) { | |||
1482 | OS << (NeedPlus ? " + " : "") | |||
1483 | << "GV:"; | |||
1484 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | |||
1485 | NeedPlus = true; | |||
1486 | } | |||
1487 | ||||
1488 | if (BaseOffs) { | |||
1489 | OS << (NeedPlus ? " + " : "") | |||
1490 | << BaseOffs; | |||
1491 | NeedPlus = true; | |||
1492 | } | |||
1493 | ||||
1494 | if (BaseReg) { | |||
1495 | OS << (NeedPlus ? " + " : "") | |||
1496 | << "Base:"; | |||
1497 | BaseReg->printAsOperand(OS, /*PrintType=*/false); | |||
1498 | NeedPlus = true; | |||
1499 | } | |||
1500 | if (Scale) { | |||
1501 | OS << (NeedPlus ? " + " : "") | |||
1502 | << Scale << "*"; | |||
1503 | ScaledReg->printAsOperand(OS, /*PrintType=*/false); | |||
1504 | } | |||
1505 | ||||
1506 | OS << ']'; | |||
1507 | } | |||
1508 | ||||
1509 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
1510 | void ExtAddrMode::dump() const { | |||
1511 | print(dbgs()); | |||
1512 | dbgs() << '\n'; | |||
1513 | } | |||
1514 | #endif | |||
1515 | ||||
1516 | /// \brief This class provides transaction based operation on the IR. | |||
1517 | /// Every change made through this class is recorded in the internal state and | |||
1518 | /// can be undone (rollback) until commit is called. | |||
1519 | class TypePromotionTransaction { | |||
1520 | ||||
1521 | /// \brief This represents the common interface of the individual transaction. | |||
1522 | /// Each class implements the logic for doing one specific modification on | |||
1523 | /// the IR via the TypePromotionTransaction. | |||
1524 | class TypePromotionAction { | |||
1525 | protected: | |||
1526 | /// The Instruction modified. | |||
1527 | Instruction *Inst; | |||
1528 | ||||
1529 | public: | |||
1530 | /// \brief Constructor of the action. | |||
1531 | /// The constructor performs the related action on the IR. | |||
1532 | TypePromotionAction(Instruction *Inst) : Inst(Inst) {} | |||
1533 | ||||
1534 | virtual ~TypePromotionAction() {} | |||
1535 | ||||
1536 | /// \brief Undo the modification done by this action. | |||
1537 | /// When this method is called, the IR must be in the same state as it was | |||
1538 | /// before this action was applied. | |||
1539 | /// \pre Undoing the action works if and only if the IR is in the exact same | |||
1540 | /// state as it was directly after this action was applied. | |||
1541 | virtual void undo() = 0; | |||
1542 | ||||
1543 | /// \brief Advocate every change made by this action. | |||
1544 | /// When the action's effects on the IR are to be kept, it is important | |||
1545 | /// to call this function; otherwise hidden information may be kept forever. | |||
1546 | virtual void commit() { | |||
1547 | // Nothing to be done, this action is not doing anything. | |||
1548 | } | |||
1549 | }; | |||
1550 | ||||
1551 | /// \brief Utility to remember the position of an instruction. | |||
1552 | class InsertionHandler { | |||
1553 | /// Position of an instruction. | |||
1554 | /// Either the instruction: | |||
1555 | /// - Is the first in a basic block: BB is used. | |||
1556 | /// - Has a previous instruction: PrevInst is used. | |||
1557 | union { | |||
1558 | Instruction *PrevInst; | |||
1559 | BasicBlock *BB; | |||
1560 | } Point; | |||
1561 | /// Remember whether or not the instruction had a previous instruction. | |||
1562 | bool HasPrevInstruction; | |||
1563 | ||||
1564 | public: | |||
1565 | /// \brief Record the position of \p Inst. | |||
1566 | InsertionHandler(Instruction *Inst) { | |||
1567 | BasicBlock::iterator It = Inst; | |||
1568 | HasPrevInstruction = (It != (Inst->getParent()->begin())); | |||
1569 | if (HasPrevInstruction) | |||
1570 | Point.PrevInst = --It; | |||
1571 | else | |||
1572 | Point.BB = Inst->getParent(); | |||
1573 | } | |||
1574 | ||||
1575 | /// \brief Insert \p Inst at the recorded position. | |||
1576 | void insert(Instruction *Inst) { | |||
1577 | if (HasPrevInstruction) { | |||
1578 | if (Inst->getParent()) | |||
1579 | Inst->removeFromParent(); | |||
1580 | Inst->insertAfter(Point.PrevInst); | |||
1581 | } else { | |||
1582 | Instruction *Position = Point.BB->getFirstInsertionPt(); | |||
1583 | if (Inst->getParent()) | |||
1584 | Inst->moveBefore(Position); | |||
1585 | else | |||
1586 | Inst->insertBefore(Position); | |||
1587 | } | |||
1588 | } | |||
1589 | }; | |||
1590 | ||||
1591 | /// \brief Move an instruction before another. | |||
1592 | class InstructionMoveBefore : public TypePromotionAction { | |||
1593 | /// Original position of the instruction. | |||
1594 | InsertionHandler Position; | |||
1595 | ||||
1596 | public: | |||
1597 | /// \brief Move \p Inst before \p Before. | |||
1598 | InstructionMoveBefore(Instruction *Inst, Instruction *Before) | |||
1599 | : TypePromotionAction(Inst), Position(Inst) { | |||
1600 | DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: move: " << * Inst << "\nbefore: " << *Before << "\n"; } } while (0); | |||
1601 | Inst->moveBefore(Before); | |||
1602 | } | |||
1603 | ||||
1604 | /// \brief Move the instruction back to its original position. | |||
1605 | void undo() override { | |||
1606 | DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: moveBefore: " << *Inst << "\n"; } } while (0); | |||
1607 | Position.insert(Inst); | |||
1608 | } | |||
1609 | }; | |||
1610 | ||||
1611 | /// \brief Set the operand of an instruction with a new value. | |||
1612 | class OperandSetter : public TypePromotionAction { | |||
1613 | /// Original operand of the instruction. | |||
1614 | Value *Origin; | |||
1615 | /// Index of the modified instruction. | |||
1616 | unsigned Idx; | |||
1617 | ||||
1618 | public: | |||
1619 | /// \brief Set \p Idx operand of \p Inst with \p NewVal. | |||
1620 | OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) | |||
1621 | : TypePromotionAction(Inst), Idx(Idx) { | |||
1622 | DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: setOperand: " << Idx << "\n" << "for:" << *Inst << "\n" << "with:" << *NewVal << "\n"; } } while ( 0) | |||
1623 | << "for:" << *Inst << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: setOperand: " << Idx << "\n" << "for:" << *Inst << "\n" << "with:" << *NewVal << "\n"; } } while ( 0) | |||
1624 | << "with:" << *NewVal << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: setOperand: " << Idx << "\n" << "for:" << *Inst << "\n" << "with:" << *NewVal << "\n"; } } while ( 0); | |||
1625 | Origin = Inst->getOperand(Idx); | |||
1626 | Inst->setOperand(Idx, NewVal); | |||
1627 | } | |||
1628 | ||||
1629 | /// \brief Restore the original value of the instruction. | |||
1630 | void undo() override { | |||
1631 | DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" << "with: " << *Origin << "\n"; } } while ( 0) | |||
1632 | << "for: " << *Inst << "\n"do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" << "with: " << *Origin << "\n"; } } while ( 0) | |||
1633 | << "with: " << *Origin << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: setOperand:" << Idx << "\n" << "for: " << *Inst << "\n" << "with: " << *Origin << "\n"; } } while ( 0); | |||
1634 | Inst->setOperand(Idx, Origin); | |||
1635 | } | |||
1636 | }; | |||
1637 | ||||
1638 | /// \brief Hide the operands of an instruction. | |||
1639 | /// Do as if this instruction was not using any of its operands. | |||
1640 | class OperandsHider : public TypePromotionAction { | |||
1641 | /// The list of original operands. | |||
1642 | SmallVector<Value *, 4> OriginalValues; | |||
1643 | ||||
1644 | public: | |||
1645 | /// \brief Remove \p Inst from the uses of the operands of \p Inst. | |||
1646 | OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { | |||
1647 | DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: OperandsHider: " << *Inst << "\n"; } } while (0); | |||
1648 | unsigned NumOpnds = Inst->getNumOperands(); | |||
1649 | OriginalValues.reserve(NumOpnds); | |||
1650 | for (unsigned It = 0; It < NumOpnds; ++It) { | |||
1651 | // Save the current operand. | |||
1652 | Value *Val = Inst->getOperand(It); | |||
1653 | OriginalValues.push_back(Val); | |||
1654 | // Set a dummy one. | |||
1655 | // We could use OperandSetter here, but that would imply an overhead | |||
1656 | // that we are not willing to pay. | |||
1657 | Inst->setOperand(It, UndefValue::get(Val->getType())); | |||
1658 | } | |||
1659 | } | |||
1660 | ||||
1661 | /// \brief Restore the original list of uses. | |||
1662 | void undo() override { | |||
1663 | DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: OperandsHider: " << *Inst << "\n"; } } while (0); | |||
1664 | for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) | |||
1665 | Inst->setOperand(It, OriginalValues[It]); | |||
1666 | } | |||
1667 | }; | |||
1668 | ||||
1669 | /// \brief Build a truncate instruction. | |||
1670 | class TruncBuilder : public TypePromotionAction { | |||
1671 | Value *Val; | |||
1672 | public: | |||
1673 | /// \brief Build a truncate instruction of \p Opnd producing a \p Ty | |||
1674 | /// result. | |||
1675 | /// trunc Opnd to Ty. | |||
1676 | TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { | |||
1677 | IRBuilder<> Builder(Opnd); | |||
1678 | Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); | |||
1679 | DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: TruncBuilder: " << *Val << "\n"; } } while (0); | |||
1680 | } | |||
1681 | ||||
1682 | /// \brief Get the built value. | |||
1683 | Value *getBuiltValue() { return Val; } | |||
1684 | ||||
1685 | /// \brief Remove the built instruction. | |||
1686 | void undo() override { | |||
1687 | DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: TruncBuilder: " << *Val << "\n"; } } while (0); | |||
1688 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
1689 | IVal->eraseFromParent(); | |||
1690 | } | |||
1691 | }; | |||
1692 | ||||
1693 | /// \brief Build a sign extension instruction. | |||
1694 | class SExtBuilder : public TypePromotionAction { | |||
1695 | Value *Val; | |||
1696 | public: | |||
1697 | /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty | |||
1698 | /// result. | |||
1699 | /// sext Opnd to Ty. | |||
1700 | SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
1701 | : TypePromotionAction(InsertPt) { | |||
1702 | IRBuilder<> Builder(InsertPt); | |||
1703 | Val = Builder.CreateSExt(Opnd, Ty, "promoted"); | |||
1704 | DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: SExtBuilder: " << *Val << "\n"; } } while (0); | |||
1705 | } | |||
1706 | ||||
1707 | /// \brief Get the built value. | |||
1708 | Value *getBuiltValue() { return Val; } | |||
1709 | ||||
1710 | /// \brief Remove the built instruction. | |||
1711 | void undo() override { | |||
1712 | DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: SExtBuilder: " << *Val << "\n"; } } while (0); | |||
1713 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
1714 | IVal->eraseFromParent(); | |||
1715 | } | |||
1716 | }; | |||
1717 | ||||
1718 | /// \brief Build a zero extension instruction. | |||
1719 | class ZExtBuilder : public TypePromotionAction { | |||
1720 | Value *Val; | |||
1721 | public: | |||
1722 | /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty | |||
1723 | /// result. | |||
1724 | /// zext Opnd to Ty. | |||
1725 | ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
1726 | : TypePromotionAction(InsertPt) { | |||
1727 | IRBuilder<> Builder(InsertPt); | |||
1728 | Val = Builder.CreateZExt(Opnd, Ty, "promoted"); | |||
1729 | DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: ZExtBuilder: " << *Val << "\n"; } } while (0); | |||
1730 | } | |||
1731 | ||||
1732 | /// \brief Get the built value. | |||
1733 | Value *getBuiltValue() { return Val; } | |||
1734 | ||||
1735 | /// \brief Remove the built instruction. | |||
1736 | void undo() override { | |||
1737 | DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"; } } while (0); | |||
1738 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
1739 | IVal->eraseFromParent(); | |||
1740 | } | |||
1741 | }; | |||
1742 | ||||
1743 | /// \brief Mutate an instruction to another type. | |||
1744 | class TypeMutator : public TypePromotionAction { | |||
1745 | /// Record the original type. | |||
1746 | Type *OrigTy; | |||
1747 | ||||
1748 | public: | |||
1749 | /// \brief Mutate the type of \p Inst into \p NewTy. | |||
1750 | TypeMutator(Instruction *Inst, Type *NewTy) | |||
1751 | : TypePromotionAction(Inst), OrigTy(Inst->getType()) { | |||
1752 | DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTydo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy << "\n"; } } while (0) | |||
1753 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy << "\n"; } } while (0); | |||
1754 | Inst->mutateType(NewTy); | |||
1755 | } | |||
1756 | ||||
1757 | /// \brief Mutate the instruction back to its original type. | |||
1758 | void undo() override { | |||
1759 | DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTydo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy << "\n"; } } while (0) | |||
1760 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy << "\n"; } } while (0); | |||
1761 | Inst->mutateType(OrigTy); | |||
1762 | } | |||
1763 | }; | |||
1764 | ||||
1765 | /// \brief Replace the uses of an instruction by another instruction. | |||
1766 | class UsesReplacer : public TypePromotionAction { | |||
1767 | /// Helper structure to keep track of the replaced uses. | |||
1768 | struct InstructionAndIdx { | |||
1769 | /// The instruction using the instruction. | |||
1770 | Instruction *Inst; | |||
1771 | /// The index where this instruction is used for Inst. | |||
1772 | unsigned Idx; | |||
1773 | InstructionAndIdx(Instruction *Inst, unsigned Idx) | |||
1774 | : Inst(Inst), Idx(Idx) {} | |||
1775 | }; | |||
1776 | ||||
1777 | /// Keep track of the original uses (pair Instruction, Index). | |||
1778 | SmallVector<InstructionAndIdx, 4> OriginalUses; | |||
1779 | typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator; | |||
1780 | ||||
1781 | public: | |||
1782 | /// \brief Replace all the use of \p Inst by \p New. | |||
1783 | UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { | |||
1784 | DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *Newdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New << "\n"; } } while (0) | |||
1785 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New << "\n"; } } while (0); | |||
1786 | // Record the original uses. | |||
1787 | for (Use &U : Inst->uses()) { | |||
1788 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
1789 | OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); | |||
1790 | } | |||
1791 | // Now, we can replace the uses. | |||
1792 | Inst->replaceAllUsesWith(New); | |||
1793 | } | |||
1794 | ||||
1795 | /// \brief Reassign the original uses of Inst to Inst. | |||
1796 | void undo() override { | |||
1797 | DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"; } } while (0); | |||
1798 | for (use_iterator UseIt = OriginalUses.begin(), | |||
1799 | EndIt = OriginalUses.end(); | |||
1800 | UseIt != EndIt; ++UseIt) { | |||
1801 | UseIt->Inst->setOperand(UseIt->Idx, Inst); | |||
1802 | } | |||
1803 | } | |||
1804 | }; | |||
1805 | ||||
1806 | /// \brief Remove an instruction from the IR. | |||
1807 | class InstructionRemover : public TypePromotionAction { | |||
1808 | /// Original position of the instruction. | |||
1809 | InsertionHandler Inserter; | |||
1810 | /// Helper structure to hide all the link to the instruction. In other | |||
1811 | /// words, this helps to do as if the instruction was removed. | |||
1812 | OperandsHider Hider; | |||
1813 | /// Keep track of the uses replaced, if any. | |||
1814 | UsesReplacer *Replacer; | |||
1815 | ||||
1816 | public: | |||
1817 | /// \brief Remove all references to \p Inst and optionally replace all its | |||
1818 | /// uses with \p New. | |||
1819 | /// \pre If !Inst->use_empty(), then New != nullptr | |||
1820 | InstructionRemover(Instruction *Inst, Value *New = nullptr) | |||
1821 | : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), | |||
1822 | Replacer(nullptr) { | |||
1823 | if (New) | |||
1824 | Replacer = new UsesReplacer(Inst, New); | |||
1825 | DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Do: InstructionRemover: " << *Inst << "\n"; } } while (0); | |||
1826 | Inst->removeFromParent(); | |||
1827 | } | |||
1828 | ||||
1829 | ~InstructionRemover() { delete Replacer; } | |||
1830 | ||||
1831 | /// \brief Really remove the instruction. | |||
1832 | void commit() override { delete Inst; } | |||
1833 | ||||
1834 | /// \brief Resurrect the instruction and reassign it to the proper uses if a | |||
1835 | /// new value was provided when building this action. | |||
1836 | void undo() override { | |||
1837 | DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"; } } while (0); | |||
1838 | Inserter.insert(Inst); | |||
1839 | if (Replacer) | |||
1840 | Replacer->undo(); | |||
1841 | Hider.undo(); | |||
1842 | } | |||
1843 | }; | |||
1844 | ||||
1845 | public: | |||
1846 | /// Restoration point. | |||
1847 | /// The restoration point is a pointer to an action instead of an iterator | |||
1848 | /// because the iterator may be invalidated but not the pointer. | |||
1849 | typedef const TypePromotionAction *ConstRestorationPt; | |||
1850 | /// Advocate every change made in that transaction. | |||
1851 | void commit(); | |||
1852 | /// Undo all the changes made after the given point. | |||
1853 | void rollback(ConstRestorationPt Point); | |||
1854 | /// Get the current restoration point. | |||
1855 | ConstRestorationPt getRestorationPoint() const; | |||
1856 | ||||
1857 | /// \name API for IR modification with state keeping to support rollback. | |||
1858 | /// @{ | |||
1859 | /// Same as Instruction::setOperand. | |||
1860 | void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); | |||
1861 | /// Same as Instruction::eraseFromParent. | |||
1862 | void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); | |||
1863 | /// Same as Value::replaceAllUsesWith. | |||
1864 | void replaceAllUsesWith(Instruction *Inst, Value *New); | |||
1865 | /// Same as Value::mutateType. | |||
1866 | void mutateType(Instruction *Inst, Type *NewTy); | |||
1867 | /// Same as IRBuilder::createTrunc. | |||
1868 | Value *createTrunc(Instruction *Opnd, Type *Ty); | |||
1869 | /// Same as IRBuilder::createSExt. | |||
1870 | Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
1871 | /// Same as IRBuilder::createZExt. | |||
1872 | Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
1873 | /// Same as Instruction::moveBefore. | |||
1874 | void moveBefore(Instruction *Inst, Instruction *Before); | |||
1875 | /// @} | |||
1876 | ||||
1877 | private: | |||
1878 | /// The ordered list of actions made so far. | |||
1879 | SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; | |||
1880 | typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt; | |||
1881 | }; | |||
1882 | ||||
1883 | void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, | |||
1884 | Value *NewVal) { | |||
1885 | Actions.push_back( | |||
1886 | make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal)); | |||
1887 | } | |||
1888 | ||||
1889 | void TypePromotionTransaction::eraseInstruction(Instruction *Inst, | |||
1890 | Value *NewVal) { | |||
1891 | Actions.push_back( | |||
1892 | make_unique<TypePromotionTransaction::InstructionRemover>(Inst, NewVal)); | |||
1893 | } | |||
1894 | ||||
1895 | void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, | |||
1896 | Value *New) { | |||
1897 | Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); | |||
1898 | } | |||
1899 | ||||
1900 | void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { | |||
1901 | Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); | |||
1902 | } | |||
1903 | ||||
1904 | Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, | |||
1905 | Type *Ty) { | |||
1906 | std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); | |||
1907 | Value *Val = Ptr->getBuiltValue(); | |||
1908 | Actions.push_back(std::move(Ptr)); | |||
1909 | return Val; | |||
1910 | } | |||
1911 | ||||
1912 | Value *TypePromotionTransaction::createSExt(Instruction *Inst, | |||
1913 | Value *Opnd, Type *Ty) { | |||
1914 | std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); | |||
1915 | Value *Val = Ptr->getBuiltValue(); | |||
1916 | Actions.push_back(std::move(Ptr)); | |||
1917 | return Val; | |||
1918 | } | |||
1919 | ||||
1920 | Value *TypePromotionTransaction::createZExt(Instruction *Inst, | |||
1921 | Value *Opnd, Type *Ty) { | |||
1922 | std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); | |||
1923 | Value *Val = Ptr->getBuiltValue(); | |||
1924 | Actions.push_back(std::move(Ptr)); | |||
1925 | return Val; | |||
1926 | } | |||
1927 | ||||
1928 | void TypePromotionTransaction::moveBefore(Instruction *Inst, | |||
1929 | Instruction *Before) { | |||
1930 | Actions.push_back( | |||
1931 | make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before)); | |||
1932 | } | |||
1933 | ||||
1934 | TypePromotionTransaction::ConstRestorationPt | |||
1935 | TypePromotionTransaction::getRestorationPoint() const { | |||
1936 | return !Actions.empty() ? Actions.back().get() : nullptr; | |||
1937 | } | |||
1938 | ||||
1939 | void TypePromotionTransaction::commit() { | |||
1940 | for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; | |||
1941 | ++It) | |||
1942 | (*It)->commit(); | |||
1943 | Actions.clear(); | |||
1944 | } | |||
1945 | ||||
1946 | void TypePromotionTransaction::rollback( | |||
1947 | TypePromotionTransaction::ConstRestorationPt Point) { | |||
1948 | while (!Actions.empty() && Point != Actions.back().get()) { | |||
1949 | std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); | |||
1950 | Curr->undo(); | |||
1951 | } | |||
1952 | } | |||
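1952a | ||||
1952b | // A minimal usage sketch of the transaction API (illustrative only; the | |||
1952c | // instruction I, the type NewTy and the profitability predicate are | |||
1952d | // hypothetical): | |||
1952e | // | |||
1952f | //   TypePromotionTransaction TPT; | |||
1952g | //   TypePromotionTransaction::ConstRestorationPt Pt = | |||
1952h | //       TPT.getRestorationPoint(); | |||
1952i | //   TPT.mutateType(I, NewTy); // recorded, hence undoable | |||
1952j | //   if (Profitable)           // hypothetical check | |||
1952k | //     TPT.commit();           // keep all recorded changes | |||
1952l | //   else | |||
1952m | //     TPT.rollback(Pt);       // undo every action made after Pt | |||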
1953 | ||||
1954 | /// \brief A helper class for matching addressing modes. | |||
1955 | /// | |||
1956 | /// This encapsulates the logic for matching the target-legal addressing modes. | |||
1957 | class AddressingModeMatcher { | |||
1958 | SmallVectorImpl<Instruction*> &AddrModeInsts; | |||
1959 | const TargetLowering &TLI; | |||
1960 | ||||
1961 | /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and | |||
1962 | /// the memory instruction that we're computing this address for. | |||
1963 | Type *AccessTy; | |||
1964 | Instruction *MemoryInst; | |||
1965 | ||||
1966 | /// AddrMode - This is the addressing mode that we're building up. This is | |||
1967 | /// part of the return value of this addressing mode matching stuff. | |||
1968 | ExtAddrMode &AddrMode; | |||
1969 | ||||
1970 | /// The truncate instructions inserted by other CodeGenPrepare optimizations. | |||
1971 | const SetOfInstrs &InsertedTruncs; | |||
1972 | /// A map from the instructions to their type before promotion. | |||
1973 | InstrToOrigTy &PromotedInsts; | |||
1974 | /// The ongoing transaction where every action should be registered. | |||
1975 | TypePromotionTransaction &TPT; | |||
1976 | ||||
1977 | /// IgnoreProfitability - This is set to true when we should not do | |||
1978 | /// profitability checks. When true, IsProfitableToFoldIntoAddressingMode | |||
1979 | /// always returns true. | |||
1980 | bool IgnoreProfitability; | |||
1981 | ||||
1982 | AddressingModeMatcher(SmallVectorImpl<Instruction*> &AMI, | |||
1983 | const TargetLowering &T, Type *AT, | |||
1984 | Instruction *MI, ExtAddrMode &AM, | |||
1985 | const SetOfInstrs &InsertedTruncs, | |||
1986 | InstrToOrigTy &PromotedInsts, | |||
1987 | TypePromotionTransaction &TPT) | |||
1988 | : AddrModeInsts(AMI), TLI(T), AccessTy(AT), MemoryInst(MI), AddrMode(AM), | |||
1989 | InsertedTruncs(InsertedTruncs), PromotedInsts(PromotedInsts), TPT(TPT) { | |||
1990 | IgnoreProfitability = false; | |||
1991 | } | |||
1992 | public: | |||
1993 | ||||
1994 | /// Match - Find the maximal addressing mode that a load/store of V can fold, | |||
1995 | /// given an access type of AccessTy. This returns a list of involved | |||
1996 | /// instructions in AddrModeInsts. | |||
1997 | /// \p InsertedTruncs The truncate instructions inserted by other | |||
1998 | ///    CodeGenPrepare optimizations. | |||
1999 | /// | |||
2000 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
2001 | /// \p TPT The ongoing transaction where every action should be registered. | |||
2002 | static ExtAddrMode Match(Value *V, Type *AccessTy, | |||
2003 | Instruction *MemoryInst, | |||
2004 | SmallVectorImpl<Instruction*> &AddrModeInsts, | |||
2005 | const TargetLowering &TLI, | |||
2006 | const SetOfInstrs &InsertedTruncs, | |||
2007 | InstrToOrigTy &PromotedInsts, | |||
2008 | TypePromotionTransaction &TPT) { | |||
2009 | ExtAddrMode Result; | |||
2010 | ||||
2011 | bool Success = AddressingModeMatcher(AddrModeInsts, TLI, AccessTy, | |||
2012 | MemoryInst, Result, InsertedTruncs, | |||
2013 | PromotedInsts, TPT).MatchAddr(V, 0); | |||
2014 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
2015 | return Result; | |||
2016 | } | |||
2017 | private: | |||
2018 | bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); | |||
2019 | bool MatchAddr(Value *V, unsigned Depth); | |||
2020 | bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, | |||
2021 | bool *MovedAway = nullptr); | |||
2022 | bool IsProfitableToFoldIntoAddressingMode(Instruction *I, | |||
2023 | ExtAddrMode &AMBefore, | |||
2024 | ExtAddrMode &AMAfter); | |||
2025 | bool ValueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); | |||
2026 | bool IsPromotionProfitable(unsigned MatchedSize, unsigned SizeWithPromotion, | |||
2027 | Value *PromotedOperand) const; | |||
2028 | }; | |||
2029 | ||||
2030 | /// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode. | |||
2031 | /// Return true and update AddrMode if this addr mode is legal for the target, | |||
2032 | /// false if not. | |||
2033 | bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale, | |||
2034 | unsigned Depth) { | |||
2035 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing | |||
2036 | // mode. Just process that directly. | |||
2037 | if (Scale == 1) | |||
2038 | return MatchAddr(ScaleReg, Depth); | |||
2039 | ||||
2040 | // If the scale is 0, it takes nothing to add this. | |||
2041 | if (Scale == 0) | |||
2042 | return true; | |||
2043 | ||||
2044 | // If we already have a scale of this value, we can add to it, otherwise, we | |||
2045 | // need an available scale field. | |||
2046 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) | |||
2047 | return false; | |||
2048 | ||||
2049 | ExtAddrMode TestAddrMode = AddrMode; | |||
2050 | ||||
2051 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like | |||
2052 | // [A+B + A*7] -> [B+A*8]. | |||
2053 | TestAddrMode.Scale += Scale; | |||
2054 | TestAddrMode.ScaledReg = ScaleReg; | |||
2055 | ||||
2056 | // If the new address isn't legal, bail out. | |||
2057 | if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) | |||
2058 | return false; | |||
2059 | ||||
2060 | // It was legal, so commit it. | |||
2061 | AddrMode = TestAddrMode; | |||
2062 | ||||
2063 | // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now | |||
2064 | // to see if ScaleReg is actually X+C. If so, we can turn this into adding | |||
2065 | // X*Scale + C*Scale to addr mode. | |||
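2065a | // Illustrative case: with ScaleReg = (%x + 5) and Scale = 4, the mode | |||
2065b | // becomes ScaledReg = %x, Scale = 4, and BaseOffs grows by 20. | |||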
2066 | ConstantInt *CI = nullptr; Value *AddLHS = nullptr; | |||
2067 | if (isa<Instruction>(ScaleReg) && // not a constant expr. | |||
2068 | match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { | |||
2069 | TestAddrMode.ScaledReg = AddLHS; | |||
2070 | TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; | |||
2071 | ||||
2072 | // If this addressing mode is legal, commit it and remember that we folded | |||
2073 | // this instruction. | |||
2074 | if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) { | |||
2075 | AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); | |||
2076 | AddrMode = TestAddrMode; | |||
2077 | return true; | |||
2078 | } | |||
2079 | } | |||
2080 | ||||
2081 | // Otherwise, not (x+c)*scale, just return what we have. | |||
2082 | return true; | |||
2083 | } | |||
2084 | ||||
2085 | /// MightBeFoldableInst - This is a little filter, which returns true if an | |||
2086 | /// addressing computation involving I might be folded into a load/store | |||
2087 | /// accessing it. This doesn't need to be perfect, but needs to accept at least | |||
2088 | /// the set of instructions that MatchOperationAddr can. | |||
2089 | static bool MightBeFoldableInst(Instruction *I) { | |||
2090 | switch (I->getOpcode()) { | |||
2091 | case Instruction::BitCast: | |||
2092 | case Instruction::AddrSpaceCast: | |||
2093 | // Don't touch identity bitcasts. | |||
2094 | if (I->getType() == I->getOperand(0)->getType()) | |||
2095 | return false; | |||
2096 | return I->getType()->isPointerTy() || I->getType()->isIntegerTy(); | |||
2097 | case Instruction::PtrToInt: | |||
2098 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
2099 | return true; | |||
2100 | case Instruction::IntToPtr: | |||
2101 | // We know the input is intptr_t, so this is foldable. | |||
2102 | return true; | |||
2103 | case Instruction::Add: | |||
2104 | return true; | |||
2105 | case Instruction::Mul: | |||
2106 | case Instruction::Shl: | |||
2107 | // Can only handle X*C and X << C. | |||
2108 | return isa<ConstantInt>(I->getOperand(1)); | |||
2109 | case Instruction::GetElementPtr: | |||
2110 | return true; | |||
2111 | default: | |||
2112 | return false; | |||
2113 | } | |||
2114 | } | |||
2115 | ||||
2116 | /// \brief Check whether or not \p Val is a legal instruction for \p TLI. | |||
2117 | /// \note \p Val is assumed to be the product of some type promotion. | |||
2118 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed | |||
2119 | /// to be legal, as the non-promoted value would have had the same state. | |||
2120 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, Value *Val) { | |||
2121 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); | |||
2122 | if (!PromotedInst) | |||
2123 | return false; | |||
2124 | int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); | |||
2125 | // If the ISDOpcode is undefined, it was undefined before the promotion. | |||
2126 | if (!ISDOpcode) | |||
2127 | return true; | |||
2128 | // Otherwise, check if the promoted instruction is legal or not. | |||
2129 | return TLI.isOperationLegalOrCustom( | |||
2130 | ISDOpcode, TLI.getValueType(PromotedInst->getType())); | |||
2131 | } | |||
2132 | ||||
2133 | /// \brief Helper class to perform type promotion. | |||
2134 | class TypePromotionHelper { | |||
2135 | /// \brief Utility function to check whether or not a sign or zero extension | |||
2136 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by | |||
2137 | /// either using the operands of \p Inst or promoting \p Inst. | |||
2138 | /// The type of the extension is defined by \p IsSExt. | |||
2139 | /// In other words, check if: | |||
2140 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. | |||
2141 | /// #1 Promotion applies: | |||
2142 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). | |||
2143 | /// #2 Operand reuses: | |||
2144 | /// ext opnd1 to ConsideredExtType. | |||
2145 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
2146 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, | |||
2147 | const InstrToOrigTy &PromotedInsts, bool IsSExt); | |||
2148 | ||||
2149 | /// \brief Utility function to determine if \p OpIdx should be promoted when | |||
2150 | /// promoting \p Inst. | |||
2151 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { | |||
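2151a | // The condition operand (operand 0) of a select is an i1, or a vector of | |||
2151b | // i1, and must keep that type, so it is never a candidate for extension. | |||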
2152 | if (isa<SelectInst>(Inst) && OpIdx == 0) | |||
2153 | return false; | |||
2154 | return true; | |||
2155 | } | |||
2156 | ||||
2157 | /// \brief Utility function to promote the operand of \p Ext when this | |||
2158 | /// operand is a promotable trunc or sext or zext. | |||
2159 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
2160 | /// \p CreatedInsts[out] contains how many non-free instructions have been | |||
2161 | /// created to promote the operand of Ext. | |||
2162 | /// Newly added extensions are inserted in \p Exts. | |||
2163 | /// Newly added truncates are inserted in \p Truncs. | |||
2164 | /// Should never be called directly. | |||
2165 | /// \return The promoted value which is used instead of Ext. | |||
2166 | static Value *promoteOperandForTruncAndAnyExt( | |||
2167 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
2168 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts, | |||
2169 | SmallVectorImpl<Instruction *> *Exts, | |||
2170 | SmallVectorImpl<Instruction *> *Truncs); | |||
2171 | ||||
2172 | /// \brief Utility function to promote the operand of \p Ext when this | |||
2173 | /// operand is promotable and is not a supported trunc or sext. | |||
2174 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
2175 | /// \p CreatedInsts[out] contains how many non-free instructions have been | |||
2176 | /// created to promote the operand of Ext. | |||
2177 | /// Newly added extensions are inserted in \p Exts. | |||
2178 | /// Newly added truncates are inserted in \p Truncs. | |||
2179 | /// Should never be called directly. | |||
2180 | /// \return The promoted value which is used instead of Ext. | |||
2181 | static Value * | |||
2182 | promoteOperandForOther(Instruction *Ext, TypePromotionTransaction &TPT, | |||
2183 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts, | |||
2184 | SmallVectorImpl<Instruction *> *Exts, | |||
2185 | SmallVectorImpl<Instruction *> *Truncs, bool IsSExt); | |||
2186 | ||||
2187 | /// \see promoteOperandForOther. | |||
2188 | static Value * | |||
2189 | signExtendOperandForOther(Instruction *Ext, TypePromotionTransaction &TPT, | |||
2190 | InstrToOrigTy &PromotedInsts, | |||
2191 | unsigned &CreatedInsts, | |||
2192 | SmallVectorImpl<Instruction *> *Exts, | |||
2193 | SmallVectorImpl<Instruction *> *Truncs) { | |||
2194 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInsts, Exts, | |||
2195 | Truncs, true); | |||
2196 | } | |||
2197 | ||||
2198 | /// \see promoteOperandForOther. | |||
2199 | static Value * | |||
2200 | zeroExtendOperandForOther(Instruction *Ext, TypePromotionTransaction &TPT, | |||
2201 | InstrToOrigTy &PromotedInsts, | |||
2202 | unsigned &CreatedInsts, | |||
2203 | SmallVectorImpl<Instruction *> *Exts, | |||
2204 | SmallVectorImpl<Instruction *> *Truncs) { | |||
2205 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInsts, Exts, | |||
2206 | Truncs, false); | |||
2207 | } | |||
2208 | ||||
2209 | public: | |||
2210 | /// Type for the utility function that promotes the operand of Ext. | |||
2211 | typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT, | |||
2212 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts, | |||
2213 | SmallVectorImpl<Instruction *> *Exts, | |||
2214 | SmallVectorImpl<Instruction *> *Truncs); | |||
2215 | /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate | |||
2216 | /// action to promote the operand of \p Ext instead of using Ext. | |||
2217 | /// \return NULL if no promotable action is possible with the current | |||
2218 | /// sign extension. | |||
2219 | /// \p InsertedTruncs keeps track of all the truncate instructions inserted by | |||
2220 | /// the other CodeGenPrepare optimizations. This information is important | |||
2221 | /// because we do not want to promote these instructions, as CodeGenPrepare | |||
2222 | /// would reinsert them later, thus creating an infinite create/remove loop. | |||
2223 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
2224 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedTruncs, | |||
2225 | const TargetLowering &TLI, | |||
2226 | const InstrToOrigTy &PromotedInsts); | |||
2227 | }; | |||
2228 | ||||
2229 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, | |||
2230 | Type *ConsideredExtType, | |||
2231 | const InstrToOrigTy &PromotedInsts, | |||
2232 | bool IsSExt) { | |||
2233 | // The promotion helper does not know how to deal with vector types yet. | |||
2234 | // To be able to fix that, we would need to fix the places where we | |||
2235 | // statically extend, e.g., constants and such. | |||
2236 | if (Inst->getType()->isVectorTy()) | |||
2237 | return false; | |||
2238 | ||||
2239 | // We can always get through zext. | |||
2240 | if (isa<ZExtInst>(Inst)) | |||
2241 | return true; | |||
2242 | ||||
2243 | // sext(sext) is ok too. | |||
2244 | if (IsSExt && isa<SExtInst>(Inst)) | |||
2245 | return true; | |||
2246 | ||||
2247 | // We can get through binary operator, if it is legal. In other words, the | |||
2248 | // binary operator must have a nuw or nsw flag. | |||
2249 | const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); | |||
2250 | if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && | |||
2251 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || | |||
2252 | (IsSExt && BinOp->hasNoSignedWrap()))) | |||
2253 | return true; | |||
2254 | ||||
2255 | // Check if we can do the following simplification. | |||
2256 | // ext(trunc(opnd)) --> ext(opnd) | |||
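2256a | // Illustrative case: for %t = trunc i32 %v to i16 and %e = sext i16 %t | |||
2256b | // to i64, the checks below decide whether %v can feed the extension | |||
2256c | // directly, i.e. whether the trunc only drops bits that the extension | |||
2256d | // would recreate. | |||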
2257 | if (!isa<TruncInst>(Inst)) | |||
2258 | return false; | |||
2259 | ||||
2260 | Value *OpndVal = Inst->getOperand(0); | |||
2261 | // Check if we can use this operand in the extension. | |||
2262 | // If the type is larger than the result type of the extension, | |||
2263 | // we cannot. | |||
2264 | if (!OpndVal->getType()->isIntegerTy() || | |||
2265 | OpndVal->getType()->getIntegerBitWidth() > | |||
2266 | ConsideredExtType->getIntegerBitWidth()) | |||
2267 | return false; | |||
2268 | ||||
2269 | // If the operand of the truncate is not an instruction, we will not have | |||
2270 | // any information on the dropped bits. | |||
2271 | // (Actually we could for constants, but it is not worth the extra logic.) | |||
2272 | Instruction *Opnd = dyn_cast<Instruction>(OpndVal); | |||
2273 | if (!Opnd) | |||
2274 | return false; | |||
2275 | ||||
2276 | // Check that the source type is narrow enough. | |||
2277 | // I.e., check that trunc just drops extended bits of the same kind as | |||
2278 | // the extension. | |||
2279 | // #1 Get the type of the operand and check the kind of the extended bits. | |||
2280 | const Type *OpndType; | |||
2281 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); | |||
2282 | if (It != PromotedInsts.end() && It->second.IsSExt == IsSExt) | |||
2283 | OpndType = It->second.Ty; | |||
2284 | else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) | |||
2285 | OpndType = Opnd->getOperand(0)->getType(); | |||
2286 | else | |||
2287 | return false; | |||
2288 | ||||
2289 | // #2 Check that the truncate just drops extended bits. | |||
2290 | if (Inst->getType()->getIntegerBitWidth() >= OpndType->getIntegerBitWidth()) | |||
2291 | return true; | |||
2292 | ||||
2293 | return false; | |||
2294 | } | |||
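     | // For illustration, a case where canGetThrough succeeds on a trunc, | |||
     | // assuming i8/i16/i32 types: | |||
     | //   %opnd = sext i8 %a to i32 | |||
     | //   %t = trunc i32 %opnd to i16    ; only drops sign-extended bits | |||
     | //   %ext = sext i16 %t to i32     ; promotion can look through %t | |||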
2295 | ||||
2296 | TypePromotionHelper::Action TypePromotionHelper::getAction( | |||
2297 | Instruction *Ext, const SetOfInstrs &InsertedTruncs, | |||
2298 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { | |||
2299 | assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | |||
2300 | "Unexpected instruction type"); | |||
2301 | Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); | |||
2302 | Type *ExtTy = Ext->getType(); | |||
2303 | bool IsSExt = isa<SExtInst>(Ext); | |||
2304 | // If the operand of the extension is not an instruction, we cannot | |||
2305 | // get through. | |||
2306 | // If it is, check that we can get through. | |||
2307 | if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) | |||
2308 | return nullptr; | |||
2309 | ||||
2310 | // Do not promote if the operand has been added by codegenprepare. | |||
2311 | // Otherwise, it means we are undoing an optimization that is likely to be | |||
2312 | // redone, thus causing potential infinite loop. | |||
2313 | if (isa<TruncInst>(ExtOpnd) && InsertedTruncs.count(ExtOpnd)) | |||
2314 | return nullptr; | |||
2315 | ||||
2316 | // SExt or Trunc instructions. | |||
2317 | // Return the related handler. | |||
2318 | if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || | |||
2319 | isa<ZExtInst>(ExtOpnd)) | |||
2320 | return promoteOperandForTruncAndAnyExt; | |||
2321 | ||||
2322 | // Regular instruction. | |||
2323 | // Abort early if we will have to insert non-free instructions. | |||
2324 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) | |||
2325 | return nullptr; | |||
2326 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; | |||
2327 | } | |||
2328 | ||||
2329 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( | |||
2330 | llvm::Instruction *SExt, TypePromotionTransaction &TPT, | |||
2331 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts, | |||
2332 | SmallVectorImpl<Instruction *> *Exts, | |||
2333 | SmallVectorImpl<Instruction *> *Truncs) { | |||
2334 | // By construction, the operand of SExt is an instruction. Otherwise we cannot | |||
2335 | // get through it and this method should not be called. | |||
2336 | Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); | |||
2337 | Value *ExtVal = SExt; | |||
2338 | if (isa<ZExtInst>(SExtOpnd)) { | |||
2339 | // Replace s|zext(zext(opnd)) | |||
2340 | // => zext(opnd). | |||
2341 | Value *ZExt = | |||
2342 | TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); | |||
2343 | TPT.replaceAllUsesWith(SExt, ZExt); | |||
2344 | TPT.eraseInstruction(SExt); | |||
2345 | ExtVal = ZExt; | |||
2346 | } else { | |||
2347 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) | |||
2348 | // => z|sext(opnd). | |||
2349 | TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); | |||
2350 | } | |||
2351 | CreatedInsts = 0; | |||
2352 | ||||
2353 | // Remove dead code. | |||
2354 | if (SExtOpnd->use_empty()) | |||
2355 | TPT.eraseInstruction(SExtOpnd); | |||
2356 | ||||
2357 | // Check if the extension is still needed. | |||
2358 | Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); | |||
2359 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { | |||
2360 | if (ExtInst && Exts) | |||
2361 | Exts->push_back(ExtInst); | |||
2362 | return ExtVal; | |||
2363 | } | |||
2364 | ||||
2365 | // At this point we have: ext ty opnd to ty. | |||
2366 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. | |||
2367 | Value *NextVal = ExtInst->getOperand(0); | |||
2368 | TPT.eraseInstruction(ExtInst, NextVal); | |||
2369 | return NextVal; | |||
2370 | } | |||
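     | // For illustration, the rewrite performed above for the zext case, | |||
     | // assuming i8/i16/i32 types: | |||
     | //   %z = zext i8 %a to i16          ; before: | |||
     | //   %s = sext i16 %z to i32         ;   sext(zext(%a)) | |||
     | // becomes a single instruction: | |||
     | //   %s = zext i8 %a to i32 | |||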
2371 | ||||
2372 | Value *TypePromotionHelper::promoteOperandForOther( | |||
2373 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
2374 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInsts, | |||
2375 | SmallVectorImpl<Instruction *> *Exts, | |||
2376 | SmallVectorImpl<Instruction *> *Truncs, bool IsSExt) { | |||
2377 | // By construction, the operand of Ext is an instruction. Otherwise we cannot | |||
2378 | // get through it and this method should not be called. | |||
2379 | Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); | |||
2380 | CreatedInsts = 0; | |||
2381 | if (!ExtOpnd->hasOneUse()) { | |||
2382 | // ExtOpnd will be promoted. | |||
2383 | // All its uses, except Ext, will need to use a truncated value of the | |||
2384 | // promoted version. | |||
2385 | // Create the truncate now. | |||
2386 | Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); | |||
2387 | if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { | |||
2388 | ITrunc->removeFromParent(); | |||
2389 | // Insert it just after the definition. | |||
2390 | ITrunc->insertAfter(ExtOpnd); | |||
2391 | if (Truncs) | |||
2392 | Truncs->push_back(ITrunc); | |||
2393 | } | |||
2394 | ||||
2395 | TPT.replaceAllUsesWith(ExtOpnd, Trunc); | |||
2396 | // Restore the operand of Ext (which has been replaced by the previous call | |||
2397 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. | |||
2398 | TPT.setOperand(Ext, 0, ExtOpnd); | |||
2399 | } | |||
2400 | ||||
2401 | // Get through the Instruction: | |||
2402 | // 1. Update its type. | |||
2403 | // 2. Replace the uses of Ext by ExtOpnd. | |||
2404 | // 3. Extend each operand that needs to be extended. | |||
2405 | ||||
2406 | // Remember the original type of the instruction before promotion. | |||
2407 | // This is useful to know that the high bits are sign extended bits. | |||
2408 | PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( | |||
2409 | ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); | |||
2410 | // Step #1. | |||
2411 | TPT.mutateType(ExtOpnd, Ext->getType()); | |||
2412 | // Step #2. | |||
2413 | TPT.replaceAllUsesWith(Ext, ExtOpnd); | |||
2414 | // Step #3. | |||
2415 | Instruction *ExtForOpnd = Ext; | |||
2416 | ||||
2417 | DEBUG(dbgs() << "Propagate Ext to operands\n"); | |||
2418 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; | |||
2419 | ++OpIdx) { | |||
2420 | DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); | |||
2421 | if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || | |||
2422 | !shouldExtOperand(ExtOpnd, OpIdx)) { | |||
2423 | DEBUG(dbgs() << "No need to propagate\n"); | |||
2424 | continue; | |||
2425 | } | |||
2426 | // Check if we can statically extend the operand. | |||
2427 | Value *Opnd = ExtOpnd->getOperand(OpIdx); | |||
2428 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { | |||
2429 | DEBUG(dbgs() << "Statically extend\n"); | |||
2430 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); | |||
2431 | APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) | |||
2432 | : Cst->getValue().zext(BitWidth); | |||
2433 | TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); | |||
2434 | continue; | |||
2435 | } | |||
2436 | // UndefValues are typed, so we have to statically extend them. | |||
2437 | if (isa<UndefValue>(Opnd)) { | |||
2438 | DEBUG(dbgs() << "Statically extend\n"); | |||
2439 | TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); | |||
2440 | continue; | |||
2441 | } | |||
2442 | ||||
2443 | // Otherwise we have to explicitly extend the operand. | |||
2444 | // Check if Ext was reused to extend an operand. | |||
2445 | if (!ExtForOpnd) { | |||
2446 | // If yes, create a new one. | |||
2447 | DEBUG(dbgs() << "More operands to ext\n"); | |||
2448 | Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) | |||
2449 | : TPT.createZExt(Ext, Opnd, Ext->getType()); | |||
2450 | if (!isa<Instruction>(ValForExtOpnd)) { | |||
2451 | TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); | |||
2452 | continue; | |||
2453 | } | |||
2454 | ExtForOpnd = cast<Instruction>(ValForExtOpnd); | |||
2455 | ++CreatedInsts; | |||
2456 | } | |||
2457 | if (Exts) | |||
2458 | Exts->push_back(ExtForOpnd); | |||
2459 | TPT.setOperand(ExtForOpnd, 0, Opnd); | |||
2460 | ||||
2461 | // Move the sign extension before the insertion point. | |||
2462 | TPT.moveBefore(ExtForOpnd, ExtOpnd); | |||
2463 | TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); | |||
2464 | // If more extensions are required, new instructions will have to be created. | |||
2465 | ExtForOpnd = nullptr; | |||
2466 | } | |||
2467 | if (ExtForOpnd == Ext) { | |||
2468 | DEBUG(dbgs() << "Extension is useless now\n"); | |||
2469 | TPT.eraseInstruction(Ext); | |||
2470 | } | |||
2471 | return ExtOpnd; | |||
2472 | } | |||
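     | // For illustration, promoting the operand of an ext over an nsw add, | |||
     | // assuming i32/i64 types: | |||
     | //   %add = add nsw i32 %x, 1 | |||
     | //   %ext = sext i32 %add to i64 | |||
     | // becomes: | |||
     | //   %x64 = sext i32 %x to i64      ; step #3: extend the operand | |||
     | //   %add = add nsw i64 %x64, 1     ; step #1: mutate the type | |||
     | // with all uses of %ext rewritten to use %add (step #2); the constant 1 | |||
     | // is extended statically. | |||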
2473 | ||||
2474 | /// IsPromotionProfitable - Check whether or not promoting an instruction | |||
2475 | /// to a wider type was profitable. | |||
2476 | /// \p MatchedSize gives the number of instructions that have been matched | |||
2477 | /// in the addressing mode after the promotion was applied. | |||
2478 | /// \p SizeWithPromotion gives the number of created instructions for | |||
2479 | /// the promotion plus the number of instructions that have been | |||
2480 | /// matched in the addressing mode before the promotion. | |||
2481 | /// \p PromotedOperand is the value that has been promoted. | |||
2482 | /// \return True if the promotion is profitable, false otherwise. | |||
2483 | bool | |||
2484 | AddressingModeMatcher::IsPromotionProfitable(unsigned MatchedSize, | |||
2485 | unsigned SizeWithPromotion, | |||
2486 | Value *PromotedOperand) const { | |||
2487 | // We folded fewer instructions than we created to promote the operand. | |||
2488 | // This is not profitable. | |||
2489 | if (MatchedSize < SizeWithPromotion) | |||
2490 | return false; | |||
2491 | if (MatchedSize > SizeWithPromotion) | |||
2492 | return true; | |||
2493 | // The promotion is neutral but it may help folding the sign extension in | |||
2494 | // loads for instance. | |||
2495 | // Check that we did not create an illegal instruction. | |||
2496 | return isPromotedInstructionLegal(TLI, PromotedOperand); | |||
2497 | } | |||
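     | // E.g., if the promotion created one instruction but let the matcher fold | |||
     | // two more into the addressing mode, MatchedSize exceeds SizeWithPromotion | |||
     | // and the promotion pays off. | |||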
2498 | ||||
2499 | /// MatchOperationAddr - Given an instruction or constant expr, see if we can | |||
2500 | /// fold the operation into the addressing mode. If so, update the addressing | |||
2501 | /// mode and return true, otherwise return false without modifying AddrMode. | |||
2502 | /// If \p MovedAway is not NULL, it is set on success to indicate whether | |||
2503 | /// \p AddrInst has to be folded into the addressing mode or not. | |||
2504 | /// If \p MovedAway == true, \p AddrInst will not be part of the addressing | |||
2505 | /// mode because it has been moved away. | |||
2506 | /// Thus AddrInst must not be added in the matched instructions. | |||
2507 | /// This state can happen when AddrInst is a sext, since it may be moved away. | |||
2508 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must | |||
2509 | /// not be referenced anymore. | |||
2510 | bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode, | |||
2511 | unsigned Depth, | |||
2512 | bool *MovedAway) { | |||
2513 | // Avoid exponential behavior on extremely deep expression trees. | |||
2514 | if (Depth >= 5) return false; | |||
2515 | ||||
2516 | // By default, all matched instructions stay in place. | |||
2517 | if (MovedAway) | |||
2518 | *MovedAway = false; | |||
2519 | ||||
2520 | switch (Opcode) { | |||
2521 | case Instruction::PtrToInt: | |||
2522 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
2523 | return MatchAddr(AddrInst->getOperand(0), Depth); | |||
2524 | case Instruction::IntToPtr: | |||
2525 | // This inttoptr is a no-op if the integer type is pointer sized. | |||
2526 | if (TLI.getValueType(AddrInst->getOperand(0)->getType()) == | |||
2527 | TLI.getPointerTy(AddrInst->getType()->getPointerAddressSpace())) | |||
2528 | return MatchAddr(AddrInst->getOperand(0), Depth); | |||
2529 | return false; | |||
2530 | case Instruction::BitCast: | |||
2531 | case Instruction::AddrSpaceCast: | |||
2532 | // BitCast is always a noop, and we can handle it as long as it is | |||
2533 | // int->int or pointer->pointer (we don't want int<->fp or something). | |||
2534 | if ((AddrInst->getOperand(0)->getType()->isPointerTy() || | |||
2535 | AddrInst->getOperand(0)->getType()->isIntegerTy()) && | |||
2536 | // Don't touch identity bitcasts. These were probably put here by LSR, | |||
2537 | // and we don't want to mess around with them. Assume it knows what it | |||
2538 | // is doing. | |||
2539 | AddrInst->getOperand(0)->getType() != AddrInst->getType()) | |||
2540 | return MatchAddr(AddrInst->getOperand(0), Depth); | |||
2541 | return false; | |||
2542 | case Instruction::Add: { | |||
2543 | // Check to see if we can merge in the RHS then the LHS. If so, we win. | |||
2544 | ExtAddrMode BackupAddrMode = AddrMode; | |||
2545 | unsigned OldSize = AddrModeInsts.size(); | |||
2546 | // Start a transaction at this point. | |||
2547 | // The LHS may match but not the RHS. | |||
2548 | // Therefore, we need a higher level restoration point to undo partially | |||
2549 | // matched operation. | |||
2550 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
2551 | TPT.getRestorationPoint(); | |||
2552 | ||||
2553 | if (MatchAddr(AddrInst->getOperand(1), Depth+1) && | |||
2554 | MatchAddr(AddrInst->getOperand(0), Depth+1)) | |||
2555 | return true; | |||
2556 | ||||
2557 | // Restore the old addr mode info. | |||
2558 | AddrMode = BackupAddrMode; | |||
2559 | AddrModeInsts.resize(OldSize); | |||
2560 | TPT.rollback(LastKnownGood); | |||
2561 | ||||
2562 | // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. | |||
2563 | if (MatchAddr(AddrInst->getOperand(0), Depth+1) && | |||
2564 | MatchAddr(AddrInst->getOperand(1), Depth+1)) | |||
2565 | return true; | |||
2566 | ||||
2567 | // Otherwise we definitely can't merge the ADD in. | |||
2568 | AddrMode = BackupAddrMode; | |||
2569 | AddrModeInsts.resize(OldSize); | |||
2570 | TPT.rollback(LastKnownGood); | |||
2571 | break; | |||
2572 | } | |||
2573 | //case Instruction::Or: | |||
2574 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. | |||
2575 | //break; | |||
2576 | case Instruction::Mul: | |||
2577 | case Instruction::Shl: { | |||
2578 | // Can only handle X*C and X << C. | |||
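     | // E.g., an address index (shl i64 %x, 3) is matched as ScaledReg = %x | |||
     | // with Scale = 8. | |||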
2579 | ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); | |||
2580 | if (!RHS) | |||
2581 | return false; | |||
2582 | int64_t Scale = RHS->getSExtValue(); | |||
2583 | if (Opcode == Instruction::Shl) | |||
2584 | Scale = 1LL << Scale; | |||
2585 | ||||
2586 | return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth); | |||
2587 | } | |||
2588 | case Instruction::GetElementPtr: { | |||
2589 | // Scan the GEP. We check whether it contains constant offsets and at most | |||
2590 | // one variable offset. | |||
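     | // E.g., for gep [16 x i32]* %p, i64 0, i64 %i, the %i index becomes the | |||
     | // single variable operand with VariableScale = 4 (the i32 allocation | |||
     | // size, assuming a 32-bit int layout). | |||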
2591 | int VariableOperand = -1; | |||
2592 | unsigned VariableScale = 0; | |||
2593 | ||||
2594 | int64_t ConstantOffset = 0; | |||
2595 | const DataLayout *TD = TLI.getDataLayout(); | |||
2596 | gep_type_iterator GTI = gep_type_begin(AddrInst); | |||
2597 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { | |||
2598 | if (StructType *STy = dyn_cast<StructType>(*GTI)) { | |||
2599 | const StructLayout *SL = TD->getStructLayout(STy); | |||
2600 | unsigned Idx = | |||
2601 | cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); | |||
2602 | ConstantOffset += SL->getElementOffset(Idx); | |||
2603 | } else { | |||
2604 | uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType()); | |||
2605 | if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { | |||
2606 | ConstantOffset += CI->getSExtValue()*TypeSize; | |||
2607 | } else if (TypeSize) { // Scales of zero don't do anything. | |||
2608 | // We only allow one variable index at the moment. | |||
2609 | if (VariableOperand != -1) | |||
2610 | return false; | |||
2611 | ||||
2612 | // Remember the variable index. | |||
2613 | VariableOperand = i; | |||
2614 | VariableScale = TypeSize; | |||
2615 | } | |||
2616 | } | |||
2617 | } | |||
2618 | ||||
2619 | // A common case is for the GEP to only do a constant offset. In this case, | |||
2620 | // just add it to the disp field and check validity. | |||
2621 | if (VariableOperand == -1) { | |||
2622 | AddrMode.BaseOffs += ConstantOffset; | |||
2623 | if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){ | |||
2624 | // Check to see if we can fold the base pointer in too. | |||
2625 | if (MatchAddr(AddrInst->getOperand(0), Depth+1)) | |||
2626 | return true; | |||
2627 | } | |||
2628 | AddrMode.BaseOffs -= ConstantOffset; | |||
2629 | return false; | |||
2630 | } | |||
2631 | ||||
2632 | // Save the valid addressing mode in case we can't match. | |||
2633 | ExtAddrMode BackupAddrMode = AddrMode; | |||
2634 | unsigned OldSize = AddrModeInsts.size(); | |||
2635 | ||||
2636 | // See if the scale and offset amount is valid for this target. | |||
2637 | AddrMode.BaseOffs += ConstantOffset; | |||
2638 | ||||
2639 | // Match the base operand of the GEP. | |||
2640 | if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) { | |||
2641 | // If it couldn't be matched, just stuff the value in a register. | |||
2642 | if (AddrMode.HasBaseReg) { | |||
2643 | AddrMode = BackupAddrMode; | |||
2644 | AddrModeInsts.resize(OldSize); | |||
2645 | return false; | |||
2646 | } | |||
2647 | AddrMode.HasBaseReg = true; | |||
2648 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
2649 | } | |||
2650 | ||||
2651 | // Match the remaining variable portion of the GEP. | |||
2652 | if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, | |||
2653 | Depth)) { | |||
2654 | // If it couldn't be matched, try stuffing the base into a register | |||
2655 | // instead of matching it, and retrying the match of the scale. | |||
2656 | AddrMode = BackupAddrMode; | |||
2657 | AddrModeInsts.resize(OldSize); | |||
2658 | if (AddrMode.HasBaseReg) | |||
2659 | return false; | |||
2660 | AddrMode.HasBaseReg = true; | |||
2661 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
2662 | AddrMode.BaseOffs += ConstantOffset; | |||
2663 | if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), | |||
2664 | VariableScale, Depth)) { | |||
2665 | // If even that didn't work, bail. | |||
2666 | AddrMode = BackupAddrMode; | |||
2667 | AddrModeInsts.resize(OldSize); | |||
2668 | return false; | |||
2669 | } | |||
2670 | } | |||
2671 | ||||
2672 | return true; | |||
2673 | } | |||
2674 | case Instruction::SExt: | |||
2675 | case Instruction::ZExt: { | |||
2676 | Instruction *Ext = dyn_cast<Instruction>(AddrInst); | |||
2677 | if (!Ext) | |||
2678 | return false; | |||
2679 | ||||
2680 | // Try to move this ext out of the way of the addressing mode. | |||
2681 | // Ask for a method for doing so. | |||
2682 | TypePromotionHelper::Action TPH = | |||
2683 | TypePromotionHelper::getAction(Ext, InsertedTruncs, TLI, PromotedInsts); | |||
2684 | if (!TPH) | |||
2685 | return false; | |||
2686 | ||||
2687 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
2688 | TPT.getRestorationPoint(); | |||
2689 | unsigned CreatedInsts = 0; | |||
2690 | Value *PromotedOperand = | |||
2691 | TPH(Ext, TPT, PromotedInsts, CreatedInsts, nullptr, nullptr); | |||
2692 | // SExt has been moved away. | |||
2693 | // Thus either it will be rematched later in the recursive calls or it is | |||
2694 | // gone. Anyway, we must not fold it into the addressing mode at this point. | |||
2695 | // E.g., | |||
2696 | // op = add opnd, 1 | |||
2697 | // idx = ext op | |||
2698 | // addr = gep base, idx | |||
2699 | // is now: | |||
2700 | // promotedOpnd = ext opnd <- no match here | |||
2701 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) | |||
2702 | // addr = gep base, op <- match | |||
2703 | if (MovedAway) | |||
2704 | *MovedAway = true; | |||
2705 | ||||
2706 | assert(PromotedOperand && | |||
2707 | "TypePromotionHelper should have filtered out those cases"); | |||
2708 | ||||
2709 | ExtAddrMode BackupAddrMode = AddrMode; | |||
2710 | unsigned OldSize = AddrModeInsts.size(); | |||
2711 | ||||
2712 | if (!MatchAddr(PromotedOperand, Depth) || | |||
2713 | !IsPromotionProfitable(AddrModeInsts.size(), OldSize + CreatedInsts, | |||
2714 | PromotedOperand)) { | |||
2715 | AddrMode = BackupAddrMode; | |||
2716 | AddrModeInsts.resize(OldSize); | |||
2717 | DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); | |||
2718 | TPT.rollback(LastKnownGood); | |||
2719 | return false; | |||
2720 | } | |||
2721 | return true; | |||
2722 | } | |||
2723 | } | |||
2724 | return false; | |||
2725 | } | |||
2726 | ||||
2727 | /// MatchAddr - If we can, try to add the value of 'Addr' into the current | |||
2728 | /// addressing mode. If Addr can't be added to AddrMode this returns false and | |||
2729 | /// leaves AddrMode unmodified. This assumes that Addr is either a pointer type | |||
2730 | /// or intptr_t for the target. | |||
2731 | /// | |||
2732 | bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) { | |||
2733 | // Start a transaction at this point that we will rollback if the matching | |||
2734 | // fails. | |||
2735 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
2736 | TPT.getRestorationPoint(); | |||
2737 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { | |||
2738 | // Fold in immediates if legal for the target. | |||
2739 | AddrMode.BaseOffs += CI->getSExtValue(); | |||
2740 | if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) | |||
2741 | return true; | |||
2742 | AddrMode.BaseOffs -= CI->getSExtValue(); | |||
2743 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { | |||
2744 | // If this is a global variable, try to fold it into the addressing mode. | |||
2745 | if (!AddrMode.BaseGV) { | |||
2746 | AddrMode.BaseGV = GV; | |||
2747 | if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) | |||
2748 | return true; | |||
2749 | AddrMode.BaseGV = nullptr; | |||
2750 | } | |||
2751 | } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { | |||
2752 | ExtAddrMode BackupAddrMode = AddrMode; | |||
2753 | unsigned OldSize = AddrModeInsts.size(); | |||
2754 | ||||
2755 | // Check to see if it is possible to fold this operation. | |||
2756 | bool MovedAway = false; | |||
2757 | if (MatchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { | |||
2758 | // This instruction may have been moved away. If so, there is nothing | |||
2759 | // to check here. | |||
2760 | if (MovedAway) | |||
2761 | return true; | |||
2762 | // Okay, it's possible to fold this. Check to see if it is actually | |||
2763 | // *profitable* to do so. We use a simple cost model to avoid increasing | |||
2764 | // register pressure too much. | |||
2765 | if (I->hasOneUse() || | |||
2766 | IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { | |||
2767 | AddrModeInsts.push_back(I); | |||
2768 | return true; | |||
2769 | } | |||
2770 | ||||
2771 | // It isn't profitable to do this, roll back. | |||
2772 | //cerr << "NOT FOLDING: " << *I; | |||
2773 | AddrMode = BackupAddrMode; | |||
2774 | AddrModeInsts.resize(OldSize); | |||
2775 | TPT.rollback(LastKnownGood); | |||
2776 | } | |||
2777 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { | |||
2778 | if (MatchOperationAddr(CE, CE->getOpcode(), Depth)) | |||
2779 | return true; | |||
2780 | TPT.rollback(LastKnownGood); | |||
2781 | } else if (isa<ConstantPointerNull>(Addr)) { | |||
2782 | // Null pointer gets folded without affecting the addressing mode. | |||
2783 | return true; | |||
2784 | } | |||
2785 | ||||
2786 | // Worst case, the target should support [reg] addressing modes. :) | |||
2787 | if (!AddrMode.HasBaseReg) { | |||
2788 | AddrMode.HasBaseReg = true; | |||
2789 | AddrMode.BaseReg = Addr; | |||
2790 | // Still check for legality in case the target supports [imm] but not [i+r]. | |||
2791 | if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) | |||
2792 | return true; | |||
2793 | AddrMode.HasBaseReg = false; | |||
2794 | AddrMode.BaseReg = nullptr; | |||
2795 | } | |||
2796 | ||||
2797 | // If the base register is already taken, see if we can do [r+r]. | |||
2798 | if (AddrMode.Scale == 0) { | |||
2799 | AddrMode.Scale = 1; | |||
2800 | AddrMode.ScaledReg = Addr; | |||
2801 | if (TLI.isLegalAddressingMode(AddrMode, AccessTy)) | |||
2802 | return true; | |||
2803 | AddrMode.Scale = 0; | |||
2804 | AddrMode.ScaledReg = nullptr; | |||
2805 | } | |||
2806 | // Couldn't match. | |||
2807 | TPT.rollback(LastKnownGood); | |||
2808 | return false; | |||
2809 | } | |||
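     | // For illustration: matching the address (add (mul i64 %i, 4), %base) | |||
     | // with a further constant offset of 16 would, on a target where | |||
     | // [r + 4*r + imm] is legal, produce BaseReg = %base, ScaledReg = %i, | |||
     | // Scale = 4, BaseOffs = 16. | |||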
2810 | ||||
2811 | /// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified | |||
2812 | /// inline asm call are due to memory operands. If so, return true, otherwise | |||
2813 | /// return false. | |||
2814 | static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, | |||
2815 | const TargetLowering &TLI) { | |||
2816 | TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(ImmutableCallSite(CI)); | |||
2817 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
2818 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
2819 | ||||
2820 | // Compute the constraint code and ConstraintType to use. | |||
2821 | TLI.ComputeConstraintToUse(OpInfo, SDValue()); | |||
2822 | ||||
2823 | // If this asm operand is our Value*, and if it isn't an indirect memory | |||
2824 | // operand, we can't fold it! | |||
2825 | if (OpInfo.CallOperandVal == OpVal && | |||
2826 | (OpInfo.ConstraintType != TargetLowering::C_Memory || | |||
2827 | !OpInfo.isIndirect)) | |||
2828 | return false; | |||
2829 | } | |||
2830 | ||||
2831 | return true; | |||
2832 | } | |||
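     | // For illustration: given call void asm "...", "=*m"(i32* %p), operand | |||
     | // %p satisfies the check above (C_Memory and indirect), so an address | |||
     | // computation feeding %p may still be folded. | |||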
2833 | ||||
2834 | /// FindAllMemoryUses - Recursively walk all the uses of I until we find a | |||
2835 | /// memory use. If we find an obviously non-foldable instruction, return true. | |||
2836 | /// Add the ultimately found memory instructions to MemoryUses. | |||
2837 | static bool FindAllMemoryUses(Instruction *I, | |||
2838 | SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses, | |||
2839 | SmallPtrSetImpl<Instruction*> &ConsideredInsts, | |||
2840 | const TargetLowering &TLI) { | |||
2841 | // If we already considered this instruction, we're done. | |||
2842 | if (!ConsideredInsts.insert(I).second) | |||
2843 | return false; | |||
2844 | ||||
2845 | // If this is an obviously unfoldable instruction, bail out. | |||
2846 | if (!MightBeFoldableInst(I)) | |||
2847 | return true; | |||
2848 | ||||
2849 | // Loop over all the uses, recursively processing them. | |||
2850 | for (Use &U : I->uses()) { | |||
2851 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
2852 | ||||
2853 | if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { | |||
2854 | MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); | |||
2855 | continue; | |||
2856 | } | |||
2857 | ||||
2858 | if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { | |||
2859 | unsigned opNo = U.getOperandNo(); | |||
2860 | if (opNo == 0) return true; // Storing addr, not into addr. | |||
2861 | MemoryUses.push_back(std::make_pair(SI, opNo)); | |||
2862 | continue; | |||
2863 | } | |||
2864 | ||||
2865 | if (CallInst *CI = dyn_cast<CallInst>(UserI)) { | |||
2866 | InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); | |||
2867 | if (!IA) return true; | |||
2868 | ||||
2869 | // If this is a memory operand, we're cool, otherwise bail out. | |||
2870 | if (!IsOperandAMemoryOperand(CI, IA, I, TLI)) | |||
2871 | return true; | |||
2872 | continue; | |||
2873 | } | |||
2874 | ||||
2875 | if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI)) | |||
2876 | return true; | |||
2877 | } | |||
2878 | ||||
2879 | return false; | |||
2880 | } | |||
2881 | ||||
2882 | /// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at | |||
2883 | /// the use site that we're folding it into. If so, there is no cost to | |||
2884 | /// include it in the addressing mode. KnownLive1 and KnownLive2 are two values | |||
2885 | /// that we know are live at the instruction already. | |||
2886 | bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, | |||
2887 | Value *KnownLive2) { | |||
2888 | // If Val is either of the known-live values, we know it is live! | |||
2889 | if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) | |||
2890 | return true; | |||
2891 | ||||
2892 | // All values other than instructions and arguments (e.g. constants) are live. | |||
2893 | if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; | |||
2894 | ||||
2895 | // If Val is a constant sized alloca in the entry block, it is live; this is | |||
2896 | // true because it is just a reference to the stack/frame pointer, which is | |||
2897 | // live for the whole function. | |||
2898 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) | |||
2899 | if (AI->isStaticAlloca()) | |||
2900 | return true; | |||
2901 | ||||
2902 | // Check to see if this value is already used in the memory instruction's | |||
2903 | // block. If so, it's already live into the block at the very least, so we | |||
2904 | // can reasonably fold it. | |||
2905 | return Val->isUsedInBasicBlock(MemoryInst->getParent()); | |||
2906 | } | |||
2907 | ||||
2908 | /// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing | |||
2909 | /// mode of the machine to fold the specified instruction into a load or store | |||
2910 | /// that ultimately uses it. However, the specified instruction has multiple | |||
2911 | /// uses. Given this, it may actually increase register pressure to fold it | |||
2912 | /// into the load. For example, consider this code: | |||
2913 | /// | |||
2914 | /// X = ... | |||
2915 | /// Y = X+1 | |||
2916 | /// use(Y) -> nonload/store | |||
2917 | /// Z = Y+1 | |||
2918 | /// load Z | |||
2919 | /// | |||
2920 | /// In this case, Y has multiple uses, and can be folded into the load of Z | |||
2921 | /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to | |||
2922 | /// be live at the use(Y) line. If we don't fold Y into load Z, we use one | |||
2923 | /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the | |||
2924 | /// number of computations either. | |||
2925 | /// | |||
2926 | /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If | |||
2927 | /// X was live across 'load Z' for other reasons, we actually *would* want to | |||
2928 | /// fold the addressing mode in the Z case. This would make Y die earlier. | |||
2929 | bool AddressingModeMatcher:: | |||
2930 | IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, | |||
2931 | ExtAddrMode &AMAfter) { | |||
2932 | if (IgnoreProfitability) return true; | |||
2933 | ||||
2934 | // AMBefore is the addressing mode before this instruction was folded into it, | |||
2935 | // and AMAfter is the addressing mode after the instruction was folded. Get | |||
2936 | // the set of registers referenced by AMAfter and subtract out those | |||
2937 | // referenced by AMBefore: this is the set of values which folding in this | |||
2938 | // address extends the lifetime of. | |||
2939 | // | |||
2940 | // Note that there are only two potential values being referenced here, | |||
2941 | // BaseReg and ScaleReg (global addresses are always available, as are any | |||
2942 | // folded immediates). | |||
2943 | Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; | |||
2944 | ||||
2945 | // If the BaseReg or ScaledReg was referenced by the previous addrmode, their | |||
2946 | // lifetime wasn't extended by adding this instruction. | |||
2947 | if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
2948 | BaseReg = nullptr; | |||
2949 | if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
2950 | ScaledReg = nullptr; | |||
2951 | ||||
2952 | // If folding this instruction (and its subexprs) didn't extend any live | |||
2953 | // ranges, we're ok with it. | |||
2954 | if (!BaseReg && !ScaledReg) | |||
2955 | return true; | |||
2956 | ||||
2957 | // If all uses of this instruction are ultimately load/store/inlineasm's, | |||
2958 | // check to see if their addressing modes will include this instruction. If | |||
2959 | // so, we can fold it into all uses, so it doesn't matter if it has multiple | |||
2960 | // uses. | |||
2961 | SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; | |||
2962 | SmallPtrSet<Instruction*, 16> ConsideredInsts; | |||
2963 | if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI)) | |||
2964 | return false; // Has a non-memory, non-foldable use! | |||
2965 | ||||
2966 | // Now that we know that all uses of this instruction are part of a chain of | |||
2967 | // computation involving only operations that could theoretically be folded | |||
2968 | // into a memory use, loop over each of these uses and see if they could | |||
2969 | // *actually* fold the instruction. | |||
2970 | SmallVector<Instruction*, 32> MatchedAddrModeInsts; | |||
2971 | for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { | |||
2972 | Instruction *User = MemoryUses[i].first; | |||
2973 | unsigned OpNo = MemoryUses[i].second; | |||
2974 | ||||
2975 | // Get the access type of this use. If the use isn't a pointer, we don't | |||
2976 | // know what it accesses. | |||
2977 | Value *Address = User->getOperand(OpNo); | |||
2978 | if (!Address->getType()->isPointerTy()) | |||
2979 | return false; | |||
2980 | Type *AddressAccessTy = Address->getType()->getPointerElementType(); | |||
2981 | ||||
2982 | // Do a match against the root of this address, ignoring profitability. This | |||
2983 | // will tell us if the addressing mode for the memory operation will | |||
2984 | // *actually* cover the shared instruction. | |||
2985 | ExtAddrMode Result; | |||
2986 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
2987 | TPT.getRestorationPoint(); | |||
2988 | AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy, | |||
2989 | MemoryInst, Result, InsertedTruncs, | |||
2990 | PromotedInsts, TPT); | |||
2991 | Matcher.IgnoreProfitability = true; | |||
2992 | bool Success = Matcher.MatchAddr(Address, 0); | |||
2993 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
2994 | ||||
2995 | // The match was only to check profitability; the changes made are not | |||
2996 | // part of the original matcher. Therefore, they should be dropped, | |||
2997 | // otherwise the original matcher will not be in the right state. | |||
2998 | TPT.rollback(LastKnownGood); | |||
2999 | ||||
3000 | // If the match didn't cover I, then it won't be shared by it. | |||
3001 | if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(), | |||
3002 | I) == MatchedAddrModeInsts.end()) | |||
3003 | return false; | |||
3004 | ||||
3005 | MatchedAddrModeInsts.clear(); | |||
3006 | } | |||
3007 | ||||
3008 | return true; | |||
3009 | } | |||
3010 | ||||
3011 | } // end anonymous namespace | |||
3012 | ||||
3013 | /// IsNonLocalValue - Return true if the specified value is defined in a | |||
3014 | /// different basic block than BB. | |||
3015 | static bool IsNonLocalValue(Value *V, BasicBlock *BB) { | |||
3016 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
3017 | return I->getParent() != BB; | |||
3018 | return false; | |||
3019 | } | |||
3020 | ||||
3021 | /// OptimizeMemoryInst - Load and Store Instructions often have | |||
3022 | /// addressing modes that can do significant amounts of computation. As such, | |||
3023 | /// instruction selection will try to get the load or store to do as much | |||
3024 | /// computation as possible for the program. The problem is that isel can only | |||
3025 | /// see within a single block. As such, we sink as much legal addressing mode | |||
3026 | /// stuff into the block as possible. | |||
3027 | /// | |||
3028 | /// This method is used to optimize both load/store and inline asms with memory | |||
3029 | /// operands. | |||
3030 | bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, | |||
3031 | Type *AccessTy) { | |||
3032 | Value *Repl = Addr; | |||
3033 | ||||
3034 | // Try to collapse single-value PHI nodes. This is necessary to undo | |||
3035 | // unprofitable PRE transformations. | |||
3036 | SmallVector<Value*, 8> worklist; | |||
3037 | SmallPtrSet<Value*, 16> Visited; | |||
3038 | worklist.push_back(Addr); | |||
3039 | ||||
3040 | // Use a worklist to iteratively look through PHI nodes, and ensure that | |||
3041 | // the addressing modes obtained from the non-PHI roots of the graph | |||
3042 | // are all equivalent. | |||
3043 | Value *Consensus = nullptr; | |||
3044 | unsigned NumUsesConsensus = 0; | |||
3045 | bool IsNumUsesConsensusValid = false; | |||
3046 | SmallVector<Instruction*, 16> AddrModeInsts; | |||
3047 | ExtAddrMode AddrMode; | |||
3048 | TypePromotionTransaction TPT; | |||
3049 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
3050 | TPT.getRestorationPoint(); | |||
3051 | while (!worklist.empty()) { | |||
3052 | Value *V = worklist.back(); | |||
3053 | worklist.pop_back(); | |||
3054 | ||||
3055 | // Break use-def graph loops. | |||
3056 | if (!Visited.insert(V).second) { | |||
3057 | Consensus = nullptr; | |||
3058 | break; | |||
3059 | } | |||
3060 | ||||
3061 | // For a PHI node, push all of its incoming values. | |||
3062 | if (PHINode *P = dyn_cast<PHINode>(V)) { | |||
3063 | for (unsigned i = 0, e = P->getNumIncomingValues(); i != e; ++i) | |||
3064 | worklist.push_back(P->getIncomingValue(i)); | |||
3065 | continue; | |||
3066 | } | |||
3067 | ||||
3068 | // For non-PHIs, determine the addressing mode being computed. | |||
3069 | SmallVector<Instruction*, 16> NewAddrModeInsts; | |||
3070 | ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( | |||
3071 | V, AccessTy, MemoryInst, NewAddrModeInsts, *TLI, InsertedTruncsSet, | |||
3072 | PromotedInsts, TPT); | |||
3073 | ||||
3074 | // This check is broken into two cases with very similar code to avoid using | |||
3075 | // getNumUses() as much as possible. Some values have a lot of uses, so | |||
3076 | // calling getNumUses() unconditionally caused a significant compile-time | |||
3077 | // regression. | |||
3078 | if (!Consensus) { | |||
3079 | Consensus = V; | |||
3080 | AddrMode = NewAddrMode; | |||
3081 | AddrModeInsts = NewAddrModeInsts; | |||
3082 | continue; | |||
3083 | } else if (NewAddrMode == AddrMode) { | |||
3084 | if (!IsNumUsesConsensusValid) { | |||
3085 | NumUsesConsensus = Consensus->getNumUses(); | |||
3086 | IsNumUsesConsensusValid = true; | |||
3087 | } | |||
3088 | ||||
3089 | // Ensure that the obtained addressing mode is equivalent to that obtained | |||
3090 | // for all other roots of the PHI traversal. Also, when choosing one | |||
3091 | // such root as representative, select the one with the most uses in order | |||
3092 | // to keep the cost modeling heuristics in AddressingModeMatcher | |||
3093 | // applicable. | |||
3094 | unsigned NumUses = V->getNumUses(); | |||
3095 | if (NumUses > NumUsesConsensus) { | |||
3096 | Consensus = V; | |||
3097 | NumUsesConsensus = NumUses; | |||
3098 | AddrModeInsts = NewAddrModeInsts; | |||
3099 | } | |||
3100 | continue; | |||
3101 | } | |||
3102 | ||||
3103 | Consensus = nullptr; | |||
3104 | break; | |||
3105 | } | |||
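     | // For illustration: with %addr = phi i32* [ %gep1, %bb1 ], [ %gep2, %bb2 ] | |||
     | // where both GEPs compute base + 4*i, the roots agree on one ExtAddrMode, | |||
     | // so a single address computation can be sunk next to MemoryInst. | |||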
3106 | ||||
3107 | // If the addressing mode couldn't be determined, or if multiple different | |||
3108 | // ones were determined, bail out now. | |||
3109 | if (!Consensus) { | |||
3110 | TPT.rollback(LastKnownGood); | |||
3111 | return false; | |||
3112 | } | |||
3113 | TPT.commit(); | |||
3114 | ||||
3115 | // Check to see if any of the instructions subsumed by this addr mode are | |||
3116 | // non-local to I's BB. | |||
3117 | bool AnyNonLocal = false; | |||
3118 | for (unsigned i = 0, e = AddrModeInsts.size(); i != e; ++i) { | |||
3119 | if (IsNonLocalValue(AddrModeInsts[i], MemoryInst->getParent())) { | |||
3120 | AnyNonLocal = true; | |||
3121 | break; | |||
3122 | } | |||
3123 | } | |||
3124 | ||||
3125 | // If all the instructions matched are already in this BB, don't do anything. | |||
3126 | if (!AnyNonLocal) { | |||
3127 | DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); | |||
3128 | return false; | |||
3129 | } | |||
3130 | ||||
3131 | // Insert this computation right after this user. Since our caller is | |||
3132 | // scanning from the top of the BB to the bottom, reuses of the expr are | |||
3133 | // guaranteed to happen later. | |||
3134 | IRBuilder<> Builder(MemoryInst); | |||
3135 | ||||
3136 | // Now that we've determined the addressing expression we want to use and know | |||
3137 | // that we have to sink it into this block, check to see if we have already | |||
3138 | // done this for some other load/store instr in this block. If so, reuse the | |||
3139 | // computation. | |||
3140 | Value *&SunkAddr = SunkAddrs[Addr]; | |||
3141 | if (SunkAddr) { | |||
3142 | DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " | |||
3143 | << *MemoryInst << "\n"); | |||
3144 | if (SunkAddr->getType() != Addr->getType()) | |||
3145 | SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); | |||
3146 | } else if (AddrSinkUsingGEPs || | |||
3147 | (!AddrSinkUsingGEPs.getNumOccurrences() && TM && | |||
3148 | TM->getSubtargetImpl(*MemoryInst->getParent()->getParent()) | |||
3149 | ->useAA())) { | |||
3150 | // By default, we use the GEP-based method when AA is used later. This | |||
3151 | // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. | |||
3152 | DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " | |||
3153 | << *MemoryInst << "\n"); | |||
3154 | Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); | |||
3155 | Value *ResultPtr = nullptr, *ResultIndex = nullptr; | |||
3156 | ||||
3157 | // First, find the pointer. | |||
3158 | if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { | |||
3159 | ResultPtr = AddrMode.BaseReg; | |||
3160 | AddrMode.BaseReg = nullptr; | |||
3161 | } | |||
3162 | ||||
3163 | if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { | |||
3164 | // We can't add more than one pointer together, nor can we scale a | |||
3165 | // pointer (both of which seem meaningless). | |||
3166 | if (ResultPtr || AddrMode.Scale != 1) | |||
3167 | return false; | |||
3168 | ||||
3169 | ResultPtr = AddrMode.ScaledReg; | |||
3170 | AddrMode.Scale = 0; | |||
3171 | } | |||
3172 | ||||
3173 | if (AddrMode.BaseGV) { | |||
3174 | if (ResultPtr) | |||
3175 | return false; | |||
3176 | ||||
3177 | ResultPtr = AddrMode.BaseGV; | |||
3178 | } | |||
3179 | ||||
3180 | // If the real base value actually came from an inttoptr, then the matcher | |||
3181 | // will look through it and provide only the integer value. In that case, | |||
3182 | // use it here. | |||
3183 | if (!ResultPtr && AddrMode.BaseReg) { | |||
3184 | ResultPtr = | |||
3185 | Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr"); | |||
3186 | AddrMode.BaseReg = nullptr; | |||
3187 | } else if (!ResultPtr && AddrMode.Scale == 1) { | |||
3188 | ResultPtr = | |||
3189 | Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr"); | |||
3190 | AddrMode.Scale = 0; | |||
3191 | } | |||
3192 | ||||
3193 | if (!ResultPtr && | |||
3194 | !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { | |||
3195 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
3196 | } else if (!ResultPtr) { | |||
3197 | return false; | |||
3198 | } else { | |||
3199 | Type *I8PtrTy = | |||
3200 | Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); | |||
3201 | ||||
3202 | // Start with the base register. Do this first so that subsequent address | |||
3203 | // matching finds it last, which will prevent it from trying to match it | |||
3204 | // as the scaled value in case it happens to be a mul. That would be | |||
3205 | // problematic if we've sunk a different mul for the scale, because then | |||
3206 | // we'd end up sinking both muls. | |||
3207 | if (AddrMode.BaseReg) { | |||
3208 | Value *V = AddrMode.BaseReg; | |||
3209 | if (V->getType() != IntPtrTy) | |||
3210 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
3211 | ||||
3212 | ResultIndex = V; | |||
3213 | } | |||
3214 | ||||
3215 | // Add the scale value. | |||
3216 | if (AddrMode.Scale) { | |||
3217 | Value *V = AddrMode.ScaledReg; | |||
3218 | if (V->getType() == IntPtrTy) { | |||
3219 | // done. | |||
3220 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
3221 | cast<IntegerType>(V->getType())->getBitWidth()) { | |||
3222 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
3223 | } else { | |||
3224 | // It is only safe to sign extend the BaseReg if we know that the math | |||
3225 | // required to create it did not overflow before we extend it. Since | |||
3226 | // the original IR value was tossed in favor of a constant back when | |||
3227 | // the AddrMode was created we need to bail out gracefully if widths | |||
3228 | // do not match instead of extending it. | |||
3229 | Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex); | |||
3230 | if (I && (ResultIndex != AddrMode.BaseReg)) | |||
3231 | I->eraseFromParent(); | |||
3232 | return false; | |||
3233 | } | |||
3234 | ||||
3235 | if (AddrMode.Scale != 1) | |||
3236 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
3237 | "sunkaddr"); | |||
3238 | if (ResultIndex) | |||
3239 | ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); | |||
3240 | else | |||
3241 | ResultIndex = V; | |||
3242 | } | |||
3243 | ||||
3244 | // Add in the Base Offset if present. | |||
3245 | if (AddrMode.BaseOffs) { | |||
3246 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
3247 | if (ResultIndex) { | |||
3248 | // We need to add this separately from the scale above to help with | |||
3249 | // SDAG consecutive load/store merging. | |||
3250 | if (ResultPtr->getType() != I8PtrTy) | |||
3251 | ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); | |||
3252 | ResultPtr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); | |||
3253 | } | |||
3254 | ||||
3255 | ResultIndex = V; | |||
3256 | } | |||
3257 | ||||
3258 | if (!ResultIndex) { | |||
3259 | SunkAddr = ResultPtr; | |||
3260 | } else { | |||
3261 | if (ResultPtr->getType() != I8PtrTy) | |||
3262 | ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy); | |||
3263 | SunkAddr = Builder.CreateGEP(ResultPtr, ResultIndex, "sunkaddr"); | |||
3264 | } | |||
3265 | ||||
3266 | if (SunkAddr->getType() != Addr->getType()) | |||
3267 | SunkAddr = Builder.CreateBitCast(SunkAddr, Addr->getType()); | |||
3268 | } | |||
3269 | } else { | |||
3270 | DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " | |||
3271 | << *MemoryInst << "\n"); | |||
3272 | Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType()); | |||
3273 | Value *Result = nullptr; | |||
3274 | ||||
3275 | // Start with the base register. Do this first so that subsequent address | |||
3276 | // matching finds it last, which will prevent it from trying to match it | |||
3277 | // as the scaled value in case it happens to be a mul. That would be | |||
3278 | // problematic if we've sunk a different mul for the scale, because then | |||
3279 | // we'd end up sinking both muls. | |||
3280 | if (AddrMode.BaseReg) { | |||
3281 | Value *V = AddrMode.BaseReg; | |||
3282 | if (V->getType()->isPointerTy()) | |||
3283 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
3284 | if (V->getType() != IntPtrTy) | |||
3285 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
3286 | Result = V; | |||
3287 | } | |||
3288 | ||||
3289 | // Add the scale value. | |||
3290 | if (AddrMode.Scale) { | |||
3291 | Value *V = AddrMode.ScaledReg; | |||
3292 | if (V->getType() == IntPtrTy) { | |||
3293 | // done. | |||
3294 | } else if (V->getType()->isPointerTy()) { | |||
3295 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
3296 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
3297 | cast<IntegerType>(V->getType())->getBitWidth()) { | |||
3298 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
3299 | } else { | |||
3300 | // It is only safe to sign extend the BaseReg if we know that the math | |||
3301 | // required to create it did not overflow before we extend it. Since | |||
3302 | // the original IR value was tossed in favor of a constant back when | |||
3303 | // the AddrMode was created we need to bail out gracefully if widths | |||
3304 | // do not match instead of extending it. | |||
3305 | Instruction *I = dyn_cast_or_null<Instruction>(Result); | |||
3306 | if (I && (Result != AddrMode.BaseReg)) | |||
3307 | I->eraseFromParent(); | |||
3308 | return false; | |||
3309 | } | |||
3310 | if (AddrMode.Scale != 1) | |||
3311 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
3312 | "sunkaddr"); | |||
3313 | if (Result) | |||
3314 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
3315 | else | |||
3316 | Result = V; | |||
3317 | } | |||
3318 | ||||
3319 | // Add in the BaseGV if present. | |||
3320 | if (AddrMode.BaseGV) { | |||
3321 | Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); | |||
3322 | if (Result) | |||
3323 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
3324 | else | |||
3325 | Result = V; | |||
3326 | } | |||
3327 | ||||
3328 | // Add in the Base Offset if present. | |||
3329 | if (AddrMode.BaseOffs) { | |||
3330 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
3331 | if (Result) | |||
3332 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
3333 | else | |||
3334 | Result = V; | |||
3335 | } | |||
3336 | ||||
3337 | if (!Result) | |||
3338 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
3339 | else | |||
3340 | SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); | |||
3341 | } | |||
3342 | ||||
3343 | MemoryInst->replaceUsesOfWith(Repl, SunkAddr); | |||
3344 | ||||
3345 | // If we have no uses, recursively delete the value and all dead instructions | |||
3346 | // using it. | |||
3347 | if (Repl->use_empty()) { | |||
3348 | // This can cause recursive deletion, which can invalidate our iterator. | |||
3349 | // Use a WeakVH to hold onto it in case this happens. | |||
3350 | WeakVH IterHandle(CurInstIterator); | |||
3351 | BasicBlock *BB = CurInstIterator->getParent(); | |||
3352 | ||||
3353 | RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); | |||
3354 | ||||
3355 | if (IterHandle != CurInstIterator) { | |||
3356 | // If the iterator instruction was recursively deleted, start over at the | |||
3357 | // start of the block. | |||
3358 | CurInstIterator = BB->begin(); | |||
3359 | SunkAddrs.clear(); | |||
3360 | } | |||
3361 | } | |||
3362 | ++NumMemoryInsts; | |||
3363 | return true; | |||
3364 | } | |||
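     | // For illustration, sinking AddrMode {BaseReg = %b, ScaledReg = %i, | |||
     | // Scale = 4, BaseOffs = 8} with the GEP-based method above emits, roughly | |||
     | // (names are illustrative, %b8 being %b cast to i8*): | |||
     | //   %sunkaddr = mul i64 %i, 4 | |||
     | //   %sunkaddr1 = getelementptr i8, i8* %b8, i64 %sunkaddr | |||
     | //   %sunkaddr2 = getelementptr i8, i8* %sunkaddr1, i64 8 | |||
     | // and rewrites the memory instruction to use the final pointer. | |||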
3365 | ||||
3366 | /// OptimizeInlineAsmInst - If there are any memory operands, use | |||
3367 | /// OptimizeMemoryInst to sink their address computation into the block when | |||
3368 | /// possible / profitable. | |||
3369 | bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) { | |||
3370 | bool MadeChange = false; | |||
3371 | ||||
3372 | TargetLowering::AsmOperandInfoVector | |||
3373 | TargetConstraints = TLI->ParseConstraints(CS); | |||
3374 | unsigned ArgNo = 0; | |||
3375 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
3376 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
3377 | ||||
3378 | // Compute the constraint code and ConstraintType to use. | |||
3379 | TLI->ComputeConstraintToUse(OpInfo, SDValue()); | |||
3380 | ||||
3381 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && | |||
3382 | OpInfo.isIndirect) { | |||
3383 | Value *OpVal = CS->getArgOperand(ArgNo++); | |||
3384 | MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType()); | |||
3385 | } else if (OpInfo.Type == InlineAsm::isInput) | |||
3386 | ArgNo++; | |||
3387 | } | |||
3388 | ||||
3389 | return MadeChange; | |||
3390 | } | |||
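// Hypothetical example (not from this file) of the case handled above: in
//   call void asm "movl $0, %eax", "*m"(i32* %addr)
// the "m" constraint is an indirect memory operand, so %addr is handed to
// OptimizeMemoryInst and its address computation may be sunk into this block.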
3391 | ||||
3392 | /// \brief Check if all the uses of \p Inst are equivalent (or free) zero or | |||
3393 | /// sign extensions. | |||
3394 | static bool hasSameExtUse(Instruction *Inst, const TargetLowering &TLI) { | |||
3395 | assert(!Inst->use_empty() && "Input must have at least one use"); | |||
3396 | const Instruction *FirstUser = cast<Instruction>(*Inst->user_begin()); | |||
3397 | bool IsSExt = isa<SExtInst>(FirstUser); | |||
3398 | Type *ExtTy = FirstUser->getType(); | |||
3399 | for (const User *U : Inst->users()) { | |||
3400 | const Instruction *UI = cast<Instruction>(U); | |||
3401 | if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) | |||
3402 | return false; | |||
3403 | Type *CurTy = UI->getType(); | |||
3404 | // Same input and output types: Same instruction after CSE. | |||
3405 | if (CurTy == ExtTy) | |||
3406 | continue; | |||
3407 | ||||
3408 | // If IsSExt is true, we are in this situation: | |||
3409 | // a = Inst | |||
3410 | // b = sext ty1 a to ty2 | |||
3411 | // c = sext ty1 a to ty3 | |||
3412 | // Assuming ty2 is shorter than ty3, this could be turned into: | |||
3413 | // a = Inst | |||
3414 | // b = sext ty1 a to ty2 | |||
3415 | // c = sext ty2 b to ty3 | |||
3416 | // However, the last sext is not free. | |||
3417 | if (IsSExt) | |||
3418 | return false; | |||
3419 | ||||
3420 | // This is a ZExt; extending from one type to the other may be free. | |||
3421 | // In that case, we would not need to account for the different use. | |||
3422 | Type *NarrowTy; | |||
3423 | Type *LargeTy; | |||
3424 | if (ExtTy->getScalarType()->getIntegerBitWidth() > | |||
3425 | CurTy->getScalarType()->getIntegerBitWidth()) { | |||
3426 | NarrowTy = CurTy; | |||
3427 | LargeTy = ExtTy; | |||
3428 | } else { | |||
3429 | NarrowTy = ExtTy; | |||
3430 | LargeTy = CurTy; | |||
3431 | } | |||
3432 | ||||
3433 | if (!TLI.isZExtFree(NarrowTy, LargeTy)) | |||
3434 | return false; | |||
3435 | } | |||
3436 | // All uses are the same or can be derived from one another for free. | |||
3437 | return true; | |||
3438 | } | |||
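// Example (illustrative, not from the original source):
//   %a = load i16* %p
//   %b = zext i16 %a to i32
//   %c = zext i16 %a to i64
// Both users of %a are zero extensions, so hasSameExtUse returns true as
// long as TLI.isZExtFree(i32, i64) holds; a mix of sext and zext users, or
// two sexts to different types, would return false.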
3439 | ||||
3440 | /// \brief Try to form ExtLd by promoting \p Exts until they reach a | |||
3441 | /// load instruction. | |||
3442 | /// If an ext(load) can be formed, it is returned via \p LI for the load | |||
3443 | /// and \p Inst for the extension. | |||
3444 | /// Otherwise LI == nullptr and Inst == nullptr. | |||
3445 | /// When some promotions happened, \p TPT contains the proper state to | |||
3446 | /// revert them. | |||
3447 | /// | |||
3448 | /// \return true when promoting was necessary to expose the ext(load) | |||
3449 | /// opportunity, false otherwise. | |||
3450 | /// | |||
3451 | /// Example: | |||
3452 | /// \code | |||
3453 | /// %ld = load i32* %addr | |||
3454 | /// %add = add nuw i32 %ld, 4 | |||
3455 | /// %zext = zext i32 %add to i64 | |||
3456 | /// \endcode | |||
3457 | /// => | |||
3458 | /// \code | |||
3459 | /// %ld = load i32* %addr | |||
3460 | /// %zext = zext i32 %ld to i64 | |||
3461 | /// %add = add nuw i64 %zext, 4 | |||
3462 | /// \endcode | |||
3463 | /// Thanks to the promotion, we can match zext(load i32*) to i64. | |||
3464 | bool CodeGenPrepare::ExtLdPromotion(TypePromotionTransaction &TPT, | |||
3465 | LoadInst *&LI, Instruction *&Inst, | |||
3466 | const SmallVectorImpl<Instruction *> &Exts, | |||
3467 | unsigned CreatedInsts = 0) { | |||
3468 | // Iterate over all the extensions to see if one forms an ext(load). | |||
3469 | for (auto I : Exts) { | |||
3470 | // Check if we directly have ext(load). | |||
3471 | if ((LI = dyn_cast<LoadInst>(I->getOperand(0)))) { | |||
3472 | Inst = I; | |||
3473 | // No promotion happened here. | |||
3474 | return false; | |||
3475 | } | |||
3476 | // Check whether or not we want to do any promotion. | |||
3477 | if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) | |||
3478 | continue; | |||
3479 | // Get the action to perform the promotion. | |||
3480 | TypePromotionHelper::Action TPH = TypePromotionHelper::getAction( | |||
3481 | I, InsertedTruncsSet, *TLI, PromotedInsts); | |||
3482 | // Check if we can promote. | |||
3483 | if (!TPH) | |||
3484 | continue; | |||
3485 | // Save the current state. | |||
3486 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
3487 | TPT.getRestorationPoint(); | |||
3488 | SmallVector<Instruction *, 4> NewExts; | |||
3489 | unsigned NewCreatedInsts = 0; | |||
3490 | // Promote. | |||
3491 | Value *PromotedVal = | |||
3492 | TPH(I, TPT, PromotedInsts, NewCreatedInsts, &NewExts, nullptr); | |||
3493 | assert(PromotedVal && | |||
3494 | "TypePromotionHelper should have filtered out those cases"); | |||
3495 | ||||
3496 | // We would be able to merge only one extension in a load. | |||
3497 | // Therefore, if we have more than 1 new extension we heuristically | |||
3498 | // cut this search path, because it means we degrade the code quality. | |||
3499 | // With exactly 2, the transformation is neutral, because we will merge | |||
3500 | // one extension but leave one. However, we optimistically keep going, | |||
3501 | // because the new extension may be removed too. | |||
3502 | unsigned TotalCreatedInsts = CreatedInsts + NewCreatedInsts; | |||
3503 | if (!StressExtLdPromotion && | |||
3504 | (TotalCreatedInsts > 1 || | |||
3505 | !isPromotedInstructionLegal(*TLI, PromotedVal))) { | |||
3506 | // The promotion is not profitable, rollback to the previous state. | |||
3507 | TPT.rollback(LastKnownGood); | |||
3508 | continue; | |||
3509 | } | |||
3510 | // The promotion is profitable. | |||
3511 | // Check if it exposes an ext(load). | |||
3512 | (void)ExtLdPromotion(TPT, LI, Inst, NewExts, TotalCreatedInsts); | |||
3513 | if (LI && (StressExtLdPromotion || NewCreatedInsts == 0 || | |||
3514 | // If we have created a new extension, i.e., now we have two | |||
3515 | // extensions, we must make sure one of them is merged with | |||
3516 | // the load; otherwise we may degrade the code quality. | |||
3517 | (LI->hasOneUse() || hasSameExtUse(LI, *TLI)))) | |||
3518 | // Promotion happened. | |||
3519 | return true; | |||
3520 | // If this does not help to expose an ext(load) then, rollback. | |||
3521 | TPT.rollback(LastKnownGood); | |||
3522 | } | |||
3523 | // None of the extensions can form an ext(load). | |||
3524 | LI = nullptr; | |||
3525 | Inst = nullptr; | |||
3526 | return false; | |||
3527 | } | |||
3528 | ||||
3529 | /// MoveExtToFormExtLoad - Move a zext or sext fed by a load into the same | |||
3530 | /// basic block as the load, unless conditions are unfavorable. This allows | |||
3531 | /// SelectionDAG to fold the extend into the load. | |||
3532 | /// \p I [in/out] The extension; it may be modified during the process if | |||
3533 | /// some promotions apply. | |||
3534 | /// | |||
3535 | bool CodeGenPrepare::MoveExtToFormExtLoad(Instruction *&I) { | |||
3536 | // Try to promote a chain of computation if doing so allows us to form | |||
3537 | // an extended load. | |||
3538 | TypePromotionTransaction TPT; | |||
3539 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
3540 | TPT.getRestorationPoint(); | |||
3541 | SmallVector<Instruction *, 1> Exts; | |||
3542 | Exts.push_back(I); | |||
3543 | // Look for a load being extended. | |||
3544 | LoadInst *LI = nullptr; | |||
3545 | Instruction *OldExt = I; | |||
3546 | bool HasPromoted = ExtLdPromotion(TPT, LI, I, Exts); | |||
3547 | if (!LI || !I) { | |||
3548 | assert(!HasPromoted && !LI && "If we did not match any load instruction " | |||
3549 | "the code must remain the same"); | |||
3550 | I = OldExt; | |||
3551 | return false; | |||
3552 | } | |||
3553 | ||||
3554 | // If they're already in the same block, there's nothing to do. | |||
3555 | // Make the cheap checks first if we did not promote. | |||
3556 | // If we promoted, we need to check if it is indeed profitable. | |||
3557 | if (!HasPromoted && LI->getParent() == I->getParent()) | |||
3558 | return false; | |||
3559 | ||||
3560 | EVT VT = TLI->getValueType(I->getType()); | |||
3561 | EVT LoadVT = TLI->getValueType(LI->getType()); | |||
3562 | ||||
3563 | // If the load has other users and the truncate is not free, this probably | |||
3564 | // isn't worthwhile. | |||
3565 | if (!LI->hasOneUse() && TLI && | |||
3566 | (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) && | |||
3567 | !TLI->isTruncateFree(I->getType(), LI->getType())) { | |||
3568 | I = OldExt; | |||
3569 | TPT.rollback(LastKnownGood); | |||
3570 | return false; | |||
3571 | } | |||
3572 | ||||
3573 | // Check whether the target supports casts folded into loads. | |||
3574 | unsigned LType; | |||
3575 | if (isa<ZExtInst>(I)) | |||
3576 | LType = ISD::ZEXTLOAD; | |||
3577 | else { | |||
3578 | assert(isa<SExtInst>(I) && "Unexpected ext type!"); | |||
3579 | LType = ISD::SEXTLOAD; | |||
3580 | } | |||
3581 | if (TLI && !TLI->isLoadExtLegal(LType, VT, LoadVT)) { | |||
3582 | I = OldExt; | |||
3583 | TPT.rollback(LastKnownGood); | |||
3584 | return false; | |||
3585 | } | |||
3586 | ||||
3587 | // Move the extend into the same block as the load, so that SelectionDAG | |||
3588 | // can fold it. | |||
3589 | TPT.commit(); | |||
3590 | I->removeFromParent(); | |||
3591 | I->insertAfter(LI); | |||
3592 | ++NumExtsMoved; | |||
3593 | return true; | |||
3594 | } | |||
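// Illustrative before/after for MoveExtToFormExtLoad (hypothetical IR):
//   bb0:
//     %ld = load i16* %p
//     br label %bb1
//   bb1:
//     %ext = sext i16 %ld to i32
// becomes
//   bb0:
//     %ld = load i16* %p
//     %ext = sext i16 %ld to i32
//     br label %bb1
// so SelectionDAG, which works one block at a time, can fold the sext into a
// sign-extending load.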
3595 | ||||
3596 | bool CodeGenPrepare::OptimizeExtUses(Instruction *I) { | |||
3597 | BasicBlock *DefBB = I->getParent(); | |||
3598 | ||||
3599 | // If the result of a {s|z}ext and its source are both live out, rewrite | |||
3600 | // all other uses of the source with the result of the extension. | |||
3601 | Value *Src = I->getOperand(0); | |||
3602 | if (Src->hasOneUse()) | |||
3603 | return false; | |||
3604 | ||||
3605 | // Only do this xform if truncating is free. | |||
3606 | if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) | |||
3607 | return false; | |||
3608 | ||||
3609 | // Only safe to perform the optimization if the source is also defined in | |||
3610 | // this block. | |||
3611 | if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) | |||
3612 | return false; | |||
3613 | ||||
3614 | bool DefIsLiveOut = false; | |||
3615 | for (User *U : I->users()) { | |||
3616 | Instruction *UI = cast<Instruction>(U); | |||
3617 | ||||
3618 | // Figure out which BB this ext is used in. | |||
3619 | BasicBlock *UserBB = UI->getParent(); | |||
3620 | if (UserBB == DefBB) continue; | |||
3621 | DefIsLiveOut = true; | |||
3622 | break; | |||
3623 | } | |||
3624 | if (!DefIsLiveOut) | |||
3625 | return false; | |||
3626 | ||||
3627 | // Make sure none of the uses are PHI nodes. | |||
3628 | for (User *U : Src->users()) { | |||
3629 | Instruction *UI = cast<Instruction>(U); | |||
3630 | BasicBlock *UserBB = UI->getParent(); | |||
3631 | if (UserBB == DefBB) continue; | |||
3632 | // Be conservative. We don't want this xform to end up introducing | |||
3633 | // reloads just before load / store instructions. | |||
3634 | if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) | |||
3635 | return false; | |||
3636 | } | |||
3637 | ||||
3638 | // InsertedTruncs - Only insert one trunc per block. | |||
3639 | DenseMap<BasicBlock*, Instruction*> InsertedTruncs; | |||
3640 | ||||
3641 | bool MadeChange = false; | |||
3642 | for (Use &U : Src->uses()) { | |||
3643 | Instruction *User = cast<Instruction>(U.getUser()); | |||
3644 | ||||
3645 | // Figure out which BB this use is in. | |||
3646 | BasicBlock *UserBB = User->getParent(); | |||
3647 | if (UserBB == DefBB) continue; | |||
3648 | ||||
3649 | // Both src and def are live in this block. Rewrite the use. | |||
3650 | Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; | |||
3651 | ||||
3652 | if (!InsertedTrunc) { | |||
3653 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
3654 | InsertedTrunc = new TruncInst(I, Src->getType(), "", InsertPt); | |||
3655 | InsertedTruncsSet.insert(InsertedTrunc); | |||
3656 | } | |||
3657 | ||||
3658 | // Replace a use of the {s|z}ext source with a use of the result. | |||
3659 | U = InsertedTrunc; | |||
3660 | ++NumExtUses; | |||
3661 | MadeChange = true; | |||
3662 | } | |||
3663 | ||||
3664 | return MadeChange; | |||
3665 | } | |||
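// Sketch of the rewrite performed above (hypothetical IR): given
//   bb0:
//     %x = add i32 %a, %b
//     %ext = zext i32 %x to i64
//     br label %bb1
//   bb1:
//     %use = add i32 %x, 1
// the use of %x in bb1 is rewritten, when truncation is free, to
//     %tr = trunc i64 %ext to i32
//     %use = add i32 %tr, 1
// so only %ext, not both %x and %ext, has to be live out of bb0.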
3666 | ||||
3667 | /// isFormingBranchFromSelectProfitable - Returns true if a SelectInst should be | |||
3668 | /// turned into an explicit branch. | |||
3669 | static bool isFormingBranchFromSelectProfitable(SelectInst *SI) { | |||
3670 | // FIXME: This should use the same heuristics as IfConversion to determine | |||
3671 | // whether a select is better represented as a branch. This requires that | |||
3672 | // branch probability metadata is preserved for the select, which is not the | |||
3673 | // case currently. | |||
3674 | ||||
3675 | CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); | |||
3676 | ||||
3677 | // If the branch is predicted right, an out-of-order CPU can avoid blocking | |||
3678 | // on the compare. Emit compares with a memory operand as branches rather | |||
3679 | // than cmovs, to avoid stalls on the load from memory. If the compare has | |||
3680 | // more than one use, there's probably another cmov or setcc around, so it's | |||
3681 | // not worth emitting a branch. | |||
3682 | if (!Cmp) | |||
3683 | return false; | |||
3684 | ||||
3685 | Value *CmpOp0 = Cmp->getOperand(0); | |||
3686 | Value *CmpOp1 = Cmp->getOperand(1); | |||
3687 | ||||
3688 | // We check that the memory operand has one use to avoid uses of the loaded | |||
3689 | // value directly after the compare, making branches unprofitable. | |||
3690 | return Cmp->hasOneUse() && | |||
3691 | ((isa<LoadInst>(CmpOp0) && CmpOp0->hasOneUse()) || | |||
3692 | (isa<LoadInst>(CmpOp1) && CmpOp1->hasOneUse())); | |||
3693 | } | |||
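// Illustrative profitable pattern (hypothetical IR): both the load and the
// compare have a single use, so the function above returns true:
//   %ld = load i32* %p
//   %cmp = icmp eq i32 %ld, 0
//   %sel = select i1 %cmp, i32 %a, i32 %b
// Turning %sel into a branch lets a predicted branch hide the latency of the
// load feeding the compare.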
3694 | ||||
3695 | ||||
3696 | /// If we have a SelectInst that will likely profit from branch prediction, | |||
3697 | /// turn it into a branch. | |||
3698 | bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) { | |||
3699 | bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); | |||
3700 | ||||
3701 | // Can we convert the 'select' into control flow? | |||
3702 | if (DisableSelectToBranch || OptSize || !TLI || VectorCond) | |||
3703 | return false; | |||
3704 | ||||
3705 | TargetLowering::SelectSupportKind SelectKind; | |||
3706 | if (VectorCond) | |||
3707 | SelectKind = TargetLowering::VectorMaskSelect; | |||
3708 | else if (SI->getType()->isVectorTy()) | |||
3709 | SelectKind = TargetLowering::ScalarCondVectorVal; | |||
3710 | else | |||
3711 | SelectKind = TargetLowering::ScalarValSelect; | |||
3712 | ||||
3713 | // Do we have efficient codegen support for this kind of 'select'? | |||
3714 | if (TLI->isSelectSupported(SelectKind)) { | |||
3715 | // We have efficient codegen support for the select instruction. | |||
3716 | // Check if it is profitable to keep this 'select'. | |||
3717 | if (!TLI->isPredictableSelectExpensive() || | |||
3718 | !isFormingBranchFromSelectProfitable(SI)) | |||
3719 | return false; | |||
3720 | } | |||
3721 | ||||
3722 | ModifiedDT = true; | |||
3723 | ||||
3724 | // First, we split the block containing the select into 2 blocks. | |||
3725 | BasicBlock *StartBlock = SI->getParent(); | |||
3726 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(SI)); | |||
3727 | BasicBlock *NextBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); | |||
3728 | ||||
3729 | // Create a new block serving as the landing pad for the branch. | |||
3730 | BasicBlock *SmallBlock = BasicBlock::Create(SI->getContext(), "select.mid", | |||
3731 | NextBlock->getParent(), NextBlock); | |||
3732 | ||||
3733 | // Move the unconditional branch from the block with the select in it into our | |||
3734 | // landing pad block. | |||
3735 | StartBlock->getTerminator()->eraseFromParent(); | |||
3736 | BranchInst::Create(NextBlock, SmallBlock); | |||
3737 | ||||
3738 | // Insert the real conditional branch based on the original condition. | |||
3739 | BranchInst::Create(NextBlock, SmallBlock, SI->getCondition(), SI); | |||
3740 | ||||
3741 | // The select itself is replaced with a PHI Node. | |||
3742 | PHINode *PN = PHINode::Create(SI->getType(), 2, "", NextBlock->begin()); | |||
3743 | PN->takeName(SI); | |||
3744 | PN->addIncoming(SI->getTrueValue(), StartBlock); | |||
3745 | PN->addIncoming(SI->getFalseValue(), SmallBlock); | |||
3746 | SI->replaceAllUsesWith(PN); | |||
3747 | SI->eraseFromParent(); | |||
3748 | ||||
3749 | // Instruct OptimizeBlock to skip to the next block. | |||
3750 | CurInstIterator = StartBlock->end(); | |||
3751 | ++NumSelectsExpanded; | |||
3752 | return true; | |||
3753 | } | |||
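// Resulting CFG, sketched with hypothetical names:
//   StartBlock:
//     ...
//     br i1 %cond, label %select.end, label %select.mid
//   select.mid:                         ; empty landing pad
//     br label %select.end
//   select.end:
//     %phi = phi i32 [ %trueval, %StartBlock ], [ %falseval, %select.mid ]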
3754 | ||||
3755 | static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { | |||
3756 | SmallVector<int, 16> Mask(SVI->getShuffleMask()); | |||
3757 | int SplatElem = -1; | |||
3758 | for (unsigned i = 0; i < Mask.size(); ++i) { | |||
3759 | if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) | |||
3760 | return false; | |||
3761 | SplatElem = Mask[i]; | |||
3762 | } | |||
3763 | ||||
3764 | return true; | |||
3765 | } | |||
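// Examples (illustrative): mask <0, 0, undef, 0> is treated as a broadcast of
// lane 0, so isBroadcastShuffle returns true; mask <0, 1, 0, 1> returns false
// at the first mismatching lane.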
3766 | ||||
3767 | /// Some targets have expensive vector shifts if the lanes aren't all the same | |||
3768 | /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases | |||
3769 | /// it's often worth sinking a shufflevector splat down to its use so that | |||
3770 | /// codegen can spot all lanes are identical. | |||
3771 | bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) { | |||
3772 | BasicBlock *DefBB = SVI->getParent(); | |||
3773 | ||||
3774 | // Only do this xform if variable vector shifts are particularly expensive. | |||
3775 | if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) | |||
3776 | return false; | |||
3777 | ||||
3778 | // We only expect better codegen by sinking a shuffle if we can recognise a | |||
3779 | // constant splat. | |||
3780 | if (!isBroadcastShuffle(SVI)) | |||
3781 | return false; | |||
3782 | ||||
3783 | // InsertedShuffles - Only insert a shuffle in each block once. | |||
3784 | DenseMap<BasicBlock*, Instruction*> InsertedShuffles; | |||
3785 | ||||
3786 | bool MadeChange = false; | |||
3787 | for (User *U : SVI->users()) { | |||
3788 | Instruction *UI = cast<Instruction>(U); | |||
3789 | ||||
3790 | // Figure out which BB this shuffle is used in. | |||
3791 | BasicBlock *UserBB = UI->getParent(); | |||
3792 | if (UserBB == DefBB) continue; | |||
3793 | ||||
3794 | // For now only apply this when the splat is used by a shift instruction. | |||
3795 | if (!UI->isShift()) continue; | |||
3796 | ||||
3797 | // Everything checks out, sink the shuffle if the user's block doesn't | |||
3798 | // already have a copy. | |||
3799 | Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; | |||
3800 | ||||
3801 | if (!InsertedShuffle) { | |||
3802 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
3803 | InsertedShuffle = new ShuffleVectorInst(SVI->getOperand(0), | |||
3804 | SVI->getOperand(1), | |||
3805 | SVI->getOperand(2), "", InsertPt); | |||
3806 | } | |||
3807 | ||||
3808 | UI->replaceUsesOfWith(SVI, InsertedShuffle); | |||
3809 | MadeChange = true; | |||
3810 | } | |||
3811 | ||||
3812 | // If we removed all uses, nuke the shuffle. | |||
3813 | if (SVI->use_empty()) { | |||
3814 | SVI->eraseFromParent(); | |||
3815 | MadeChange = true; | |||
3816 | } | |||
3817 | ||||
3818 | return MadeChange; | |||
3819 | } | |||
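// Illustrative sketch (hypothetical IR): with a splat defined in one block
//   bb0:
//     %amt = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
//     br label %bb1
//   bb1:
//     %r = shl <4 x i32> %x, %amt
// a copy of the shufflevector is inserted at the top of bb1 and the shift is
// rewritten to use it, so codegen sees a uniform shift amount locally.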
3820 | ||||
3821 | namespace { | |||
3822 | /// \brief Helper class to promote a scalar operation to a vector one. | |||
3823 | /// This class is used to move an extractelement transition downward. | |||
3824 | /// E.g., | |||
3825 | /// a = vector_op <2 x i32> | |||
3826 | /// b = extractelement <2 x i32> a, i32 0 | |||
3827 | /// c = scalar_op b | |||
3828 | /// store c | |||
3829 | /// | |||
3830 | /// => | |||
3831 | /// a = vector_op <2 x i32> | |||
3832 | /// c = vector_op a (equivalent to scalar_op on the related lane) | |||
3833 | /// * d = extractelement <2 x i32> c, i32 0 | |||
3834 | /// * store d | |||
3835 | /// Assuming both extractelement and store can be combined, we get rid of | |||
3836 | /// the transition. | |||
3837 | class VectorPromoteHelper { | |||
3838 | /// Used to perform some checks on the legality of vector operations. | |||
3839 | const TargetLowering &TLI; | |||
3840 | ||||
3841 | /// Used to estimate the cost of the promoted chain. | |||
3842 | const TargetTransformInfo &TTI; | |||
3843 | ||||
3844 | /// The transition being moved downwards. | |||
3845 | Instruction *Transition; | |||
3846 | /// The sequence of instructions to be promoted. | |||
3847 | SmallVector<Instruction *, 4> InstsToBePromoted; | |||
3848 | /// Cost of combining a store and an extract. | |||
3849 | unsigned StoreExtractCombineCost; | |||
3850 | /// Instruction that will be combined with the transition. | |||
3851 | Instruction *CombineInst; | |||
3852 | ||||
3853 | /// \brief The instruction that represents the current end of the transition. | |||
3854 | /// Since we are faking the promotion until we reach the end of the chain | |||
3855 | /// of computation, we need a way to get the current end of the transition. | |||
3856 | Instruction *getEndOfTransition() const { | |||
3857 | if (InstsToBePromoted.empty()) | |||
3858 | return Transition; | |||
3859 | return InstsToBePromoted.back(); | |||
3860 | } | |||
3861 | ||||
3862 | /// \brief Return the index of the original value in the transition. | |||
3863 | /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, | |||
3864 | /// c, is at index 0. | |||
3865 | unsigned getTransitionOriginalValueIdx() const { | |||
3866 | assert(isa<ExtractElementInst>(Transition) && | |||
3867 | "Other kinds of transitions are not supported yet"); | |||
3868 | return 0; | |||
3869 | } | |||
3870 | ||||
3871 | /// \brief Return the index of the index in the transition. | |||
3872 | /// E.g., for "extractelement <2 x i32> c, i32 0" the index | |||
3873 | /// is at index 1. | |||
3874 | unsigned getTransitionIdx() const { | |||
3875 | assert(isa<ExtractElementInst>(Transition) && | |||
3876 | "Other kinds of transitions are not supported yet"); | |||
3877 | return 1; | |||
3878 | } | |||
3879 | ||||
3880 | /// \brief Get the type of the transition. | |||
3881 | /// This is the type of the original value. | |||
3882 | /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the | |||
3883 | /// transition is <2 x i32>. | |||
3884 | Type *getTransitionType() const { | |||
3885 | return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); | |||
3886 | } | |||
3887 | ||||
3888 | /// \brief Promote \p ToBePromoted by moving \p Def downward past it. | |||
3889 | /// I.e., we have the following sequence: | |||
3890 | /// Def = Transition <ty1> a to <ty2> | |||
3891 | /// b = ToBePromoted <ty2> Def, ... | |||
3892 | /// => | |||
3893 | /// b = ToBePromoted <ty1> a, ... | |||
3894 | /// Def = Transition <ty1> ToBePromoted to <ty2> | |||
3895 | void promoteImpl(Instruction *ToBePromoted); | |||
3896 | ||||
3897 | /// \brief Check whether or not it is profitable to promote all the | |||
3898 | /// instructions enqueued to be promoted. | |||
3899 | bool isProfitableToPromote() { | |||
3900 | Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
3901 | unsigned Index = isa<ConstantInt>(ValIdx) | |||
3902 | ? cast<ConstantInt>(ValIdx)->getZExtValue() | |||
3903 | : -1; | |||
3904 | Type *PromotedType = getTransitionType(); | |||
3905 | ||||
3906 | StoreInst *ST = cast<StoreInst>(CombineInst); | |||
3907 | unsigned AS = ST->getPointerAddressSpace(); | |||
3908 | unsigned Align = ST->getAlignment(); | |||
3909 | // Check if this store is supported. | |||
3910 | if (!TLI.allowsMisalignedMemoryAccesses( | |||
3911 | TLI.getValueType(ST->getValueOperand()->getType()), AS, Align)) { | |||
3912 | // If this is not supported, there is no way we can combine | |||
3913 | // the extract with the store. | |||
3914 | return false; | |||
3915 | } | |||
3916 | ||||
3917 | // The scalar chain of computation has to pay for the scalar-to-vector | |||
3918 | // transition. | |||
3919 | // The vector chain has to account for the combining cost. | |||
3920 | uint64_t ScalarCost = | |||
3921 | TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); | |||
3922 | uint64_t VectorCost = StoreExtractCombineCost; | |||
3923 | for (const auto &Inst : InstsToBePromoted) { | |||
3924 | // Compute the cost. | |||
3925 | // By construction, all instructions being promoted are arithmetic ones. | |||
3926 | // Moreover, one argument is a constant that can be viewed as a splat | |||
3927 | // constant. | |||
3928 | Value *Arg0 = Inst->getOperand(0); | |||
3929 | bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || | |||
3930 | isa<ConstantFP>(Arg0); | |||
3931 | TargetTransformInfo::OperandValueKind Arg0OVK = | |||
3932 | IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue | |||
3933 | : TargetTransformInfo::OK_AnyValue; | |||
3934 | TargetTransformInfo::OperandValueKind Arg1OVK = | |||
3935 | !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue | |||
3936 | : TargetTransformInfo::OK_AnyValue; | |||
3937 | ScalarCost += TTI.getArithmeticInstrCost( | |||
3938 | Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); | |||
3939 | VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, | |||
3940 | Arg0OVK, Arg1OVK); | |||
3941 | } | |||
3942 | DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: " | |||
3943 | << ScalarCost << "\nVector: " << VectorCost << '\n'); | |||
3944 | return ScalarCost > VectorCost; | |||
3945 | } | |||
3946 | ||||
3947 | /// \brief Generate a constant vector with \p Val with the same | |||
3948 | /// number of elements as the transition. | |||
3949 | /// \p UseSplat defines whether or not \p Val should be replicated | |||
3950 | /// across the whole vector. | |||
3951 | /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, | |||
3952 | /// otherwise we generate a vector with as many undef as possible: | |||
3953 | /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only | |||
3954 | /// used at the index of the extract. | |||
3955 | Value *getConstantVector(Constant *Val, bool UseSplat) const { | |||
3956 | unsigned ExtractIdx = UINT_MAX; | |||
3957 | if (!UseSplat) { | |||
3958 | // If we cannot determine where the constant must be, we have to | |||
3959 | // use a splat constant. | |||
3960 | Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); | |||
3961 | if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) | |||
3962 | ExtractIdx = CstVal->getSExtValue(); | |||
3963 | else | |||
3964 | UseSplat = true; | |||
3965 | } | |||
3966 | ||||
3967 | unsigned End = getTransitionType()->getVectorNumElements(); | |||
3968 | if (UseSplat) | |||
3969 | return ConstantVector::getSplat(End, Val); | |||
3970 | ||||
3971 | SmallVector<Constant *, 4> ConstVec; | |||
3972 | UndefValue *UndefVal = UndefValue::get(Val->getType()); | |||
3973 | for (unsigned Idx = 0; Idx != End; ++Idx) { | |||
3974 | if (Idx == ExtractIdx) | |||
3975 | ConstVec.push_back(Val); | |||
3976 | else | |||
3977 | ConstVec.push_back(UndefVal); | |||
3978 | } | |||
3979 | return ConstantVector::get(ConstVec); | |||
3980 | } | |||
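// Example (illustrative): for a <4 x i32> transition whose extract index is
// the constant 2, getConstantVector(i32 7, /*UseSplat=*/false) produces
//   <i32 undef, i32 undef, i32 7, i32 undef>
// whereas UseSplat == true (or a non-constant index) produces
//   <i32 7, i32 7, i32 7, i32 7>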
3981 | ||||
3982 | /// \brief Check if promoting the operand at \p OperandIdx in \p Use to a | |||
3983 | /// vector type can trigger undefined behavior. | |||
3984 | static bool canCauseUndefinedBehavior(const Instruction *Use, | |||
3985 | unsigned OperandIdx) { | |||
3986 | // It is not safe to introduce undef when the operand is on | |||
3987 | // the right-hand side of a division-like instruction. | |||
3988 | if (OperandIdx != 1) | |||
3989 | return false; | |||
3990 | switch (Use->getOpcode()) { | |||
3991 | default: | |||
3992 | return false; | |||
3993 | case Instruction::SDiv: | |||
3994 | case Instruction::UDiv: | |||
3995 | case Instruction::SRem: | |||
3996 | case Instruction::URem: | |||
3997 | return true; | |||
3998 | case Instruction::FDiv: | |||
3999 | case Instruction::FRem: | |||
4000 | return !Use->hasNoNaNs(); | |||
4001 | } | |||
4002 | llvm_unreachable(nullptr); | |||
4003 | } | |||
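// Example (illustrative): in
//   %d = udiv i32 %x, %ext
// the transition feeds operand 1 of a udiv, so promoting it would place
// undef in the unused vector lanes of the divisor and could introduce a
// division by zero; canCauseUndefinedBehavior therefore returns true here.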
4004 | ||||
4005 | public: | |||
4006 | VectorPromoteHelper(const TargetLowering &TLI, const TargetTransformInfo &TTI, | |||
4007 | Instruction *Transition, unsigned CombineCost) | |||
4008 | : TLI(TLI), TTI(TTI), Transition(Transition), | |||
4009 | StoreExtractCombineCost(CombineCost), CombineInst(nullptr) { | |||
4010 | assert(Transition && "Do not know how to promote null"); | |||
4011 | } | |||
4012 | ||||
4013 | /// \brief Check if we can promote \p ToBePromoted to \p Type. | |||
4014 | bool canPromote(const Instruction *ToBePromoted) const { | |||
4015 | // We could support CastInst too. | |||
4016 | return isa<BinaryOperator>(ToBePromoted); | |||
4017 | } | |||
4018 | ||||
4019 | /// \brief Check if it is profitable to promote \p ToBePromoted | |||
4020 | /// by moving the transition downward past it. | |||
4021 | bool shouldPromote(const Instruction *ToBePromoted) const { | |||
4022 | // Promote only if all the operands can be statically expanded. | |||
4023 | // Indeed, we do not want to introduce any new kind of transitions. | |||
4024 | for (const Use &U : ToBePromoted->operands()) { | |||
4025 | const Value *Val = U.get(); | |||
4026 | if (Val == getEndOfTransition()) { | |||
4027 | // If the use is a division and the transition is on the rhs, | |||
4028 | // we cannot promote the operation, otherwise we may create a | |||
4029 | // division by zero. | |||
4030 | if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) | |||
4031 | return false; | |||
4032 | continue; | |||
4033 | } | |||
4034 | if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && | |||
4035 | !isa<ConstantFP>(Val)) | |||
4036 | return false; | |||
4037 | } | |||
4038 | // Check that the resulting operation is legal. | |||
4039 | int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); | |||
4040 | if (!ISDOpcode) | |||
4041 | return false; | |||
4042 | return StressStoreExtract || | |||
4043 | TLI.isOperationLegalOrCustom( | |||
4044 | ISDOpcode, TLI.getValueType(getTransitionType(), true)); | |||
4045 | } | |||
4046 | ||||
4047 | /// \brief Check whether or not \p Use can be combined | |||
4048 | /// with the transition. | |||
4049 | /// I.e., is it possible to do Use(Transition) => AnotherUse? | |||
4050 | bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } | |||
4051 | ||||
4052 | /// \brief Record \p ToBePromoted as part of the chain to be promoted. | |||
4053 | void enqueueForPromotion(Instruction *ToBePromoted) { | |||
4054 | InstsToBePromoted.push_back(ToBePromoted); | |||
4055 | } | |||
4056 | ||||
4057 | /// \brief Set the instruction that will be combined with the transition. | |||
4058 | void recordCombineInstruction(Instruction *ToBeCombined) { | |||
4059 | assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); | |||
4060 | CombineInst = ToBeCombined; | |||
4061 | } | |||
4062 | ||||
4063 | /// \brief Promote all the instructions enqueued for promotion if it | |||
4064 | /// is profitable. | |||
4065 | /// \return True if the promotion happened, false otherwise. | |||
4066 | bool promote() { | |||
4067 | // Check if there is something to promote. | |||
4068 | // Right now, if we do not have anything to combine with, | |||
4069 | // we assume the promotion is not profitable. | |||
4070 | if (InstsToBePromoted.empty() || !CombineInst) | |||
4071 | return false; | |||
4072 | ||||
4073 | // Check cost. | |||
4074 | if (!StressStoreExtract && !isProfitableToPromote()) | |||
4075 | return false; | |||
4076 | ||||
4077 | // Promote. | |||
4078 | for (auto &ToBePromoted : InstsToBePromoted) | |||
4079 | promoteImpl(ToBePromoted); | |||
4080 | InstsToBePromoted.clear(); | |||
4081 | return true; | |||
4082 | } | |||
4083 | }; | |||
4084 | } // End of anonymous namespace. | |||
4085 | ||||
4086 | void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { | |||
4087 | // At this point, we know that all the operands of ToBePromoted but Def | |||
4088 | // can be statically promoted. | |||
4089 | // For Def, we need to use its parameter in ToBePromoted: | |||
4090 | // b = ToBePromoted ty1 a | |||
4091 | // Def = Transition ty1 b to ty2 | |||
4092 | // Move the transition down. | |||
4093 | // 1. Replace all uses of the promoted operation by the transition. | |||
4094 | // = ... b => = ... Def. | |||
4095 | assert(ToBePromoted->getType() == Transition->getType() && | |||
4096 | "The type of the result of the transition does not match " | |||
4097 | "the final type"); | |||
4098 | ToBePromoted->replaceAllUsesWith(Transition); | |||
4099 | // 2. Update the type of the uses. | |||
4100 | // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. | |||
4101 | Type *TransitionTy = getTransitionType(); | |||
4102 | ToBePromoted->mutateType(TransitionTy); | |||
4103 | // 3. Update all the operands of the promoted operation with promoted | |||
4104 | // operands. | |||
4105 | // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. | |||
4106 | for (Use &U : ToBePromoted->operands()) { | |||
4107 | Value *Val = U.get(); | |||
4108 | Value *NewVal = nullptr; | |||
4109 | if (Val == Transition) | |||
4110 | NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
4111 | else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || | |||
4112 | isa<ConstantFP>(Val)) { | |||
4113 | // Use a splat constant if it is not safe to use undef. | |||
4114 | NewVal = getConstantVector( | |||
4115 | cast<Constant>(Val), | |||
4116 | isa<UndefValue>(Val) || | |||
4117 | canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); | |||
4118 | } else | |||
4119 | llvm_unreachable("Did you modify shouldPromote and forget to update " | |||
4120 | "this?"); | |||
4121 | ToBePromoted->setOperand(U.getOperandNo(), NewVal); | |||
4122 | } | |||
4123 | Transition->removeFromParent(); | |||
4124 | Transition->insertAfter(ToBePromoted); | |||
4125 | Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); | |||
4126 | } | |||
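// Illustrative before/after for promoteImpl (hypothetical IR):
//   %ext = extractelement <2 x i32> %v, i32 1    ; Transition
//   %add = add i32 %ext, 7                       ; ToBePromoted
// becomes
//   %add = add <2 x i32> %v, <i32 undef, i32 7>
//   %ext = extractelement <2 x i32> %add, i32 1
// The constant operand is rebuilt with getConstantVector; undef is used in
// the lanes that are never extracted because add cannot trap on undef.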
4127 | ||||
4128 | // See if we can speculate calls to intrinsic cttz/ctlz. | |||
4129 | // | |||
4130 | // Example: | |||
4131 | // entry: | |||
4132 | // ... | |||
4133 | // %cmp = icmp eq i64 %val, 0 | |||
4134 | // br i1 %cmp, label %end.bb, label %then.bb | |||
4135 | // | |||
4136 | // then.bb: | |||
4137 | // %c = tail call i64 @llvm.cttz.i64(i64 %val, i1 true) | |||
4138 | // br label %EndBB | |||
4139 | // | |||
4140 | // end.bb: | |||
4141 | // %cond = phi i64 [ %c, %then.bb ], [ 64, %entry ] | |||
4142 | // | |||
4143 | // ==> | |||
4144 | // | |||
4145 | // entry: | |||
4146 | // ... | |||
4147 | // %c = tail call i64 @llvm.cttz.i64(i64 %val, i1 false) | |||
4148 | // | |||
4149 | static bool OptimizeBranchInst(BranchInst *BrInst, const TargetLowering &TLI) { | |||
4150 | assert(BrInst->isConditional() && "Expected a conditional branch!"); | |||
4151 | BasicBlock *ThenBB = BrInst->getSuccessor(1); | |||
4152 | BasicBlock *EndBB = BrInst->getSuccessor(0); | |||
4153 | ||||
4154 | // See if ThenBB contains only one instruction (excluding the | |||
4155 | // terminator and DbgInfoIntrinsic calls). | |||
4156 | IntrinsicInst *II = nullptr; | |||
4157 | CastInst *CI = nullptr; | |||
4158 | for (BasicBlock::iterator I = ThenBB->begin(), | |||
4159 | E = std::prev(ThenBB->end()); I != E; ++I) { | |||
4160 | // Skip debug info. | |||
4161 | if (isa<DbgInfoIntrinsic>(I)) | |||
4162 | continue; | |||
4163 | ||||
4164 | // Check if this is a zero extension or a truncate of a previously | |||
4165 | // matched call to intrinsic cttz/ctlz. | |||
4166 | if (II) { | |||
4167 | // Early exit if we already found a "free" zero extend/truncate. | |||
4168 | if (CI) | |||
4169 | return false; | |||
4170 | ||||
4171 | Type *SrcTy = II->getType(); | |||
4172 | Type *DestTy = I->getType(); | |||
4173 | Value *V; | |||
4174 | ||||
4175 | if (match(cast<Instruction>(I), m_ZExt(m_Value(V))) && V == II) { | |||
4176 | // Speculate this zero extend only if it is "free" for the target. | |||
4177 | if (TLI.isZExtFree(SrcTy, DestTy)) { | |||
4178 | CI = cast<CastInst>(I); | |||
4179 | continue; | |||
4180 | } | |||
4181 | } else if (match(cast<Instruction>(I), m_Trunc(m_Value(V))) && V == II) { | |||
4182 | // Speculate this truncate only if it is "free" for the target. | |||
4183 | if (TLI.isTruncateFree(SrcTy, DestTy)) { | |||
4184 | CI = cast<CastInst>(I); | |||
4185 | continue; | |||
4186 | } | |||
4187 | } else { | |||
4188 | // Avoid speculating more than one instruction. | |||
4189 | return false; | |||
4190 | } | |||
4191 | } | |||
4192 | ||||
4193 | // See if this is a call to intrinsic cttz/ctlz. | |||
4194 | if (match(cast<Instruction>(I), m_Intrinsic<Intrinsic::cttz>())) { | |||
4195 | // Avoid speculating expensive intrinsic calls. | |||
4196 | if (!TLI.isCheapToSpeculateCttz()) | |||
4197 | return false; | |||
4198 | } | |||
4199 | else if (match(cast<Instruction>(I), m_Intrinsic<Intrinsic::ctlz>())) { | |||
4200 | // Avoid speculating expensive intrinsic calls. | |||
4201 | if (!TLI.isCheapToSpeculateCtlz()) | |||
4202 | return false; | |||
4203 | } else | |||
4204 | return false; | |||
4205 | ||||
4206 | II = cast<IntrinsicInst>(I); | |||
4207 | } | |||
4208 | ||||
4209 | // Look for PHI nodes with 'II' as the incoming value from 'ThenBB'. | |||
4210 | BasicBlock *EntryBB = BrInst->getParent(); | |||
4211 | for (BasicBlock::iterator I = EndBB->begin(); | |||
4212 | PHINode *PN = dyn_cast<PHINode>(I); ++I) { | |||
4213 | Value *ThenV = PN->getIncomingValueForBlock(ThenBB); | |||
4214 | Value *OrigV = PN->getIncomingValueForBlock(EntryBB); | |||
4215 | ||||
4216 | if (!OrigV) | |||
4217 | return false; | |||
4218 | ||||
4219 | if (ThenV != II && (!CI || ThenV != CI)) | |||
4220 | return false; | |||
4221 | ||||
4222 | if (ConstantInt *CInt = dyn_cast<ConstantInt>(OrigV)) { | |||
4223 | unsigned BitWidth = II->getType()->getIntegerBitWidth(); | |||
| ||||
4224 | ||||
4225 | // Don't try to simplify this phi node if 'ThenV' is a cttz/ctlz | |||
4226 | // intrinsic call but 'OrigV' is not equal to the size in bits of the | |||
4227 | // value passed to the cttz/ctlz. | |||
4228 | if (CInt->getValue() != BitWidth) | |||
4229 | return false; | |||
4230 | ||||
4231 | // Hoist the call to cttz/ctlz from ThenBB into EntryBB. | |||
4232 | EntryBB->getInstList().splice(BrInst, ThenBB->getInstList(), | |||
4233 | ThenBB->begin(), std::prev(ThenBB->end())); | |||
4234 | ||||
4235 | // Update PN setting ThenV as the incoming value from both 'EntryBB' | |||
4236 | // and 'ThenBB'. Eventually, method 'OptimizeInst' will fold this | |||
4237 | // phi node if all the incoming values are the same. | |||
4238 | PN->setIncomingValue(PN->getBasicBlockIndex(EntryBB), ThenV); | |||
4239 | PN->setIncomingValue(PN->getBasicBlockIndex(ThenBB), ThenV); | |||
4240 | ||||
4241 | // Clear the 'undef on zero' flag of the cttz/ctlz intrinsic call. | |||
4242 | if (cast<ConstantInt>(II->getArgOperand(1))->isOne()) { | |||
4243 | Type *Ty = II->getArgOperand(0)->getType(); | |||
4244 | Value *Args[] = { II->getArgOperand(0), | |||
4245 | ConstantInt::getFalse(II->getContext()) }; | |||
4246 | Module *M = EntryBB->getParent()->getParent(); | |||
4247 | Value *IF = Intrinsic::getDeclaration(M, II->getIntrinsicID(), Ty); | |||
4248 | IRBuilder<> Builder(II); | |||
4249 | Instruction *NewI = Builder.CreateCall(IF, Args); | |||
4250 | ||||
4251 | // Replace the old call to cttz/ctlz. | |||
4252 | II->replaceAllUsesWith(NewI); | |||
4253 | II->eraseFromParent(); | |||
4254 | } | |||
4255 | ||||
4256 | // Update BrInst condition so that the branch to EndBB is always taken. | |||
4257 | // Later on, method 'ConstantFoldTerminator' will simplify this branch | |||
4258 | // replacing it with a direct branch to 'EndBB'. | |||
4259 | // As a side effect, CodeGenPrepare will attempt to simplify the control | |||
4260 | // flow graph by deleting basic block 'ThenBB' and merging 'EntryBB' into | |||
4261 | // 'EndBB' (calling method 'EliminateFallThrough'). | |||
4262 | BrInst->setCondition(ConstantInt::getTrue(BrInst->getContext())); | |||
4263 | return true; | |||
4264 | } | |||
4265 | } | |||
4266 | ||||
4267 | return false; | |||
4268 | } | |||
4269 | ||||
4270 | /// Some targets can do store(extractelement) with one instruction. | |||
4271 | /// Try to push the extractelement towards the stores when the target | |||
4272 | /// has this feature and this is profitable. | |||
4273 | bool CodeGenPrepare::OptimizeExtractElementInst(Instruction *Inst) { | |||
4274 | unsigned CombineCost = UINT_MAX; | |||
4275 | if (DisableStoreExtract || !TLI || | |||
4276 | (!StressStoreExtract && | |||
4277 | !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), | |||
4278 | Inst->getOperand(1), CombineCost))) | |||
4279 | return false; | |||
4280 | ||||
4281 | // At this point we know that Inst is a vector to scalar transition. | |||
4282 | // Try to move it down the def-use chain, until: | |||
4283 | // - We can combine the transition with its single use | |||
4284 | // => we got rid of the transition. | |||
4285 | // - We escape the current basic block | |||
4286 | // => we would need to check that we are moving it at a cheaper place and | |||
4287 | // we do not do that for now. | |||
4288 | BasicBlock *Parent = Inst->getParent(); | |||
4289 | DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n'); | |||
4290 | VectorPromoteHelper VPH(*TLI, *TTI, Inst, CombineCost); | |||
4291 | // If the transition has more than one use, assume this is not going to be | |||
4292 | // beneficial. | |||
4293 | while (Inst->hasOneUse()) { | |||
4294 | Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); | |||
4295 | DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n'); | |||
4296 | ||||
4297 | if (ToBePromoted->getParent() != Parent) { | |||
4298 | DEBUG(dbgs() << "Instruction to promote is in a different block (" | |||
4299 | << ToBePromoted->getParent()->getName() | |||
4300 | << ") than the transition (" << Parent->getName() << ").\n"); | |||
4301 | return false; | |||
4302 | } | |||
4303 | ||||
4304 | if (VPH.canCombine(ToBePromoted)) { | |||
4305 | DEBUG(dbgs() << "Assume " << *Inst << '\n' | |||
4306 | << "will be combined with: " << *ToBePromoted << '\n'); | |||
4307 | VPH.recordCombineInstruction(ToBePromoted); | |||
4308 | bool Changed = VPH.promote(); | |||
4309 | NumStoreExtractExposed += Changed; | |||
4310 | return Changed; | |||
4311 | } | |||
4312 | ||||
4313 | DEBUG(dbgs() << "Try promoting.\n"); | |||
4314 | if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) | |||
4315 | return false; | |||
4316 | ||||
4317 | DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n"); | |||
4318 | ||||
4319 | VPH.enqueueForPromotion(ToBePromoted); | |||
4320 | Inst = ToBePromoted; | |||
4321 | } | |||
4322 | return false; | |||
4323 | } | |||
4324 | ||||
4325 | bool CodeGenPrepare::OptimizeInst(Instruction *I, bool& ModifiedDT) { | |||
4326 | if (PHINode *P = dyn_cast<PHINode>(I)) { | |||
| ||||
4327 | // It is possible for very late stage optimizations (such as SimplifyCFG) | |||
4328 | // to introduce PHI nodes too late to be cleaned up. If we detect such a | |||
4329 | // trivial PHI, go ahead and zap it here. | |||
4330 | if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr, | |||
4331 | TLInfo, DT)) { | |||
4332 | P->replaceAllUsesWith(V); | |||
4333 | P->eraseFromParent(); | |||
4334 | ++NumPHIsElim; | |||
4335 | return true; | |||
4336 | } | |||
4337 | return false; | |||
4338 | } | |||
4339 | ||||
4340 | if (CastInst *CI = dyn_cast<CastInst>(I)) { | |||
4341 | // If the source of the cast is a constant, then this should have | |||
4342 | // already been constant folded. The only reason NOT to constant fold | |||
4343 | // it is if something (e.g. LSR) was careful to place the constant | |||
4344 | // evaluation in a block other than the one that uses it (e.g. to hoist | |||
4345 | // the address of globals out of a loop). If this is the case, we don't | |||
4346 | // want to forward-subst the cast. | |||
4347 | if (isa<Constant>(CI->getOperand(0))) | |||
4348 | return false; | |||
4349 | ||||
4350 | if (TLI && OptimizeNoopCopyExpression(CI, *TLI)) | |||
4351 | return true; | |||
4352 | ||||
4353 | if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { | |||
4354 | // Sink a zext or sext into its user blocks if the target type doesn't | |||
4355 | // fit in one register. | |||
4356 | if (TLI && TLI->getTypeAction(CI->getContext(), | |||
4357 | TLI->getValueType(CI->getType())) == | |||
4358 | TargetLowering::TypeExpandInteger) { | |||
4359 | return SinkCast(CI); | |||
4360 | } else { | |||
4361 | bool MadeChange = MoveExtToFormExtLoad(I); | |||
4362 | return MadeChange | OptimizeExtUses(I); | |||
4363 | } | |||
4364 | } | |||
4365 | return false; | |||
4366 | } | |||
4367 | ||||
4368 | if (CmpInst *CI = dyn_cast<CmpInst>(I)) | |||
4369 | if (!TLI || !TLI->hasMultipleConditionRegisters()) | |||
4370 | return OptimizeCmpExpression(CI); | |||
4371 | ||||
4372 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | |||
4373 | if (TLI) | |||
4374 | return OptimizeMemoryInst(I, I->getOperand(0), LI->getType()); | |||
4375 | return false; | |||
4376 | } | |||
4377 | ||||
4378 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) { | |||
4379 | if (TLI) | |||
4380 | return OptimizeMemoryInst(I, SI->getOperand(1), | |||
4381 | SI->getOperand(0)->getType()); | |||
4382 | return false; | |||
4383 | } | |||
4384 | ||||
4385 | BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); | |||
4386 | ||||
4387 | if (BinOp && (BinOp->getOpcode() == Instruction::AShr || | |||
4388 | BinOp->getOpcode() == Instruction::LShr)) { | |||
4389 | ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); | |||
4390 | if (TLI && CI && TLI->hasExtractBitsInsn()) | |||
4391 | return OptimizeExtractBits(BinOp, CI, *TLI); | |||
4392 | ||||
4393 | return false; | |||
4394 | } | |||
4395 | ||||
4396 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { | |||
4397 | if (GEPI->hasAllZeroIndices()) { | |||
4398 | // The GEP operand must be a pointer, so must its result -> BitCast. | |||
4399 | Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), | |||
4400 | GEPI->getName(), GEPI); | |||
4401 | GEPI->replaceAllUsesWith(NC); | |||
4402 | GEPI->eraseFromParent(); | |||
4403 | ++NumGEPsElim; | |||
4404 | OptimizeInst(NC, ModifiedDT); | |||
4405 | return true; | |||
4406 | } | |||
4407 | return false; | |||
4408 | } | |||
4409 | ||||
4410 | if (CallInst *CI = dyn_cast<CallInst>(I)) | |||
4411 | return OptimizeCallInst(CI, ModifiedDT); | |||
4412 | ||||
4413 | if (SelectInst *SI = dyn_cast<SelectInst>(I)) | |||
4414 | return OptimizeSelectInst(SI); | |||
4415 | ||||
4416 | if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) | |||
4417 | return OptimizeShuffleVectorInst(SVI); | |||
4418 | ||||
4419 | if (isa<ExtractElementInst>(I)) | |||
4420 | return OptimizeExtractElementInst(I); | |||
4421 | ||||
4422 | if (BranchInst *BI = dyn_cast<BranchInst>(I)) { | |||
4423 | if (TLI && BI->isConditional() && BI->getCondition()->hasOneUse()) { | |||
4424 | // Check if the branch condition compares a value against zero. | |||
4425 | if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition())) { | |||
4426 | if (ICI->getPredicate() == ICmpInst::ICMP_EQ && | |||
4427 | match(ICI->getOperand(1), m_Zero())) { | |||
4428 | BasicBlock *ThenBB = BI->getSuccessor(1); | |||
4429 | BasicBlock *EndBB = BI->getSuccessor(0); | |||
4430 | ||||
4431 | // Check if ThenBB is only reachable from this basic block; also, | |||
4432 | // check if EndBB has more than one predecessor. | |||
4433 | if (ThenBB->getSinglePredecessor() && | |||
4434 | !EndBB->getSinglePredecessor()) { | |||
4435 | TerminatorInst *TI = ThenBB->getTerminator(); | |||
4436 | ||||
4437 | if (TI->getNumSuccessors() == 1 && TI->getSuccessor(0) == EndBB && | |||
4438 | // Try to speculate calls to intrinsic cttz/ctlz from 'ThenBB'. | |||
4439 | OptimizeBranchInst(BI, *TLI)) { | |||
4440 | ModifiedDT = true; | |||
4441 | return true; | |||
4442 | } | |||
4443 | } | |||
4444 | } | |||
4445 | } | |||
4446 | } | |||
4447 | return false; | |||
4448 | } | |||
4449 | ||||
4450 | return false; | |||
4451 | } | |||
4452 | ||||
4453 | // In this pass we look for GEP and cast instructions that are used | |||
4454 | // across basic blocks and rewrite them to improve basic-block-at-a-time | |||
4455 | // selection. | |||
4456 | bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB, bool& ModifiedDT) { | |||
4457 | SunkAddrs.clear(); | |||
4458 | bool MadeChange = false; | |||
4459 | ||||
4460 | CurInstIterator = BB.begin(); | |||
4461 | while (CurInstIterator != BB.end()) { | |||
4462 | MadeChange |= OptimizeInst(CurInstIterator++, ModifiedDT); | |||
4463 | if (ModifiedDT) | |||
4464 | return true; | |||
4465 | } | |||
4466 | MadeChange |= DupRetToEnableTailCallOpts(&BB); | |||
4467 | ||||
4468 | return MadeChange; | |||
4469 | } | |||
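
The post-increment in OptimizeInst(CurInstIterator++, ModifiedDT) is what keeps this walk valid if the callee erases the instruction it was handed. A minimal standard-library sketch of the same erase-safe idiom (hypothetical container and predicate, not part of this pass):

#include <list>

// Advance the iterator before the element can be erased, mirroring the
// CurInstIterator++ pattern above: erasing Cur leaves It untouched.
void eraseEvens(std::list<int> &L) {
  for (auto It = L.begin(); It != L.end();) {
    auto Cur = It++;   // step past the element first
    if (*Cur % 2 == 0)
      L.erase(Cur);    // safe: only Cur is invalidated
  }
}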
4470 | ||||
4471 | // If an llvm.dbg.value is far away from the value it describes, ISel may | |||
4472 | // not be able to handle it properly. ISel drops an llvm.dbg.value if it | |||
4473 | // cannot find a node corresponding to the value. | |||
4474 | bool CodeGenPrepare::PlaceDbgValues(Function &F) { | |||
4475 | bool MadeChange = false; | |||
4476 | for (BasicBlock &BB : F) { | |||
4477 | Instruction *PrevNonDbgInst = nullptr; | |||
4478 | for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) { | |||
4479 | Instruction *Insn = BI++; | |||
4480 | DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn); | |||
4481 | // Leave dbg.values that refer to an alloca alone. These | |||
4482 | // intrinsics describe the address of a variable (= the alloca) | |||
4483 | // being taken. They should not be moved next to the alloca | |||
4484 | // (and to the beginning of the scope), but rather stay close to | |||
4485 | // where said address is used. | |||
4486 | if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) { | |||
4487 | PrevNonDbgInst = Insn; | |||
4488 | continue; | |||
4489 | } | |||
4490 | ||||
4491 | Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue()); | |||
4492 | if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) { | |||
4493 | DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI); | |||
4494 | DVI->removeFromParent(); | |||
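// PHI nodes must stay grouped at the top of their block, so the dbg.value
// cannot be placed immediately after one; drop it at the block's first
// insertion point instead.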
4495 | if (isa<PHINode>(VI)) | |||
4496 | DVI->insertBefore(VI->getParent()->getFirstInsertionPt()); | |||
4497 | else | |||
4498 | DVI->insertAfter(VI); | |||
4499 | MadeChange = true; | |||
4500 | ++NumDbgValueMoved; | |||
4501 | } | |||
4502 | } | |||
4503 | } | |||
4504 | return MadeChange; | |||
4505 | } | |||
4506 | ||||
4507 | // If there is a sequence that branches based on comparing a single bit | |||
4508 | // against zero that can be combined into a single instruction, and the | |||
4509 | // target supports folding these into a single instruction, sink the | |||
4510 | // mask and compare into the branch uses. Do this before OptimizeBlock -> | |||
4511 | // OptimizeInst -> OptimizeCmpExpression, which perturbs the pattern being | |||
4512 | // searched for. | |||
4513 | bool CodeGenPrepare::sinkAndCmp(Function &F) { | |||
4514 | if (!EnableAndCmpSinking) | |||
4515 | return false; | |||
4516 | if (!TLI || !TLI->isMaskAndBranchFoldingLegal()) | |||
4517 | return false; | |||
4518 | bool MadeChange = false; | |||
4519 | for (Function::iterator I = F.begin(), E = F.end(); I != E; ) { | |||
4520 | BasicBlock *BB = I++; | |||
4521 | ||||
4522 | // Does this BB end with the following? | |||
4523 | // %andVal = and %val, #single-bit-set | |||
4524 | // %icmpVal = icmp %andVal, 0 | |||
4525 | // br i1 %icmpVal, label %dest1, label %dest2 | |||
4526 | BranchInst *Brcc = dyn_cast<BranchInst>(BB->getTerminator()); | |||
4527 | if (!Brcc || !Brcc->isConditional()) | |||
4528 | continue; | |||
4529 | ICmpInst *Cmp = dyn_cast<ICmpInst>(Brcc->getOperand(0)); | |||
4530 | if (!Cmp || Cmp->getParent() != BB) | |||
4531 | continue; | |||
4532 | ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1)); | |||
4533 | if (!Zero || !Zero->isZero()) | |||
4534 | continue; | |||
4535 | Instruction *And = dyn_cast<Instruction>(Cmp->getOperand(0)); | |||
4536 | if (!And || And->getOpcode() != Instruction::And || And->getParent() != BB) | |||
4537 | continue; | |||
4538 | ConstantInt* Mask = dyn_cast<ConstantInt>(And->getOperand(1)); | |||
4539 | if (!Mask || !Mask->getUniqueInteger().isPowerOf2()) | |||
4540 | continue; | |||
4541 | DEBUG(dbgs() << "found and; icmp ?,0; brcc\n"); DEBUG(BB->dump()); | |||
4542 | ||||
4543 | // Push the "and; icmp" for any users that are conditional branches. | |||
4544 | // Since there can only be one branch use per BB, we don't need to keep | |||
4545 | // track of which BBs we insert into. | |||
4546 | for (Value::use_iterator UI = Cmp->use_begin(), E = Cmp->use_end(); | |||
4547 | UI != E; ) { | |||
4548 | Use &TheUse = *UI; | |||
4549 | // Find brcc use. | |||
4550 | BranchInst *BrccUser = dyn_cast<BranchInst>(TheUse.getUser()); | |||
4551 | ++UI; | |||
4552 | if (!BrccUser || !BrccUser->isConditional()) | |||
4553 | continue; | |||
4554 | BasicBlock *UserBB = BrccUser->getParent(); | |||
4555 | if (UserBB == BB) continue; | |||
4556 | DEBUG(dbgs() << "found Brcc use\n"); | |||
4557 | ||||
4558 | // Sink the "and; icmp" to use. | |||
4559 | MadeChange = true; | |||
4560 | BinaryOperator *NewAnd = | |||
4561 | BinaryOperator::CreateAnd(And->getOperand(0), And->getOperand(1), "", | |||
4562 | BrccUser); | |||
4563 | CmpInst *NewCmp = | |||
4564 | CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(), NewAnd, Zero, | |||
4565 | "", BrccUser); | |||
4566 | TheUse = NewCmp; | |||
4567 | ++NumAndCmpsMoved; | |||
4568 | DEBUG(BrccUser->getParent()->dump()); | |||
4569 | } | |||
4570 | } | |||
4571 | return MadeChange; | |||
4572 | } | |||
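
The "#single-bit-set" requirement above is checked via APInt::isPowerOf2() on the and's constant operand. For a plain integer the same test looks like this (a standalone sketch; the helper name is made up):

#include <cassert>
#include <cstdint>

// A value with exactly one bit set satisfies V != 0 && (V & (V - 1)) == 0:
// subtracting 1 clears the lone set bit and sets everything below it.
static bool isSingleBitMask(uint64_t V) {
  return V != 0 && (V & (V - 1)) == 0;
}

int main() {
  assert(isSingleBitMask(0x40));   // one bit set
  assert(!isSingleBitMask(0x41));  // two bits set
  assert(!isSingleBitMask(0));     // zero has no bits set
  return 0;
}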
4573 | ||||
4574 | /// \brief Retrieve the probabilities of a conditional branch. Returns true on | |||
4575 | /// success, or returns false if no or invalid metadata was found. | |||
4576 | static bool extractBranchMetadata(BranchInst *BI, | |||
4577 | uint64_t &ProbTrue, uint64_t &ProbFalse) { | |||
4578 | assert(BI->isConditional() && | |||
4579 |        "Looking for probabilities on unconditional branch?"); | |||
4580 | auto *ProfileData = BI->getMetadata(LLVMContext::MD_prof); | |||
4581 | if (!ProfileData || ProfileData->getNumOperands() != 3) | |||
4582 | return false; | |||
4583 | ||||
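// Operand 0 of the !prof metadata is the "branch_weights" string; operands
// 1 and 2 carry the taken and not-taken weights as ConstantInt metadata.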
4584 | const auto *CITrue = | |||
4585 | mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(1)); | |||
4586 | const auto *CIFalse = | |||
4587 | mdconst::dyn_extract<ConstantInt>(ProfileData->getOperand(2)); | |||
4588 | if (!CITrue || !CIFalse) | |||
4589 | return false; | |||
4590 | ||||
4591 | ProbTrue = CITrue->getValue().getZExtValue(); | |||
4592 | ProbFalse = CIFalse->getValue().getZExtValue(); | |||
4593 | ||||
4594 | return true; | |||
4595 | } | |||
4596 | ||||
4597 | /// \brief Scale down both weights to fit into uint32_t. | |||
4598 | static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) { | |||
4599 | uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse; | |||
4600 | uint32_t Scale = (NewMax / UINT32_MAX) + 1; | |||
4601 | NewTrue = NewTrue / Scale; | |||
4602 | NewFalse = NewFalse / Scale; | |||
4603 | } | |||
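
A quick numeric check of the scaling, as a standalone sketch with made-up weights:

#include <cassert>
#include <cstdint>

int main() {
  // NewTrue is the larger weight here, so it determines the scale.
  // 10 billion exceeds UINT32_MAX (4294967295), so Scale becomes
  // 10000000000 / 4294967295 + 1 == 3; dividing both weights by 3 keeps
  // their ratio while making each fit into uint32_t.
  uint64_t NewTrue = 10000000000ULL, NewFalse = 2000000000ULL;
  uint32_t Scale = static_cast<uint32_t>(NewTrue / UINT32_MAX + 1);
  assert(Scale == 3);
  assert(NewTrue / Scale <= UINT32_MAX && NewFalse / Scale <= UINT32_MAX);
  return 0;
}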
4604 | ||||
4605 | /// \brief Some targets prefer to split a conditional branch like: | |||
4606 | /// \code | |||
4607 | /// %0 = icmp ne i32 %a, 0 | |||
4608 | /// %1 = icmp ne i32 %b, 0 | |||
4609 | /// %or.cond = or i1 %0, %1 | |||
4610 | /// br i1 %or.cond, label %TrueBB, label %FalseBB | |||
4611 | /// \endcode | |||
4612 | /// into multiple branch instructions like: | |||
4613 | /// \code | |||
4614 | /// bb1: | |||
4615 | /// %0 = icmp ne i32 %a, 0 | |||
4616 | /// br i1 %0, label %TrueBB, label %bb2 | |||
4617 | /// bb2: | |||
4618 | /// %1 = icmp ne i32 %b, 0 | |||
4619 | /// br i1 %1, label %TrueBB, label %FalseBB | |||
4620 | /// \endcode | |||
4621 | /// This usually allows instruction selection to do even further optimizations | |||
4622 | /// and combine the compare with the branch instruction. Currently this is | |||
4623 | /// applied for targets which have "cheap" jump instructions. | |||
4624 | /// | |||
4625 | /// FIXME: Remove the (equivalent?) implementation in SelectionDAG. | |||
4626 | /// | |||
4627 | bool CodeGenPrepare::splitBranchCondition(Function &F) { | |||
4628 | if (!TM || TM->Options.EnableFastISel != true || | |||
4629 | !TLI || TLI->isJumpExpensive()) | |||
4630 | return false; | |||
4631 | ||||
4632 | bool MadeChange = false; | |||
4633 | for (auto &BB : F) { | |||
4634 | // Does this BB end with the following? | |||
4635 | // %cond1 = icmp|fcmp|binary instruction ... | |||
4636 | // %cond2 = icmp|fcmp|binary instruction ... | |||
4637 | // %cond.or = or|and i1 %cond1, %cond2 | |||
4638 | // br i1 %cond.or, label %dest1, label %dest2 | |||
4639 | BinaryOperator *LogicOp; | |||
4640 | BasicBlock *TBB, *FBB; | |||
4641 | if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB))) | |||
4642 | continue; | |||
4643 | ||||
4644 | unsigned Opc; | |||
4645 | Value *Cond1, *Cond2; | |||
4646 | if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)), | |||
4647 | m_OneUse(m_Value(Cond2))))) | |||
4648 | Opc = Instruction::And; | |||
4649 | else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)), | |||
4650 | m_OneUse(m_Value(Cond2))))) | |||
4651 | Opc = Instruction::Or; | |||
4652 | else | |||
4653 | continue; | |||
4654 | ||||
4655 | if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) || | |||
4656 | !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp()))) | |||
4657 | continue; | |||
4658 | ||||
4659 | DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump())do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Before branch condition splitting\n" ; BB.dump(); } } while (0); | |||
4660 | ||||
4661 | // Create a new BB. | |||
4662 | auto *InsertBefore = std::next(Function::iterator(BB)) | |||
4663 | .getNodePtrUnchecked(); | |||
4664 | auto TmpBB = BasicBlock::Create(BB.getContext(), | |||
4665 | BB.getName() + ".cond.split", | |||
4666 | BB.getParent(), InsertBefore); | |||
4667 | ||||
4668 | // Update the original basic block: make its branch instruction use the | |||
4669 | // first condition directly and remove the now-unneeded and/or instruction. | |||
4670 | auto *Br1 = cast<BranchInst>(BB.getTerminator()); | |||
4671 | Br1->setCondition(Cond1); | |||
4672 | LogicOp->eraseFromParent(); | |||
4673 | ||||
4674 | // Depending on the condition we have to replace either the true or the false | |||
4675 | // successor of the original branch instruction. | |||
4676 | if (Opc == Instruction::And) | |||
4677 | Br1->setSuccessor(0, TmpBB); | |||
4678 | else | |||
4679 | Br1->setSuccessor(1, TmpBB); | |||
4680 | ||||
4681 | // Fill in the new basic block. | |||
4682 | auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB); | |||
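// Move Cond2 out of the original block so the compare sits right next to
// the branch that consumes it; SelectionDAG works one block at a time and
// can only fold the two when they are in the same block.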
4683 | if (auto *I = dyn_cast<Instruction>(Cond2)) { | |||
4684 | I->removeFromParent(); | |||
4685 | I->insertBefore(Br2); | |||
4686 | } | |||
4687 | ||||
4688 | // Update PHI nodes in both successors. The original BB needs to be | |||
4689 | // replaced in one successor's PHI nodes, because the branch now comes from | |||
4690 | // the newly generated BB (TmpBB). In the other successor we need to add one | |||
4691 | // incoming edge to the PHI nodes, because both branch instructions now | |||
4692 | // target the same successor. Depending on the original branch condition | |||
4693 | // (and/or) we have to swap the successors (TrueDest, FalseDest), so that | |||
4694 | // we perform the correct update for the PHI nodes. | |||
4695 | // This doesn't change the successor order of the just created branch | |||
4696 | // instruction (or any other instruction). | |||
4697 | if (Opc == Instruction::Or) | |||
4698 | std::swap(TBB, FBB); | |||
4699 | ||||
4700 | // Replace the old BB with the new BB in TBB's PHI nodes. | |||
4701 | for (auto &I : *TBB) { | |||
4702 | PHINode *PN = dyn_cast<PHINode>(&I); | |||
4703 | if (!PN) | |||
4704 | break; | |||
4705 | int i; | |||
4706 | while ((i = PN->getBasicBlockIndex(&BB)) >= 0) | |||
4707 | PN->setIncomingBlock(i, TmpBB); | |||
4708 | } | |||
4709 | ||||
4710 | // Add another incoming edge from the new BB. | |||
4711 | for (auto &I : *FBB) { | |||
4712 | PHINode *PN = dyn_cast<PHINode>(&I); | |||
4713 | if (!PN) | |||
4714 | break; | |||
4715 | auto *Val = PN->getIncomingValueForBlock(&BB); | |||
4716 | PN->addIncoming(Val, TmpBB); | |||
4717 | } | |||
4718 | ||||
4719 | // Update the branch weights (from SelectionDAGBuilder:: | |||
4720 | // FindMergedConditions). | |||
4721 | if (Opc == Instruction::Or) { | |||
4722 | // Codegen X | Y as: | |||
4723 | // BB1: | |||
4724 | // jmp_if_X TBB | |||
4725 | // jmp TmpBB | |||
4726 | // TmpBB: | |||
4727 | // jmp_if_Y TBB | |||
4728 | // jmp FBB | |||
4729 | // | |||
4730 | ||||
4731 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | |||
4732 | // The requirement is that | |||
4733 | // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB) | |||
4734 | // = TrueProb for original BB. | |||
4735 | // Assuming the original weights are A and B, one choice is to set BB1's | |||
4736 | // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice | |||
4737 | // assumes that | |||
4738 | // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB. | |||
4739 | // Another choice is to assume TrueProb for BB1 equals TrueProb for | |||
4740 | // TmpBB, but the math is more complicated. | |||
4741 | uint64_t TrueWeight, FalseWeight; | |||
4742 | if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) { | |||
4743 | uint64_t NewTrueWeight = TrueWeight; | |||
4744 | uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight; | |||
4745 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
4746 | Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) | |||
4747 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
4748 | ||||
4749 | NewTrueWeight = TrueWeight; | |||
4750 | NewFalseWeight = 2 * FalseWeight; | |||
4751 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
4752 | Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) | |||
4753 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
4754 | } | |||
4755 | } else { | |||
4756 | // Codegen X & Y as: | |||
4757 | // BB1: | |||
4758 | // jmp_if_X TmpBB | |||
4759 | // jmp FBB | |||
4760 | // TmpBB: | |||
4761 | // jmp_if_Y TBB | |||
4762 | // jmp FBB | |||
4763 | // | |||
4764 | // This requires creation of TmpBB after CurBB. | |||
4765 | ||||
4766 | // We have flexibility in setting Prob for BB1 and Prob for TmpBB. | |||
4767 | // The requirement is that | |||
4768 | // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB) | |||
4769 | // = FalseProb for original BB. | |||
4770 | // Assuming the original weights are A and B, one choice is to set BB1's | |||
4771 | // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice | |||
4772 | // assumes that | |||
4773 | // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB. | |||
4774 | uint64_t TrueWeight, FalseWeight; | |||
4775 | if (extractBranchMetadata(Br1, TrueWeight, FalseWeight)) { | |||
4776 | uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight; | |||
4777 | uint64_t NewFalseWeight = FalseWeight; | |||
4778 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
4779 | Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext()) | |||
4780 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
4781 | ||||
4782 | NewTrueWeight = 2 * TrueWeight; | |||
4783 | NewFalseWeight = FalseWeight; | |||
4784 | scaleWeights(NewTrueWeight, NewFalseWeight); | |||
4785 | Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext()) | |||
4786 | .createBranchWeights(NewTrueWeight, NewFalseWeight)); | |||
4787 | } | |||
4788 | } | |||
4789 | ||||
4790 | // Request DOM Tree update. | |||
4791 | // Note: No point in getting fancy here, since the DT info is never | |||
4792 | // available to CodeGenPrepare and the existing update code is broken | |||
4793 | // anyways. | |||
4794 | ModifiedDT = true; | |||
4795 | ||||
4796 | MadeChange = true; | |||
4797 | ||||
4798 | DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump(); | |||
4799 |       TmpBB->dump()); | |||
4800 | } | |||
4801 | return MadeChange; | |||
4802 | } |
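
The weight choices in both cases above can be sanity-checked numerically. A standalone sketch with made-up weights A and B (not part of the pass):

#include <cassert>
#include <cmath>

int main() {
  // Original conditional branch: weights A (true) and B (false).
  const double A = 7, B = 3;
  const double TrueOrig = A / (A + B), FalseOrig = B / (A + B);

  // Or case: BB1 gets weights (A, A + 2B), TmpBB gets (A, 2B).
  // Requirement from the comment above:
  //   TrueProb(BB1) + FalseProb(BB1) * TrueProb(TmpBB) == TrueProb(orig).
  double TrueBB1 = A / (2 * A + 2 * B);
  double FalseBB1 = (A + 2 * B) / (2 * A + 2 * B);
  double TrueTmp = A / (A + 2 * B);
  assert(std::fabs(TrueBB1 + FalseBB1 * TrueTmp - TrueOrig) < 1e-12);

  // And case: BB1 gets weights (2A + B, B), TmpBB gets (2A, B).
  // Requirement:
  //   FalseProb(BB1) + TrueProb(BB1) * FalseProb(TmpBB) == FalseProb(orig).
  double FalseBB1And = B / (2 * A + 2 * B);
  double TrueBB1And = (2 * A + B) / (2 * A + 2 * B);
  double FalseTmp = B / (2 * A + B);
  assert(std::fabs(FalseBB1And + TrueBB1And * FalseTmp - FalseOrig) < 1e-12);
  return 0;
}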