File: lib/CodeGen/CodeGenPrepare.cpp
Warning: line 1961, column 60: Called C++ object pointer is uninitialized
//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass munges the code in the input function to better prepare it for
// SelectionDAG-based code generation. This works around limitations in its
// basic-block-at-a-time approach. It should eventually be removed.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/BypassSlowDivision.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "codegenprepare"

STATISTIC(NumBlocksElim, "Number of blocks eliminated");
STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
                      "sunken Cmps");
STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
                       "of sunken Casts");
STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
                          "computations were sunk");
STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
STATISTIC(NumAndsAdded,
          "Number of and mask instructions added to form ext loads");
STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
STATISTIC(NumRetsDup, "Number of return instructions duplicated");
STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");

STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
STATISTIC(NumMemCmpGreaterThanMax,
          "Number of memcmp calls with size greater than max size");
STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");

static cl::opt<bool> DisableBranchOpts(
    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
    cl::desc("Disable branch optimizations in CodeGenPrepare"));

static cl::opt<bool>
    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
                  cl::desc("Disable GC optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableSelectToBranch(
    "disable-cgp-select2branch", cl::Hidden, cl::init(false),
    cl::desc("Disable select to branch conversion."));

static cl::opt<bool> AddrSinkUsingGEPs(
    "addr-sink-using-gep", cl::Hidden, cl::init(true),
    cl::desc("Address sinking in CGP using GEPs."));

static cl::opt<bool> EnableAndCmpSinking(
    "enable-andcmp-sinking", cl::Hidden, cl::init(true),
    cl::desc("Enable sinking and/cmp into branches."));

static cl::opt<bool> DisableStoreExtract(
    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> StressStoreExtract(
    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));

static cl::opt<bool> DisableExtLdPromotion(
    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
             "CodeGenPrepare"));

static cl::opt<bool> StressExtLdPromotion(
    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
             "optimization in CodeGenPrepare"));

static cl::opt<bool> DisablePreheaderProtect(
    "disable-preheader-prot", cl::Hidden, cl::init(false),
    cl::desc("Disable protection against removing loop preheaders"));

static cl::opt<bool> ProfileGuidedSectionPrefix(
    "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("Use profile info to add section prefix for hot/cold functions"));

static cl::opt<unsigned> FreqRatioToSkipMerge(
    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
             "(frequency of destination block) is greater than this ratio"));

static cl::opt<bool> ForceSplitStore(
    "force-split-store", cl::Hidden, cl::init(false),
    cl::desc("Force store splitting no matter what the target query says."));

static cl::opt<bool>
EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
    cl::desc("Enable merging of redundant sexts when one is dominating"
             " the other."), cl::init(true));

static cl::opt<unsigned> MemCmpNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

namespace {

using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
using TypeIsSExt = PointerIntPair<Type *, 1, bool>;
using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
using SExts = SmallVector<Instruction *, 16>;
using ValueToSExts = DenseMap<Value *, SExts>;

class TypePromotionTransaction;

class CodeGenPrepare : public FunctionPass {
  const TargetMachine *TM = nullptr;
  const TargetSubtargetInfo *SubtargetInfo;
  const TargetLowering *TLI = nullptr;
  const TargetRegisterInfo *TRI;
  const TargetTransformInfo *TTI = nullptr;
  const TargetLibraryInfo *TLInfo;
  const LoopInfo *LI;
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;

  /// As we scan instructions optimizing them, this is the next instruction
  /// to optimize. Transforms that can invalidate this should update it.
  BasicBlock::iterator CurInstIterator;

  /// Keeps track of non-local addresses that have been sunk into a block.
  /// This allows us to avoid inserting duplicate code for blocks with
  /// multiple load/stores of the same address.
  ValueMap<Value*, Value*> SunkAddrs;

  /// Keeps track of all instructions inserted for the current function.
  SetOfInstrs InsertedInsts;

  /// Keeps track of the type of each related instruction before its
  /// promotion, for the current function.
  InstrToOrigTy PromotedInsts;

  /// Keep track of instructions removed during promotion.
  SetOfInstrs RemovedInsts;

  /// Keep track of sext chains based on their initial value.
  DenseMap<Value *, Instruction *> SeenChainsForSExt;

  /// Keep track of SExt promoted.
  ValueToSExts ValToSExtendedUses;

  /// True if CFG is modified in any way.
  bool ModifiedDT;

  /// True if optimizing for size.
  bool OptSize;

  /// DataLayout for the Function being processed.
  const DataLayout *DL = nullptr;

public:
  static char ID; // Pass identification, replacement for typeid

  CodeGenPrepare() : FunctionPass(ID) {
    initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "CodeGen Prepare"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: When we can selectively preserve passes, preserve the domtree.
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }

private:
  bool eliminateFallThrough(Function &F);
  bool eliminateMostlyEmptyBlocks(Function &F);
  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
  void eliminateMostlyEmptyBlock(BasicBlock *BB);
  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
                                     bool isPreheader);
  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
  bool optimizeInst(Instruction *I, bool &ModifiedDT);
  bool optimizeMemoryInst(Instruction *I, Value *Addr,
                          Type *AccessTy, unsigned AS);
  bool optimizeInlineAsmInst(CallInst *CS);
  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
  bool optimizeExt(Instruction *&I);
  bool optimizeExtUses(Instruction *I);
  bool optimizeLoadExt(LoadInst *I);
  bool optimizeSelectInst(SelectInst *SI);
  bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
  bool optimizeSwitchInst(SwitchInst *CI);
  bool optimizeExtractElementInst(Instruction *Inst);
  bool dupRetToEnableTailCallOpts(BasicBlock *BB);
  bool placeDbgValues(Function &F);
  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
  bool tryToPromoteExts(TypePromotionTransaction &TPT,
                        const SmallVectorImpl<Instruction *> &Exts,
                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
                        unsigned CreatedInstsCost = 0);
  bool mergeSExts(Function &F);
  bool performAddressTypePromotion(
      Instruction *&Inst,
      bool AllowPromotionWithoutCommonHeader,
      bool HasPromoted, TypePromotionTransaction &TPT,
      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
  bool splitBranchCondition(Function &F);
  bool simplifyOffsetableRelocate(Instruction &I);
  bool splitIndirectCriticalEdges(Function &F);
};

} // end anonymous namespace

char CodeGenPrepare::ID = 0;

INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
                      "Optimize for code generation", false, false)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
                    "Optimize for code generation", false, false)

FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }

bool CodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DL = &F.getParent()->getDataLayout();

  bool EverMadeChange = false;
  // Clear per function information.
  InsertedInsts.clear();
  PromotedInsts.clear();
  BFI.reset();
  BPI.reset();

  ModifiedDT = false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    TM = &TPC->getTM<TargetMachine>();
    SubtargetInfo = TM->getSubtargetImpl(F);
    TLI = SubtargetInfo->getTargetLowering();
    TRI = SubtargetInfo->getRegisterInfo();
  }
  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  OptSize = F.optForSize();

  if (ProfileGuidedSectionPrefix) {
    ProfileSummaryInfo *PSI =
        getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    if (PSI->isFunctionHotInCallGraph(&F))
      F.setSectionPrefix(".hot");
    else if (PSI->isFunctionColdInCallGraph(&F))
      F.setSectionPrefix(".unlikely");
  }

  /// This optimization identifies DIV instructions that can be
  /// profitably bypassed and carried out with a shorter, faster divide.
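  // As an illustrative sketch (not from this file): on a target whose
  // getBypassSlowDivWidths() maps 64 to 32, a division such as
  //   %d = udiv i64 %a, %b
  // can be rewritten by bypassSlowDivision into a runtime test of the high
  // 32 bits of %a and %b, selecting a cheap 32-bit divide when both fit and
  // falling back to the original 64-bit divide otherwise.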
  if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
    const DenseMap<unsigned int, unsigned int> &BypassWidths =
       TLI->getBypassSlowDivWidths();
    BasicBlock* BB = &*F.begin();
    while (BB != nullptr) {
      // bypassSlowDivision may create new BBs, but we don't want to reapply the
      // optimization to those blocks.
      BasicBlock* Next = BB->getNextNode();
      EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
      BB = Next;
    }
  }

  // Eliminate blocks that contain only PHI nodes and an
  // unconditional branch.
  EverMadeChange |= eliminateMostlyEmptyBlocks(F);

  // If llvm.dbg.value is far away from the value, iSel may not be able to
  // handle it properly. iSel will drop llvm.dbg.value if it cannot find a
  // node corresponding to the value.
  EverMadeChange |= placeDbgValues(F);

  if (!DisableBranchOpts)
    EverMadeChange |= splitBranchCondition(F);

  // Split some critical edges where one of the sources is an indirect branch,
  // to help generate sane code for PHIs involving such edges.
  EverMadeChange |= splitIndirectCriticalEdges(F);

  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    SeenChainsForSExt.clear();
    ValToSExtendedUses.clear();
    RemovedInsts.clear();
    for (Function::iterator I = F.begin(); I != F.end(); ) {
      BasicBlock *BB = &*I++;
      bool ModifiedDTOnIteration = false;
      MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);

      // Restart BB iteration if the dominator tree of the Function was changed.
      if (ModifiedDTOnIteration)
        break;
    }
    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
      MadeChange |= mergeSExts(F);

    // Really free removed instructions during promotion.
    for (Instruction *I : RemovedInsts)
      I->deleteValue();

    EverMadeChange |= MadeChange;
  }

  SunkAddrs.clear();

  if (!DisableBranchOpts) {
    MadeChange = false;
    SmallPtrSet<BasicBlock*, 8> WorkList;
    for (BasicBlock &BB : F) {
      SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
      MadeChange |= ConstantFoldTerminator(&BB, true);
      if (!MadeChange) continue;

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Delete the dead blocks and any of their dead successors.
    MadeChange |= !WorkList.empty();
    while (!WorkList.empty()) {
      BasicBlock *BB = *WorkList.begin();
      WorkList.erase(BB);
      SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));

      DeleteDeadBlock(BB);

      for (SmallVectorImpl<BasicBlock*>::iterator
             II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
        if (pred_begin(*II) == pred_end(*II))
          WorkList.insert(*II);
    }

    // Merge pairs of basic blocks with unconditional branches, connected by
    // a single edge.
    if (EverMadeChange || MadeChange)
      MadeChange |= eliminateFallThrough(F);

    EverMadeChange |= MadeChange;
  }

  if (!DisableGCOpts) {
    SmallVector<Instruction *, 2> Statepoints;
    for (BasicBlock &BB : F)
      for (Instruction &I : BB)
        if (isStatepoint(I))
          Statepoints.push_back(&I);
    for (auto &I : Statepoints)
      EverMadeChange |= simplifyOffsetableRelocate(*I);
  }

  return EverMadeChange;
}

/// Merge basic blocks which are connected by a single edge, where one of the
/// basic blocks has a single successor pointing to the other basic block,
/// which has a single predecessor.
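///
/// Illustrative sketch (block names hypothetical):
///   bb1:               ; single successor is bb2
///     ...
///     br label %bb2
///   bb2:               ; single predecessor is bb1
///     ...
/// collapses into a single block containing both bodies.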
bool CodeGenPrepare::eliminateFallThrough(Function &F) {
  bool Changed = false;
  // Scan all of the blocks in the function, except for the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    // If the destination block has a single pred, then this is a trivial
    // edge, just collapse it.
    BasicBlock *SinglePred = BB->getSinglePredecessor();

    // Don't merge if BB's address is taken.
    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;

    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
    if (Term && !Term->isConditional()) {
      Changed = true;
      DEBUG(dbgs() << "To merge:\n" << *SinglePred << "\n\n\n");
      // Remember if SinglePred was the entry block of the function.
      // If so, we will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(BB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      // We have erased a block. Update the iterator.
      I = BB->getIterator();
    }
  }
  return Changed;
}

/// Find a destination block from BB if BB is a mergeable empty block.
BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
  // If this block doesn't end with an uncond branch, ignore it.
  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isUnconditional())
    return nullptr;

  // If the instruction before the branch (skipping debug info) isn't a phi
  // node, then other stuff is happening here.
  BasicBlock::iterator BBI = BI->getIterator();
  if (BBI != BB->begin()) {
    --BBI;
    while (isa<DbgInfoIntrinsic>(BBI)) {
      if (BBI == BB->begin())
        break;
      --BBI;
    }
    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
      return nullptr;
  }

  // Do not break infinite loops.
  BasicBlock *DestBB = BI->getSuccessor(0);
  if (DestBB == BB)
    return nullptr;

  if (!canMergeBlocks(BB, DestBB))
    DestBB = nullptr;

  return DestBB;
}

// Return the unique indirectbr predecessor of a block. This may return null
// even if such a predecessor exists, if it's not useful for splitting.
// If a predecessor is found, OtherPreds will contain all other (non-indirectbr)
// predecessors of BB.
static BasicBlock *
findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) {
  // If the block doesn't have any PHIs, we don't care about it, since there's
  // no point in splitting it.
  PHINode *PN = dyn_cast<PHINode>(BB->begin());
  if (!PN)
    return nullptr;

  // Verify we have exactly one IBR predecessor.
  // Conservatively bail out if one of the other predecessors is not a "regular"
  // terminator (that is, not a switch or a br).
  BasicBlock *IBB = nullptr;
  for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) {
    BasicBlock *PredBB = PN->getIncomingBlock(Pred);
    TerminatorInst *PredTerm = PredBB->getTerminator();
    switch (PredTerm->getOpcode()) {
    case Instruction::IndirectBr:
      if (IBB)
        return nullptr;
      IBB = PredBB;
      break;
    case Instruction::Br:
    case Instruction::Switch:
      OtherPreds.push_back(PredBB);
      continue;
    default:
      return nullptr;
    }
  }

  return IBB;
}

// Split critical edges where the source of the edge is an indirectbr
// instruction. This isn't always possible, but we can handle some easy cases.
// This is useful because MI is unable to split such critical edges,
// which means it will not be able to sink instructions along those edges.
// This is especially painful for indirect branches with many successors, where
// we end up having to prepare all outgoing values in the origin block.
//
// Our normal algorithm for splitting critical edges requires us to update
// the outgoing edges of the edge origin block, but for an indirectbr this
// is hard, since it would require finding and updating the block addresses
// the indirect branch uses. But if a block only has a single indirectbr
// predecessor, with the others being regular branches, we can do it in a
// different way.
// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
// We can split D into D0 and D1, where D0 contains only the PHIs from D,
// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
// create the following structure:
// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
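//
// As a rough IR sketch of that structure (names hypothetical, not taken from
// this pass's actual output):
//   D0A:                              ; preds = %A, %B
//     %v.clone = phi i32 [ %x, %A ], [ %y, %B ]
//     br label %D1
//   D0B:                              ; preds = %I
//     %v.ind = phi i32 [ %z, %I ]
//     br label %D1
//   D1:                               ; preds = %D0A, %D0B
//     %v.merge = phi i32 [ %v.clone, %D0A ], [ %v.ind, %D0B ]
//     ... rest of D's body ...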
bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) {
  // Check whether the function has any indirectbrs, and collect which blocks
  // they may jump to. Since most functions don't have indirect branches,
  // this lowers the common case's overhead to O(Blocks) instead of O(Edges).
  SmallSetVector<BasicBlock *, 16> Targets;
  for (auto &BB : F) {
    auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator());
    if (!IBI)
      continue;

    for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ)
      Targets.insert(IBI->getSuccessor(Succ));
  }

  if (Targets.empty())
    return false;

  bool Changed = false;
  for (BasicBlock *Target : Targets) {
    SmallVector<BasicBlock *, 16> OtherPreds;
    BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds);
    // If we did not find an indirectbr, or the indirectbr is the only
    // incoming edge, this isn't the kind of edge we're looking for.
    if (!IBRPred || OtherPreds.empty())
      continue;

    // Don't even think about ehpads/landingpads.
    Instruction *FirstNonPHI = Target->getFirstNonPHI();
    if (FirstNonPHI->isEHPad() || Target->isLandingPad())
      continue;

    BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split");
    // It's possible Target was its own successor through an indirectbr.
    // In this case, the indirectbr now comes from BodyBlock.
    if (IBRPred == Target)
      IBRPred = BodyBlock;

    // At this point Target only has PHIs, and BodyBlock has the rest of the
    // block's body. Create a copy of Target that will be used by the "direct"
    // preds.
    ValueToValueMapTy VMap;
    BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F);

    for (BasicBlock *Pred : OtherPreds) {
      // If the target is a loop to itself, then the terminator of the split
      // block needs to be updated.
      if (Pred == Target)
        BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
      else
        Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
    }

    // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that
    // they are clones, so the number of PHIs is the same.
    // (a) Remove the edge coming from IBRPred from the "Direct" PHI
    // (b) Leave that as the only edge in the "Indirect" PHI.
    // (c) Merge the two in the body block.
    BasicBlock::iterator Indirect = Target->begin(),
                         End = Target->getFirstNonPHI()->getIterator();
    BasicBlock::iterator Direct = DirectSucc->begin();
    BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt();

    assert(&*End == Target->getTerminator() &&
           "Block was expected to only contain PHIs");

    while (Indirect != End) {
      PHINode *DirPHI = cast<PHINode>(Direct);
      PHINode *IndPHI = cast<PHINode>(Indirect);

      // Now, clean up - the direct block shouldn't get the indirect value,
      // and vice versa.
      DirPHI->removeIncomingValue(IBRPred);
      Direct++;

      // Advance the pointer here, to avoid invalidation issues when the old
      // PHI is erased.
      Indirect++;

      PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI);
      NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred),
                             IBRPred);

      // Create a PHI in the body block, to merge the direct and indirect
      // predecessors.
      PHINode *MergePHI =
          PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert);
      MergePHI->addIncoming(NewIndPHI, Target);
      MergePHI->addIncoming(DirPHI, DirectSucc);

      IndPHI->replaceAllUsesWith(MergePHI);
      IndPHI->eraseFromParent();
    }

    Changed = true;
  }

  return Changed;
}

/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
/// edges in ways that are non-optimal for isel. Start by eliminating these
/// blocks so we can split them the way we want them.
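///
/// Illustrative sketch of such a block (names hypothetical):
///   bb:                              ; preds = %pred1, %pred2
///     %p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]
///     br label %dest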
bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
  SmallPtrSet<BasicBlock *, 16> Preheaders;
  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
  while (!LoopList.empty()) {
    Loop *L = LoopList.pop_back_val();
    LoopList.insert(LoopList.end(), L->begin(), L->end());
    if (BasicBlock *Preheader = L->getLoopPreheader())
      Preheaders.insert(Preheader);
  }

  bool MadeChange = false;
  // Note that this intentionally skips the entry block.
  for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
    BasicBlock *BB = &*I++;
    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
    if (!DestBB ||
        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
      continue;

    eliminateMostlyEmptyBlock(BB);
    MadeChange = true;
  }
  return MadeChange;
}

bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
                                                   BasicBlock *DestBB,
                                                   bool isPreheader) {
  // Do not delete loop preheaders if doing so would create a critical edge.
  // Loop preheaders can be good locations to spill registers. If the
  // preheader is deleted and we create a critical edge, registers may be
  // spilled in the loop body instead.
  if (!DisablePreheaderProtect && isPreheader &&
      !(BB->getSinglePredecessor() &&
        BB->getSinglePredecessor()->getSingleSuccessor()))
    return false;

  // Try to skip merging if the unique predecessor of BB is terminated by a
  // switch or indirect branch instruction, and BB is used as an incoming block
  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel
  // to add COPY instructions in the predecessor of BB instead of BB (if it is
  // not merged). Note that the critical edge created by merging such blocks
  // won't be split in MachineSink because the jump table is not analyzable. By
  // keeping such an empty block (BB), ISel will place COPY instructions in BB,
  // not in the predecessor of BB.
  BasicBlock *Pred = BB->getUniquePredecessor();
  if (!Pred ||
      !(isa<SwitchInst>(Pred->getTerminator()) ||
        isa<IndirectBrInst>(Pred->getTerminator())))
    return true;

  if (BB->getTerminator() != BB->getFirstNonPHI())
    return true;

  // We use a simple cost heuristic which determines that skipping merging is
  // profitable if the cost of skipping merging is less than the cost of
  // merging: Cost(skipping merging) < Cost(merging BB), where
  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
  // Cost(merging BB) is Freq(Pred) * Cost(Copy).
  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to:
  //   Freq(Pred) / Freq(BB) > 2.
  // Note that if there are multiple empty blocks sharing the same incoming
  // value for the PHIs in the DestBB, we consider them together. In such a
  // case, Cost(merging BB) will be the sum of their frequencies.
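  //
  // For example (numbers illustrative, not from the source): with the default
  // ratio of 2, Freq(Pred) = 300 and Freq(BB) = 100 give 300 > 100 * 2, so
  // skipping the merge is considered profitable; with Freq(Pred) = 150 the
  // merge is kept.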

  if (!isa<PHINode>(DestBB->begin()))
    return true;

  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;

  // Find all other incoming blocks from which incoming values of all PHIs in
  // DestBB are the same as the ones from BB.
  for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
       ++PI) {
    BasicBlock *DestBBPred = *PI;
    if (DestBBPred == BB)
      continue;

    bool HasAllSameValue = true;
    BasicBlock::const_iterator DestBBI = DestBB->begin();
    while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
      if (DestPN->getIncomingValueForBlock(BB) !=
          DestPN->getIncomingValueForBlock(DestBBPred)) {
        HasAllSameValue = false;
        break;
      }
    }
    if (HasAllSameValue)
      SameIncomingValueBBs.insert(DestBBPred);
  }

  // See if all of BB's incoming values are the same as the value from Pred. In
  // this case, there is no reason to skip merging because COPYs are expected
  // to be placed in Pred already.
  if (SameIncomingValueBBs.count(Pred))
    return true;

  if (!BFI) {
    Function &F = *BB->getParent();
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
  BlockFrequency BBFreq = BFI->getBlockFreq(BB);

  for (auto SameValueBB : SameIncomingValueBBs)
    if (SameValueBB->getUniquePredecessor() == Pred &&
        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
      BBFreq += BFI->getBlockFreq(SameValueBB);

  return PredFreq.getFrequency() <=
         BBFreq.getFrequency() * FreqRatioToSkipMerge;
}

/// Return true if we can merge BB into DestBB if there is a single
/// unconditional branch between them, and BB contains no other non-phi
/// instructions.
bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
                                    const BasicBlock *DestBB) const {
  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
  // the successor. If there are more complex conditions (e.g. preheaders),
  // don't mess around with them.
  BasicBlock::const_iterator BBI = BB->begin();
  while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
    for (const User *U : PN->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
        return false;
      // If User is inside DestBB block and it is a PHINode then check
      // incoming value. If incoming value is not from BB then this is
      // a complex condition (e.g. preheaders) we want to avoid here.
      if (UI->getParent() == DestBB) {
        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
            if (Insn && Insn->getParent() == BB &&
                Insn->getParent() != UPN->getIncomingBlock(I))
              return false;
          }
      }
    }
  }

  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
  // and DestBB may have conflicting incoming values for the block. If so, we
  // can't merge the block.
  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
  if (!DestBBPN) return true;  // no conflict.

  // Collect the preds of BB.
  SmallPtrSet<const BasicBlock*, 16> BBPreds;
  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
    // It is faster to get preds from a PHI than with pred_iterator.
    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
      BBPreds.insert(BBPN->getIncomingBlock(i));
  } else {
    BBPreds.insert(pred_begin(BB), pred_end(BB));
  }

  // Walk the preds of DestBB.
  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
    if (BBPreds.count(Pred)) {   // Common predecessor?
      BBI = DestBB->begin();
      while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
        const Value *V1 = PN->getIncomingValueForBlock(Pred);
        const Value *V2 = PN->getIncomingValueForBlock(BB);

        // If V2 is a phi node in BB, look up what the mapped value will be.
        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
          if (V2PN->getParent() == BB)
            V2 = V2PN->getIncomingValueForBlock(Pred);

        // If there is a conflict, bail out.
        if (V1 != V2) return false;
      }
    }
  }

  return true;
}

/// Eliminate a basic block that has only PHIs and an unconditional branch in
/// it.
void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
  BasicBlock *DestBB = BI->getSuccessor(0);

  DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);

  // If the destination block has a single pred, then this is a trivial edge,
  // just collapse it.
  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
    if (SinglePred != DestBB) {
      // Remember if SinglePred was the entry block of the function. If so, we
      // will need to move BB back to the entry position.
      bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
      MergeBasicBlockIntoOnlyPred(DestBB, nullptr);

      if (isEntry && BB != &BB->getParent()->getEntryBlock())
        BB->moveBefore(&BB->getParent()->getEntryBlock());

      DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
      return;
    }
  }

  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
  // to handle the new incoming edges it is about to have.
  PHINode *PN;
  for (BasicBlock::iterator BBI = DestBB->begin();
       (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
    // Remove the incoming value for BB, and remember it.
    Value *InVal = PN->removeIncomingValue(BB, false);

    // Two options: either the InVal is a phi node defined in BB or it is some
    // value that dominates BB.
    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
    if (InValPhi && InValPhi->getParent() == BB) {
      // Add all of the input values of the input PHI as inputs of this phi.
      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
        PN->addIncoming(InValPhi->getIncomingValue(i),
                        InValPhi->getIncomingBlock(i));
    } else {
      // Otherwise, add one instance of the dominating value for each edge that
      // we will be adding.
      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
          PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
      } else {
        for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
          PN->addIncoming(InVal, *PI);
      }
    }
  }

  // The PHIs are now updated, change everything that refers to BB to use
  // DestBB and remove BB.
  BB->replaceAllUsesWith(DestBB);
  BB->eraseFromParent();
  ++NumBlocksElim;

  DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
}

// Computes a map of base pointer relocation instructions to corresponding
// derived pointer relocation instructions, given a vector of all relocate
// calls.
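// For example (indices illustrative): given relocates for the index pairs
// (4,4) and (4,5) of the same statepoint, the base relocate (4,4) maps to a
// list containing the derived relocate (4,5).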
static void computeBaseDerivedRelocateMap(
    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
        &RelocateInstMap) {
  // Collect information in two maps: one primarily for locating the base object
  // while filling the second map; the second map is the final structure holding
  // a mapping between Base and corresponding Derived relocate calls.
  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
  for (auto *ThisRelocate : AllRelocateCalls) {
    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
                            ThisRelocate->getDerivedPtrIndex());
    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
  }
  for (auto &Item : RelocateIdxMap) {
    std::pair<unsigned, unsigned> Key = Item.first;
    if (Key.first == Key.second)
      // Base relocation: nothing to insert
      continue;

    GCRelocateInst *I = Item.second;
    auto BaseKey = std::make_pair(Key.first, Key.first);

    // We're iterating over RelocateIdxMap so we cannot modify it.
    auto MaybeBase = RelocateIdxMap.find(BaseKey);
    if (MaybeBase == RelocateIdxMap.end())
      // TODO: We might want to insert a new base object relocate and gep off
      // that, if there are enough derived object relocates.
      continue;

    RelocateInstMap[MaybeBase->second].push_back(I);
  }
}

// Accepts a GEP and extracts the operands into a vector, provided they're all
// small integer constants.
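// For example (sketch): gep %base, i32 1, i32 5 yields OffsetV = {1, 5},
// while any constant index greater than 20 (or a non-constant index) makes
// the function return false.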
static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
                                          SmallVectorImpl<Value *> &OffsetV) {
  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
    // Only accept small constant integer operands
    auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Op || Op->getZExtValue() > 20)
      return false;
  }

  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
    OffsetV.push_back(GEP->getOperand(i));
  return true;
}

// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
// replace, computes a replacement, and applies it.
static bool
simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
  bool MadeChange = false;
  // We must ensure that the relocation of the derived pointer is defined after
  // the relocation of the base pointer. If we find a relocation corresponding
  // to this base that is defined earlier than the base's own relocation, we
  // move the relocation of the base right before that relocation. We consider
  // only relocations in the same basic block as the relocation of the base;
  // relocations from other basic blocks are skipped by this optimization and
  // we do not care about them.
  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
       &*R != RelocatedBase; ++R)
    if (auto RI = dyn_cast<GCRelocateInst>(R))
      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
          RelocatedBase->moveBefore(RI);
          break;
        }

  for (GCRelocateInst *ToReplace : Targets) {
    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
           "Not relocating a derived object of the original base object");
    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
      // A duplicate relocate call. TODO: coalesce duplicates.
      continue;
    }

    if (RelocatedBase->getParent() != ToReplace->getParent()) {
      // Base and derived relocates are in different basic blocks.
      // In this case transform is only valid when base dominates derived
      // relocate. However it would be too expensive to check dominance
      // for each such relocate, so we skip the whole transformation.
      continue;
    }

    Value *Base = ToReplace->getBasePtr();
    auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
    if (!Derived || Derived->getPointerOperand() != Base)
      continue;

    SmallVector<Value *, 2> OffsetV;
    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
      continue;

    // Create a Builder and replace the target callsite with a gep
    assert(RelocatedBase->getNextNode() &&
           "Should always have one since it's not a terminator");

    // Insert after RelocatedBase
    IRBuilder<> Builder(RelocatedBase->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

1041 | // If gc_relocate does not match the actual type, cast it to the right type. | |||
1042 | // In theory, there must be a bitcast after gc_relocate if the type does not | |||
1043 | // match, and we should reuse it to get the derived pointer. But there could be | |||
1044 | // cases like this: | |||
1045 | // bb1: | |||
1046 | // ... | |||
1047 | // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) | |||
1048 | // br label %merge | |||
1049 | // | |||
1050 | // bb2: | |||
1051 | // ... | |||
1052 | // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...) | |||
1053 | // br label %merge | |||
1054 | // | |||
1055 | // merge: | |||
1056 | // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ] | |||
1057 | //   %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)* | |||
1058 | // | |||
1059 | // In this case, we cannot find the bitcast any more. So we insert a new bitcast | |||
1060 | // whether or not one already exists. In this way, we can handle all cases, and | |||
1061 | // the extra bitcast should be optimized away in later passes. | |||
1062 | Value *ActualRelocatedBase = RelocatedBase; | |||
1063 | if (RelocatedBase->getType() != Base->getType()) { | |||
1064 | ActualRelocatedBase = | |||
1065 | Builder.CreateBitCast(RelocatedBase, Base->getType()); | |||
1066 | } | |||
1067 | Value *Replacement = Builder.CreateGEP( | |||
1068 | Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV)); | |||
1069 | Replacement->takeName(ToReplace); | |||
1070 | // If the newly generated derived pointer's type does not match the original derived | |||
1071 | // pointer's type, cast the new derived pointer to match it. Same reasoning as above. | |||
1072 | Value *ActualReplacement = Replacement; | |||
1073 | if (Replacement->getType() != ToReplace->getType()) { | |||
1074 | ActualReplacement = | |||
1075 | Builder.CreateBitCast(Replacement, ToReplace->getType()); | |||
1076 | } | |||
1077 | ToReplace->replaceAllUsesWith(ActualReplacement); | |||
1078 | ToReplace->eraseFromParent(); | |||
1079 | ||||
1080 | MadeChange = true; | |||
1081 | } | |||
1082 | return MadeChange; | |||
1083 | } | |||
1084 | ||||
1085 | // Turns this: | |||
1086 | // | |||
1087 | // %base = ... | |||
1088 | // %ptr = gep %base + 15 | |||
1089 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
1090 | // %base' = relocate(%tok, i32 4, i32 4) | |||
1091 | // %ptr' = relocate(%tok, i32 4, i32 5) | |||
1092 | // %val = load %ptr' | |||
1093 | // | |||
1094 | // into this: | |||
1095 | // | |||
1096 | // %base = ... | |||
1097 | // %ptr = gep %base + 15 | |||
1098 | // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr) | |||
1099 | // %base' = gc.relocate(%tok, i32 4, i32 4) | |||
1100 | // %ptr' = gep %base' + 15 | |||
1101 | // %val = load %ptr' | |||
1102 | bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) { | |||
1103 | bool MadeChange = false; | |||
1104 | SmallVector<GCRelocateInst *, 2> AllRelocateCalls; | |||
1105 | ||||
1106 | for (auto *U : I.users()) | |||
1107 | if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U)) | |||
1108 | // Collect all the relocate calls associated with a statepoint | |||
1109 | AllRelocateCalls.push_back(Relocate); | |||
1110 | ||||
1111 | // We need at least one base pointer relocation + one derived pointer | |||
1112 | // relocation to perform the rewrite. | |||
1113 | if (AllRelocateCalls.size() < 2) | |||
1114 | return false; | |||
1115 | ||||
1116 | // RelocateInstMap is a mapping from the base relocate instruction to the | |||
1117 | // corresponding derived relocate instructions | |||
1118 | DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap; | |||
1119 | computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap); | |||
1120 | if (RelocateInstMap.empty()) | |||
1121 | return false; | |||
1122 | ||||
1123 | for (auto &Item : RelocateInstMap) | |||
1124 | // Item.first is the RelocatedBase to offset against | |||
1125 | // Item.second is the vector of Targets to replace | |||
1126 | MadeChange |= simplifyRelocatesOffABase(Item.first, Item.second); | |||
1127 | return MadeChange; | |||
1128 | } | |||
1129 | ||||
1130 | /// SinkCast - Sink the specified cast instruction into its user blocks | |||
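/// For illustration, a hypothetical example (editorial, not from the source):
/// given a cast defined in one block and used in two others,
///   bb0:
///     %c = bitcast i8* %p to i32*
///     br i1 %cond, label %bb1, label %bb2
///   bb1:
///     %v1 = load i32, i32* %c
///   bb2:
///     %v2 = load i32, i32* %c
/// sinking rewrites each use to a block-local copy of the cast:
///   bb1:
///     %c1 = bitcast i8* %p to i32*
///     %v1 = load i32, i32* %c1
/// (and likewise in bb2), so the cast result no longer needs a virtual
/// register that is live across the branch.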
1131 | static bool SinkCast(CastInst *CI) { | |||
1132 | BasicBlock *DefBB = CI->getParent(); | |||
1133 | ||||
1134 | /// InsertedCasts - Only insert a cast in each block once. | |||
1135 | DenseMap<BasicBlock*, CastInst*> InsertedCasts; | |||
1136 | ||||
1137 | bool MadeChange = false; | |||
1138 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
1139 | UI != E; ) { | |||
1140 | Use &TheUse = UI.getUse(); | |||
1141 | Instruction *User = cast<Instruction>(*UI); | |||
1142 | ||||
1143 | // Figure out which BB this cast is used in. For PHI's this is the | |||
1144 | // appropriate predecessor block. | |||
1145 | BasicBlock *UserBB = User->getParent(); | |||
1146 | if (PHINode *PN = dyn_cast<PHINode>(User)) { | |||
1147 | UserBB = PN->getIncomingBlock(TheUse); | |||
1148 | } | |||
1149 | ||||
1150 | // Preincrement use iterator so we don't invalidate it. | |||
1151 | ++UI; | |||
1152 | ||||
1153 | // The first insertion point of a block containing an EH pad is after the | |||
1154 | // pad. If the pad is the user, we cannot sink the cast past the pad. | |||
1155 | if (User->isEHPad()) | |||
1156 | continue; | |||
1157 | ||||
1158 | // If the block selected to receive the cast is an EH pad that does not | |||
1159 | // allow non-PHI instructions before the terminator, we can't sink the | |||
1160 | // cast. | |||
1161 | if (UserBB->getTerminator()->isEHPad()) | |||
1162 | continue; | |||
1163 | ||||
1164 | // If this user is in the same block as the cast, don't change the cast. | |||
1165 | if (UserBB == DefBB) continue; | |||
1166 | ||||
1167 | // If we have already inserted a cast into this block, use it. | |||
1168 | CastInst *&InsertedCast = InsertedCasts[UserBB]; | |||
1169 | ||||
1170 | if (!InsertedCast) { | |||
1171 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1172 | assert(InsertPt != UserBB->end()); | |||
1173 | InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0), | |||
1174 | CI->getType(), "", &*InsertPt); | |||
1175 | } | |||
1176 | ||||
1177 | // Replace a use of the cast with a use of the new cast. | |||
1178 | TheUse = InsertedCast; | |||
1179 | MadeChange = true; | |||
1180 | ++NumCastUses; | |||
1181 | } | |||
1182 | ||||
1183 | // If we removed all uses, nuke the cast. | |||
1184 | if (CI->use_empty()) { | |||
1185 | CI->eraseFromParent(); | |||
1186 | MadeChange = true; | |||
1187 | } | |||
1188 | ||||
1189 | return MadeChange; | |||
1190 | } | |||
1191 | ||||
1192 | /// If the specified cast instruction is a noop copy (e.g. it's casting from | |||
1193 | /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to | |||
1194 | /// reduce the number of virtual registers that must be created and coalesced. | |||
1195 | /// | |||
1196 | /// Return true if any changes are made. | |||
1197 | static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI, | |||
1198 | const DataLayout &DL) { | |||
1199 | // Sink only "cheap" (or nop) address-space casts. This is a weaker condition | |||
1200 | // than sinking only nop casts, but is helpful on some platforms. | |||
1201 | if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) { | |||
1202 | if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(), | |||
1203 | ASC->getDestAddressSpace())) | |||
1204 | return false; | |||
1205 | } | |||
1206 | ||||
1207 | // If this is a noop copy, | |||
1208 | EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType()); | |||
1209 | EVT DstVT = TLI.getValueType(DL, CI->getType()); | |||
1210 | ||||
1211 | // Bail out if this is an fp<->int conversion. | |||
1212 | if (SrcVT.isInteger() != DstVT.isInteger()) | |||
1213 | return false; | |||
1214 | ||||
1215 | // If this is an extension, it will be a zero or sign extension, which | |||
1216 | // isn't a noop. | |||
1217 | if (SrcVT.bitsLT(DstVT)) return false; | |||
1218 | ||||
1219 | // If these values will be promoted, find out what they will be promoted | |||
1220 | // to. This helps us consider truncates on PPC as noop copies when they | |||
1221 | // are. | |||
1222 | if (TLI.getTypeAction(CI->getContext(), SrcVT) == | |||
1223 | TargetLowering::TypePromoteInteger) | |||
1224 | SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT); | |||
1225 | if (TLI.getTypeAction(CI->getContext(), DstVT) == | |||
1226 | TargetLowering::TypePromoteInteger) | |||
1227 | DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT); | |||
1228 | ||||
1229 | // If, after promotion, these are the same types, this is a noop copy. | |||
1230 | if (SrcVT != DstVT) | |||
1231 | return false; | |||
1232 | ||||
1233 | return SinkCast(CI); | |||
1234 | } | |||
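// Editorial worked example: on a target that promotes i16 to i32, a
// trunc i32 -> i16 is a noop copy even though the bit widths differ: both
// EVTs promote to i32, so SrcVT == DstVT after the queries above, and the
// trunc is handed to SinkCast instead of staying live across blocks.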
1235 | ||||
1236 | /// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if | |||
1237 | /// possible. | |||
1238 | /// | |||
1239 | /// Return true if any changes were made. | |||
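/// For illustration, a hypothetical pattern this matches (editorial sketch;
/// value names are made up):
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a        ; unsigned carry-out check
/// which is rewritten into:
///   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add = extractvalue { i32, i1 } %res, 0
///   %cmp = extractvalue { i32, i1 } %res, 1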
1240 | static bool CombineUAddWithOverflow(CmpInst *CI) { | |||
1241 | Value *A, *B; | |||
1242 | Instruction *AddI; | |||
1243 | if (!match(CI, | |||
1244 | m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI)))) | |||
1245 | return false; | |||
1246 | ||||
1247 | Type *Ty = AddI->getType(); | |||
1248 | if (!isa<IntegerType>(Ty)) | |||
1249 | return false; | |||
1250 | ||||
1251 | // We don't want to move around uses of condition values this late, so we | |||
1252 | // check if it is legal to create the call to the intrinsic in the basic | |||
1253 | // block containing the icmp: | |||
1254 | ||||
1255 | if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse()) | |||
1256 | return false; | |||
1257 | ||||
1258 | #ifndef NDEBUG | |||
1259 | // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption | |||
1260 | // for now: | |||
1261 | if (AddI->hasOneUse()) | |||
1262 | assert(*AddI->user_begin() == CI && "expected!"); | |||
1263 | #endif | |||
1264 | ||||
1265 | Module *M = CI->getModule(); | |||
1266 | Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty); | |||
1267 | ||||
1268 | auto *InsertPt = AddI->hasOneUse() ? CI : AddI; | |||
1269 | ||||
1270 | auto *UAddWithOverflow = | |||
1271 | CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt); | |||
1272 | auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt); | |||
1273 | auto *Overflow = | |||
1274 | ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt); | |||
1275 | ||||
1276 | CI->replaceAllUsesWith(Overflow); | |||
1277 | AddI->replaceAllUsesWith(UAdd); | |||
1278 | CI->eraseFromParent(); | |||
1279 | AddI->eraseFromParent(); | |||
1280 | return true; | |||
1281 | } | |||
1282 | ||||
1283 | /// Sink the given CmpInst into user blocks to reduce the number of virtual | |||
1284 | /// registers that must be created and coalesced. This is a clear win except on | |||
1285 | /// targets with multiple condition code registers (PowerPC), where it might | |||
1286 | /// lose; some adjustment may be wanted there. | |||
1287 | /// | |||
1288 | /// Return true if any changes are made. | |||
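/// For illustration (editorial sketch with made-up names): given
///   bb0: %c = icmp eq i32 %a, %b
///   bb1: br i1 %c, label %t1, label %f1
///   bb2: br i1 %c, label %t2, label %f2
/// the cmp is duplicated into bb1 and bb2 so that each copy can be fused with
/// its branch during instruction selection, instead of one condition value
/// staying live across blocks.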
1289 | static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) { | |||
1290 | BasicBlock *DefBB = CI->getParent(); | |||
1291 | ||||
1292 | // Avoid sinking soft-FP comparisons, since this can move them into a loop. | |||
1293 | if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI)) | |||
1294 | return false; | |||
1295 | ||||
1296 | // Only insert a cmp in each block once. | |||
1297 | DenseMap<BasicBlock*, CmpInst*> InsertedCmps; | |||
1298 | ||||
1299 | bool MadeChange = false; | |||
1300 | for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end(); | |||
1301 | UI != E; ) { | |||
1302 | Use &TheUse = UI.getUse(); | |||
1303 | Instruction *User = cast<Instruction>(*UI); | |||
1304 | ||||
1305 | // Preincrement use iterator so we don't invalidate it. | |||
1306 | ++UI; | |||
1307 | ||||
1308 | // Don't bother for PHI nodes. | |||
1309 | if (isa<PHINode>(User)) | |||
1310 | continue; | |||
1311 | ||||
1312 | // Figure out which BB this cmp is used in. | |||
1313 | BasicBlock *UserBB = User->getParent(); | |||
1314 | ||||
1315 | // If this user is in the same block as the cmp, don't change the cmp. | |||
1316 | if (UserBB == DefBB) continue; | |||
1317 | ||||
1318 | // If we have already inserted a cmp into this block, use it. | |||
1319 | CmpInst *&InsertedCmp = InsertedCmps[UserBB]; | |||
1320 | ||||
1321 | if (!InsertedCmp) { | |||
1322 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1323 | assert(InsertPt != UserBB->end()); | |||
1324 | InsertedCmp = | |||
1325 | CmpInst::Create(CI->getOpcode(), CI->getPredicate(), | |||
1326 | CI->getOperand(0), CI->getOperand(1), "", &*InsertPt); | |||
1327 | // Propagate the debug info. | |||
1328 | InsertedCmp->setDebugLoc(CI->getDebugLoc()); | |||
1329 | } | |||
1330 | ||||
1331 | // Replace a use of the cmp with a use of the new cmp. | |||
1332 | TheUse = InsertedCmp; | |||
1333 | MadeChange = true; | |||
1334 | ++NumCmpUses; | |||
1335 | } | |||
1336 | ||||
1337 | // If we removed all uses, nuke the cmp. | |||
1338 | if (CI->use_empty()) { | |||
1339 | CI->eraseFromParent(); | |||
1340 | MadeChange = true; | |||
1341 | } | |||
1342 | ||||
1343 | return MadeChange; | |||
1344 | } | |||
1345 | ||||
1346 | static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) { | |||
1347 | if (SinkCmpExpression(CI, TLI)) | |||
1348 | return true; | |||
1349 | ||||
1350 | if (CombineUAddWithOverflow(CI)) | |||
1351 | return true; | |||
1352 | ||||
1353 | return false; | |||
1354 | } | |||
1355 | ||||
1356 | /// Duplicate and sink the given 'and' instruction into user blocks where it is | |||
1357 | /// used in a compare to allow isel to generate better code for targets where | |||
1358 | /// this operation can be combined. | |||
1359 | /// | |||
1360 | /// Return true if any changes are made. | |||
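/// For illustration (editorial sketch with made-up names): given
///   bb0:
///     %m = and i64 %x, 255
///     br i1 %cond, label %bb1, label %bb2
///   bb1:
///     %z1 = icmp eq i64 %m, 0
///   bb2:
///     %z2 = icmp eq i64 %m, 0
/// the 'and' is duplicated next to each icmp, so a target whose
/// isMaskAndCmp0FoldingBeneficial hook returns true can select a single
/// test-under-mask style instruction for each pair.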
1361 | static bool sinkAndCmp0Expression(Instruction *AndI, | |||
1362 | const TargetLowering &TLI, | |||
1363 | SetOfInstrs &InsertedInsts) { | |||
1364 | // Double-check that we're not trying to optimize an instruction that was | |||
1365 | // already optimized by some other part of this pass. | |||
1366 | assert(!InsertedInsts.count(AndI) && | |||
1367 | "Attempting to optimize already optimized and instruction"); | |||
1368 | (void) InsertedInsts; | |||
1369 | ||||
1370 | // Nothing to do for single use in same basic block. | |||
1371 | if (AndI->hasOneUse() && | |||
1372 | AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent()) | |||
1373 | return false; | |||
1374 | ||||
1375 | // Try to avoid cases where sinking/duplicating is likely to increase register | |||
1376 | // pressure. | |||
1377 | if (!isa<ConstantInt>(AndI->getOperand(0)) && | |||
1378 | !isa<ConstantInt>(AndI->getOperand(1)) && | |||
1379 | AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse()) | |||
1380 | return false; | |||
1381 | ||||
1382 | for (auto *U : AndI->users()) { | |||
1383 | Instruction *User = cast<Instruction>(U); | |||
1384 | ||||
1385 | // Only sink an 'and' mask that feeds an icmp with 0. | |||
1386 | if (!isa<ICmpInst>(User)) | |||
1387 | return false; | |||
1388 | ||||
1389 | auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1)); | |||
1390 | if (!CmpC || !CmpC->isZero()) | |||
1391 | return false; | |||
1392 | } | |||
1393 | ||||
1394 | if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI)) | |||
1395 | return false; | |||
1396 | ||||
1397 | DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n"); | |||
1398 | DEBUG(AndI->getParent()->dump()); | |||
1399 | ||||
1400 | // Push the 'and' into the same block as the icmp 0. There should only be | |||
1401 | // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any | |||
1402 | // others, so we don't need to keep track of which BBs we insert into. | |||
1403 | for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end(); | |||
1404 | UI != E; ) { | |||
1405 | Use &TheUse = UI.getUse(); | |||
1406 | Instruction *User = cast<Instruction>(*UI); | |||
1407 | ||||
1408 | // Preincrement use iterator so we don't invalidate it. | |||
1409 | ++UI; | |||
1410 | ||||
1411 | DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n"); | |||
1412 | ||||
1413 | // Keep the 'and' in the same place if the use is already in the same block. | |||
1414 | Instruction *InsertPt = | |||
1415 | User->getParent() == AndI->getParent() ? AndI : User; | |||
1416 | Instruction *InsertedAnd = | |||
1417 | BinaryOperator::Create(Instruction::And, AndI->getOperand(0), | |||
1418 | AndI->getOperand(1), "", InsertPt); | |||
1419 | // Propagate the debug info. | |||
1420 | InsertedAnd->setDebugLoc(AndI->getDebugLoc()); | |||
1421 | ||||
1422 | // Replace a use of the 'and' with a use of the new 'and'. | |||
1423 | TheUse = InsertedAnd; | |||
1424 | ++NumAndUses; | |||
1425 | DEBUG(User->getParent()->dump()); | |||
1426 | } | |||
1427 | ||||
1428 | // We removed all uses, nuke the and. | |||
1429 | AndI->eraseFromParent(); | |||
1430 | return true; | |||
1431 | } | |||
1432 | ||||
1433 | /// Check if the candidates could be combined with a shift instruction, which | |||
1434 | /// includes: | |||
1435 | /// 1. Truncate instruction | |||
1436 | /// 2. An 'and' instruction whose immediate is a mask of the low bits: | |||
1437 | /// imm & (imm+1) == 0 | |||
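/// For example (editorial note): imm = 0x00FF qualifies, since
/// 0x00FF & 0x0100 == 0; imm = 0x00F0 does not, since 0x00F0 & 0x00F1 != 0.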
1438 | static bool isExtractBitsCandidateUse(Instruction *User) { | |||
1439 | if (!isa<TruncInst>(User)) { | |||
1440 | if (User->getOpcode() != Instruction::And || | |||
1441 | !isa<ConstantInt>(User->getOperand(1))) | |||
1442 | return false; | |||
1443 | ||||
1444 | const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue(); | |||
1445 | ||||
1446 | if ((Cimm & (Cimm + 1)).getBoolValue()) | |||
1447 | return false; | |||
1448 | } | |||
1449 | return true; | |||
1450 | } | |||
1451 | ||||
1452 | /// Sink both shift and truncate instruction to the use of truncate's BB. | |||
1453 | static bool | |||
1454 | SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI, | |||
1455 | DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts, | |||
1456 | const TargetLowering &TLI, const DataLayout &DL) { | |||
1457 | BasicBlock *UserBB = User->getParent(); | |||
1458 | DenseMap<BasicBlock *, CastInst *> InsertedTruncs; | |||
1459 | TruncInst *TruncI = cast<TruncInst>(User); // Caller ensures User is a trunc. | |||
1460 | bool MadeChange = false; | |||
1461 | ||||
1462 | for (Value::user_iterator TruncUI = TruncI->user_begin(), | |||
1463 | TruncE = TruncI->user_end(); | |||
1464 | TruncUI != TruncE;) { | |||
1465 | ||||
1466 | Use &TruncTheUse = TruncUI.getUse(); | |||
1467 | Instruction *TruncUser = cast<Instruction>(*TruncUI); | |||
1468 | // Preincrement use iterator so we don't invalidate it. | |||
1469 | ||||
1470 | ++TruncUI; | |||
1471 | ||||
1472 | int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode()); | |||
1473 | if (!ISDOpcode) | |||
1474 | continue; | |||
1475 | ||||
1476 | // If the use is actually a legal node, there will not be an | |||
1477 | // implicit truncate. | |||
1478 | // FIXME: always querying the result type is just an | |||
1479 | // approximation; some nodes' legality is determined by the | |||
1480 | // operand or other means. There's no good way to find out though. | |||
1481 | if (TLI.isOperationLegalOrCustom( | |||
1482 | ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true))) | |||
1483 | continue; | |||
1484 | ||||
1485 | // Don't bother for PHI nodes. | |||
1486 | if (isa<PHINode>(TruncUser)) | |||
1487 | continue; | |||
1488 | ||||
1489 | BasicBlock *TruncUserBB = TruncUser->getParent(); | |||
1490 | ||||
1491 | if (UserBB == TruncUserBB) | |||
1492 | continue; | |||
1493 | ||||
1494 | BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB]; | |||
1495 | CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB]; | |||
1496 | ||||
1497 | if (!InsertedShift && !InsertedTrunc) { | |||
1498 | BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1499 | assert(InsertPt != TruncUserBB->end()); | |||
1500 | // Sink the shift | |||
1501 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1502 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1503 | "", &*InsertPt); | |||
1504 | else | |||
1505 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1506 | "", &*InsertPt); | |||
1507 | ||||
1508 | // Sink the trunc | |||
1509 | BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt(); | |||
1510 | TruncInsertPt++; | |||
1511 | assert(TruncInsertPt != TruncUserBB->end()); | |||
1512 | ||||
1513 | InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift, | |||
1514 | TruncI->getType(), "", &*TruncInsertPt); | |||
1515 | ||||
1516 | MadeChange = true; | |||
1517 | ||||
1518 | TruncTheUse = InsertedTrunc; | |||
1519 | } | |||
1520 | } | |||
1521 | return MadeChange; | |||
1522 | } | |||
1523 | ||||
1524 | /// Sink the shift *right* instruction into user blocks if the uses could | |||
1525 | /// potentially be combined with this shift instruction to generate a BitExtract | |||
1526 | /// instruction. It is only applied if the architecture supports BitExtract. | |||
1527 | /// Here is an example: | |||
1528 | /// BB1: | |||
1529 | /// %x.extract.shift = lshr i64 %arg1, 32 | |||
1530 | /// BB2: | |||
1531 | /// %x.extract.trunc = trunc i64 %x.extract.shift to i16 | |||
1532 | /// ==> | |||
1533 | /// | |||
1534 | /// BB2: | |||
1535 | /// %x.extract.shift.1 = lshr i64 %arg1, 32 | |||
1536 | /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16 | |||
1537 | /// | |||
1538 | /// CodeGen will recognize the pattern in BB2 and generate a BitExtract | |||
1539 | /// instruction. | |||
1540 | /// Return true if any changes are made. | |||
1541 | static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI, | |||
1542 | const TargetLowering &TLI, | |||
1543 | const DataLayout &DL) { | |||
1544 | BasicBlock *DefBB = ShiftI->getParent(); | |||
1545 | ||||
1546 | /// Only insert instructions in each block once. | |||
1547 | DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts; | |||
1548 | ||||
1549 | bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType())); | |||
1550 | ||||
1551 | bool MadeChange = false; | |||
1552 | for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end(); | |||
1553 | UI != E;) { | |||
1554 | Use &TheUse = UI.getUse(); | |||
1555 | Instruction *User = cast<Instruction>(*UI); | |||
1556 | // Preincrement use iterator so we don't invalidate it. | |||
1557 | ++UI; | |||
1558 | ||||
1559 | // Don't bother for PHI nodes. | |||
1560 | if (isa<PHINode>(User)) | |||
1561 | continue; | |||
1562 | ||||
1563 | if (!isExtractBitsCandidateUse(User)) | |||
1564 | continue; | |||
1565 | ||||
1566 | BasicBlock *UserBB = User->getParent(); | |||
1567 | ||||
1568 | if (UserBB == DefBB) { | |||
1569 | // If the shift and truncate instructions are in the same BB, the use of | |||
1570 | // the truncate (TruncUse) may still introduce another truncate if it is not | |||
1571 | // legal. In this case, we would like to sink both the shift and the | |||
1572 | // truncate instruction to the BB of TruncUse. | |||
1573 | // for example: | |||
1574 | // BB1: | |||
1575 | // i64 shift.result = lshr i64 opnd, imm | |||
1576 | // trunc.result = trunc shift.result to i16 | |||
1577 | // | |||
1578 | // BB2: | |||
1579 | // ----> We will have an implicit truncate here if the architecture does | |||
1580 | // not have i16 compare. | |||
1581 | // cmp i16 trunc.result, opnd2 | |||
1582 | // | |||
1583 | if (isa<TruncInst>(User) && shiftIsLegal | |||
1584 | // If the type of the truncate is legal, no truncate will be | |||
1585 | // introduced in other basic blocks. | |||
1586 | && | |||
1587 | (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType())))) | |||
1588 | MadeChange = | |||
1589 | SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL); | |||
1590 | ||||
1591 | continue; | |||
1592 | } | |||
1593 | // If we have already inserted a shift into this block, use it. | |||
1594 | BinaryOperator *&InsertedShift = InsertedShifts[UserBB]; | |||
1595 | ||||
1596 | if (!InsertedShift) { | |||
1597 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
1598 | assert(InsertPt != UserBB->end()); | |||
1599 | ||||
1600 | if (ShiftI->getOpcode() == Instruction::AShr) | |||
1601 | InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, | |||
1602 | "", &*InsertPt); | |||
1603 | else | |||
1604 | InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, | |||
1605 | "", &*InsertPt); | |||
1606 | ||||
1607 | MadeChange = true; | |||
1608 | } | |||
1609 | ||||
1610 | // Replace a use of the shift with a use of the new shift. | |||
1611 | TheUse = InsertedShift; | |||
1612 | } | |||
1613 | ||||
1614 | // If we removed all uses, nuke the shift. | |||
1615 | if (ShiftI->use_empty()) | |||
1616 | ShiftI->eraseFromParent(); | |||
1617 | ||||
1618 | return MadeChange; | |||
1619 | } | |||
1620 | ||||
1621 | /// If counting leading or trailing zeros is an expensive operation and a zero | |||
1622 | /// input is defined, add a check for zero to avoid calling the intrinsic. | |||
1623 | /// | |||
1624 | /// We want to transform: | |||
1625 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false) | |||
1626 | /// | |||
1627 | /// into: | |||
1628 | /// entry: | |||
1629 | /// %cmpz = icmp eq i64 %A, 0 | |||
1630 | /// br i1 %cmpz, label %cond.end, label %cond.false | |||
1631 | /// cond.false: | |||
1632 | /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true) | |||
1633 | /// br label %cond.end | |||
1634 | /// cond.end: | |||
1635 | /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ] | |||
1636 | /// | |||
1637 | /// If the transform is performed, return true and set ModifiedDT to true. | |||
1638 | static bool despeculateCountZeros(IntrinsicInst *CountZeros, | |||
1639 | const TargetLowering *TLI, | |||
1640 | const DataLayout *DL, | |||
1641 | bool &ModifiedDT) { | |||
1642 | if (!TLI || !DL) | |||
1643 | return false; | |||
1644 | ||||
1645 | // If a zero input is undefined, it doesn't make sense to despeculate that. | |||
1646 | if (match(CountZeros->getOperand(1), m_One())) | |||
1647 | return false; | |||
1648 | ||||
1649 | // If it's cheap to speculate, there's nothing to do. | |||
1650 | auto IntrinsicID = CountZeros->getIntrinsicID(); | |||
1651 | if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) || | |||
1652 | (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz())) | |||
1653 | return false; | |||
1654 | ||||
1655 | // Only handle legal scalar cases. Anything else requires too much work. | |||
1656 | Type *Ty = CountZeros->getType(); | |||
1657 | unsigned SizeInBits = Ty->getPrimitiveSizeInBits(); | |||
1658 | if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits()) | |||
1659 | return false; | |||
1660 | ||||
1661 | // The intrinsic will be sunk behind a compare against zero and branch. | |||
1662 | BasicBlock *StartBlock = CountZeros->getParent(); | |||
1663 | BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false"); | |||
1664 | ||||
1665 | // Create another block after the count zero intrinsic. A PHI will be added | |||
1666 | // in this block to select the result of the intrinsic or the bit-width | |||
1667 | // constant if the input to the intrinsic is zero. | |||
1668 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros)); | |||
1669 | BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end"); | |||
1670 | ||||
1671 | // Set up a builder to create a compare, conditional branch, and PHI. | |||
1672 | IRBuilder<> Builder(CountZeros->getContext()); | |||
1673 | Builder.SetInsertPoint(StartBlock->getTerminator()); | |||
1674 | Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc()); | |||
1675 | ||||
1676 | // Replace the unconditional branch that was created by the first split with | |||
1677 | // a compare against zero and a conditional branch. | |||
1678 | Value *Zero = Constant::getNullValue(Ty); | |||
1679 | Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz"); | |||
1680 | Builder.CreateCondBr(Cmp, EndBlock, CallBlock); | |||
1681 | StartBlock->getTerminator()->eraseFromParent(); | |||
1682 | ||||
1683 | // Create a PHI in the end block to select either the output of the intrinsic | |||
1684 | // or the bit width of the operand. | |||
1685 | Builder.SetInsertPoint(&EndBlock->front()); | |||
1686 | PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz"); | |||
1687 | CountZeros->replaceAllUsesWith(PN); | |||
1688 | Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits)); | |||
1689 | PN->addIncoming(BitWidth, StartBlock); | |||
1690 | PN->addIncoming(CountZeros, CallBlock); | |||
1691 | ||||
1692 | // We are explicitly handling the zero case, so we can set the intrinsic's | |||
1693 | // undefined zero argument to 'true'. This will also prevent reprocessing the | |||
1694 | // intrinsic; we only despeculate when a zero input is defined. | |||
1695 | CountZeros->setArgOperand(1, Builder.getTrue()); | |||
1696 | ModifiedDT = true; | |||
1697 | return true; | |||
1698 | } | |||
1699 | ||||
1700 | namespace { | |||
1701 | ||||
1702 | // This class provides helper functions to expand a memcmp library call into an | |||
1703 | // inline expansion. | |||
1704 | class MemCmpExpansion { | |||
1705 | struct ResultBlock { | |||
1706 | BasicBlock *BB = nullptr; | |||
1707 | PHINode *PhiSrc1 = nullptr; | |||
1708 | PHINode *PhiSrc2 = nullptr; | |||
1709 | ||||
1710 | ResultBlock() = default; | |||
1711 | }; | |||
1712 | ||||
1713 | CallInst *CI; | |||
1714 | ResultBlock ResBlock; | |||
1715 | unsigned MaxLoadSize; | |||
1716 | unsigned NumBlocks; | |||
1717 | unsigned NumBlocksNonOneByte; | |||
1718 | unsigned NumLoadsPerBlock; | |||
1719 | std::vector<BasicBlock *> LoadCmpBlocks; | |||
1720 | BasicBlock *EndBlock; | |||
1721 | PHINode *PhiRes; | |||
1722 | bool IsUsedForZeroCmp; | |||
1723 | const DataLayout &DL; | |||
1724 | IRBuilder<> Builder; | |||
1725 | ||||
1726 | unsigned calculateNumBlocks(unsigned Size); | |||
1727 | void createLoadCmpBlocks(); | |||
1728 | void createResultBlock(); | |||
1729 | void setupResultBlockPHINodes(); | |||
1730 | void setupEndBlockPHINodes(); | |||
1731 | void emitLoadCompareBlock(unsigned Index, unsigned LoadSize, | |||
1732 | unsigned GEPIndex); | |||
1733 | Value *getCompareLoadPairs(unsigned Index, unsigned Size, | |||
1734 | unsigned &NumBytesProcessed); | |||
1735 | void emitLoadCompareBlockMultipleLoads(unsigned Index, unsigned Size, | |||
1736 | unsigned &NumBytesProcessed); | |||
1737 | void emitLoadCompareByteBlock(unsigned Index, unsigned GEPIndex); | |||
1738 | void emitMemCmpResultBlock(); | |||
1739 | Value *getMemCmpExpansionZeroCase(unsigned Size); | |||
1740 | Value *getMemCmpEqZeroOneBlock(unsigned Size); | |||
1741 | Value *getMemCmpOneBlock(unsigned Size); | |||
1742 | unsigned getLoadSize(unsigned Size); | |||
1743 | unsigned getNumLoads(unsigned Size); | |||
1744 | ||||
1745 | public: | |||
1746 | MemCmpExpansion(CallInst *CI, uint64_t Size, unsigned MaxLoadSize, | |||
1747 | unsigned NumLoadsPerBlock, const DataLayout &DL); | |||
1748 | ||||
1749 | Value *getMemCmpExpansion(uint64_t Size); | |||
1750 | }; | |||
1751 | ||||
1752 | } // end anonymous namespace | |||
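// Editorial sketch of how a caller might drive this class (the names below
// are illustrative; the actual driver appears later in this file):
//
//   MemCmpExpansion Expansion(CI, SizeVal, MaxLoadSize, LoadsPerBlock,
//                             CI->getModule()->getDataLayout());
//   Value *Res = Expansion.getMemCmpExpansion(SizeVal);
//   CI->replaceAllUsesWith(Res);
//   CI->eraseFromParent();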
1753 | ||||
1754 | // Initialize the basic block structure required for expanding a memcmp call | |||
1755 | // with the given maximum load size and memcmp size parameter. | |||
1756 | // This structure includes: | |||
1757 | // 1. A list of load compare blocks - LoadCmpBlocks. | |||
1758 | // 2. An EndBlock, split from original instruction point, which is the block to | |||
1759 | // return from. | |||
1760 | // 3. ResultBlock, block to branch to for early exit when a | |||
1761 | // LoadCmpBlock finds a difference. | |||
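// For example (editorial sketch): memcmp(a, b, 16) with MaxLoadSize = 8 and
// one load per block yields two 8-byte LoadCmpBlocks; each branches to
// res_block when its loaded values differ, the last one otherwise falls
// through to endblock, and phi.res there selects 0 (all equal) or the value
// computed in res_block.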
1762 | MemCmpExpansion::MemCmpExpansion(CallInst *CI, uint64_t Size, | |||
1763 | unsigned MaxLoadSize, unsigned LoadsPerBlock, | |||
1764 | const DataLayout &TheDataLayout) | |||
1765 | : CI(CI), MaxLoadSize(MaxLoadSize), NumLoadsPerBlock(LoadsPerBlock), | |||
1766 | DL(TheDataLayout), Builder(CI) { | |||
1767 | // A memcmp used only in a zero-equality comparison, with a single block of | |||
1768 | // load and compare, needs no extra blocks. This case could be handled in the | |||
1769 | // DAG, but since we have all of the machinery to flexibly expand any memcmp | |||
1770 | // here, we choose to handle this case too to avoid fragmented lowering. | |||
1771 | IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI); | |||
1772 | NumBlocks = calculateNumBlocks(Size); | |||
1773 | if ((!IsUsedForZeroCmp && NumLoadsPerBlock != 1) || NumBlocks != 1) { | |||
1774 | BasicBlock *StartBlock = CI->getParent(); | |||
1775 | EndBlock = StartBlock->splitBasicBlock(CI, "endblock"); | |||
1776 | setupEndBlockPHINodes(); | |||
1777 | createResultBlock(); | |||
1778 | ||||
1779 | // If the return value of memcmp is not used in a zero-equality test, we need | |||
1780 | // to calculate which source was larger. The calculation requires the | |||
1781 | // two loaded source values of each load compare block. | |||
1782 | // These will be saved in the phi nodes created by setupResultBlockPHINodes. | |||
1783 | if (!IsUsedForZeroCmp) | |||
1784 | setupResultBlockPHINodes(); | |||
1785 | ||||
1786 | // Create the number of required load compare basic blocks. | |||
1787 | createLoadCmpBlocks(); | |||
1788 | ||||
1789 | // Update the terminator added by splitBasicBlock to branch to the first | |||
1790 | // LoadCmpBlock. | |||
1791 | StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]); | |||
1792 | } | |||
1793 | ||||
1794 | Builder.SetCurrentDebugLocation(CI->getDebugLoc()); | |||
1795 | } | |||
1796 | ||||
1797 | void MemCmpExpansion::createLoadCmpBlocks() { | |||
1798 | for (unsigned i = 0; i < NumBlocks; i++) { | |||
1799 | BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb", | |||
1800 | EndBlock->getParent(), EndBlock); | |||
1801 | LoadCmpBlocks.push_back(BB); | |||
1802 | } | |||
1803 | } | |||
1804 | ||||
1805 | void MemCmpExpansion::createResultBlock() { | |||
1806 | ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block", | |||
1807 | EndBlock->getParent(), EndBlock); | |||
1808 | } | |||
1809 | ||||
1810 | // This function creates the IR instructions for loading and comparing 1 byte. | |||
1811 | // It loads 1 byte from each source of the memcmp parameters with the given | |||
1812 | // GEPIndex. It then subtracts the two loaded values and feeds the difference | |||
1813 | // into the final phi node that selects the memcmp result. | |||
1814 | void MemCmpExpansion::emitLoadCompareByteBlock(unsigned Index, | |||
1815 | unsigned GEPIndex) { | |||
1816 | Value *Source1 = CI->getArgOperand(0); | |||
1817 | Value *Source2 = CI->getArgOperand(1); | |||
1818 | ||||
1819 | Builder.SetInsertPoint(LoadCmpBlocks[Index]); | |||
1820 | Type *LoadSizeType = Type::getInt8Ty(CI->getContext()); | |||
1821 | // Cast source to LoadSizeType*. | |||
1822 | if (Source1->getType() != LoadSizeType) | |||
1823 | Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); | |||
1824 | if (Source2->getType() != LoadSizeType) | |||
1825 | Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); | |||
1826 | ||||
1827 | // Get the base address using the GEPIndex. | |||
1828 | if (GEPIndex != 0) { | |||
1829 | Source1 = Builder.CreateGEP(LoadSizeType, Source1, | |||
1830 | ConstantInt::get(LoadSizeType, GEPIndex)); | |||
1831 | Source2 = Builder.CreateGEP(LoadSizeType, Source2, | |||
1832 | ConstantInt::get(LoadSizeType, GEPIndex)); | |||
1833 | } | |||
1834 | ||||
1835 | Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); | |||
1836 | Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); | |||
1837 | ||||
1838 | LoadSrc1 = Builder.CreateZExt(LoadSrc1, Type::getInt32Ty(CI->getContext())); | |||
1839 | LoadSrc2 = Builder.CreateZExt(LoadSrc2, Type::getInt32Ty(CI->getContext())); | |||
1840 | Value *Diff = Builder.CreateSub(LoadSrc1, LoadSrc2); | |||
1841 | ||||
1842 | PhiRes->addIncoming(Diff, LoadCmpBlocks[Index]); | |||
1843 | ||||
1844 | if (Index < (LoadCmpBlocks.size() - 1)) { | |||
1845 | // Branch early to EndBlock if a difference is found. Otherwise, continue | |||
1846 | // to the next LoadCmpBlock. | |||
1847 | Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff, | |||
1848 | ConstantInt::get(Diff->getType(), 0)); | |||
1849 | BranchInst *CmpBr = | |||
1850 | BranchInst::Create(EndBlock, LoadCmpBlocks[Index + 1], Cmp); | |||
1851 | Builder.Insert(CmpBr); | |||
1852 | } else { | |||
1853 | // The last block has an unconditional branch to EndBlock. | |||
1854 | BranchInst *CmpBr = BranchInst::Create(EndBlock); | |||
1855 | Builder.Insert(CmpBr); | |||
1856 | } | |||
1857 | } | |||
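// For example (editorial sketch with made-up names), a one-byte block at
// GEPIndex 14 emits roughly:
//   %s1 = getelementptr i8, i8* %a, i8 14
//   %s2 = getelementptr i8, i8* %b, i8 14
//   %v1 = load i8, i8* %s1
//   %v2 = load i8, i8* %s2
//   %z1 = zext i8 %v1 to i32
//   %z2 = zext i8 %v2 to i32
//   %d  = sub i32 %z1, %z2
// and %d feeds phi.res directly: for single bytes the difference already is
// the memcmp result, so no trip through res_block is needed.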
1858 | ||||
1859 | unsigned MemCmpExpansion::getNumLoads(unsigned Size) { | |||
1860 | return (Size / MaxLoadSize) + countPopulation(Size % MaxLoadSize); | |||
1861 | } | |||
1862 | ||||
1863 | unsigned MemCmpExpansion::getLoadSize(unsigned Size) { | |||
1864 | return MinAlign(PowerOf2Floor(Size), MaxLoadSize); | |||
1865 | } | |||
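// Worked example (editorial): with MaxLoadSize = 8, getNumLoads(15) returns
// 15 / 8 + popcount(15 % 8) = 1 + 3 = 4, i.e. loads of 8, 4, 2 and 1 bytes.
// getLoadSize(7) returns MinAlign(PowerOf2Floor(7), 8) = MinAlign(4, 8) = 4;
// for two powers of two MinAlign is simply the smaller one, so each step
// loads the largest power-of-2 chunk that fits in the remaining bytes.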
1866 | ||||
1867 | /// Generate an equality comparison for one or more pairs of loaded values. | |||
1868 | /// This is used in the case where the memcmp() call is compared equal or not | |||
1869 | /// equal to zero. | |||
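/// For illustration (editorial sketch with made-up names): with two 8-byte
/// load pairs, the block computes
///   %x0  = xor i64 %a0, %b0
///   %x1  = xor i64 %a1, %b1
///   %or  = or i64 %x0, %x1
///   %cmp = icmp ne i64 %or, 0
/// so a single branch tests all sixteen bytes for inequality at once.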
1870 | Value *MemCmpExpansion::getCompareLoadPairs(unsigned Index, unsigned Size, | |||
1871 | unsigned &NumBytesProcessed) { | |||
1872 | std::vector<Value *> XorList, OrList; | |||
1873 | Value *Diff = nullptr; // Set only on the multi-load path below. | |||
1874 | ||||
1875 | unsigned RemainingBytes = Size - NumBytesProcessed; | |||
1876 | unsigned NumLoadsRemaining = getNumLoads(RemainingBytes); | |||
1877 | unsigned NumLoads = std::min(NumLoadsRemaining, NumLoadsPerBlock); | |||
1878 | ||||
1879 | // For a single-block expansion, start inserting before the memcmp call. | |||
1880 | if (LoadCmpBlocks.empty()) | |||
1881 | Builder.SetInsertPoint(CI); | |||
1882 | else | |||
1883 | Builder.SetInsertPoint(LoadCmpBlocks[Index]); | |||
1884 | ||||
1885 | Value *Cmp = nullptr; | |||
1886 | for (unsigned i = 0; i < NumLoads; ++i) { | |||
1887 | unsigned LoadSize = getLoadSize(RemainingBytes); | |||
1888 | unsigned GEPIndex = NumBytesProcessed / LoadSize; | |||
1889 | NumBytesProcessed += LoadSize; | |||
1890 | RemainingBytes -= LoadSize; | |||
1891 | ||||
1892 | Type *LoadSizeType = IntegerType::get(CI->getContext(), LoadSize * 8); | |||
1893 | Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); | |||
1894 | assert(LoadSize <= MaxLoadSize && "Unexpected load type"); | |||
1895 | ||||
1896 | Value *Source1 = CI->getArgOperand(0); | |||
1897 | Value *Source2 = CI->getArgOperand(1); | |||
1898 | ||||
1899 | // Cast source to LoadSizeType*. | |||
1900 | if (Source1->getType() != LoadSizeType) | |||
1901 | Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); | |||
1902 | if (Source2->getType() != LoadSizeType) | |||
1903 | Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); | |||
1904 | ||||
1905 | // Get the base address using the GEPIndex. | |||
1906 | if (GEPIndex != 0) { | |||
1907 | Source1 = Builder.CreateGEP(LoadSizeType, Source1, | |||
1908 | ConstantInt::get(LoadSizeType, GEPIndex)); | |||
1909 | Source2 = Builder.CreateGEP(LoadSizeType, Source2, | |||
1910 | ConstantInt::get(LoadSizeType, GEPIndex)); | |||
1911 | } | |||
1912 | ||||
1913 | // Get a constant or load a value for each source address. | |||
1914 | Value *LoadSrc1 = nullptr; | |||
1915 | if (auto *Source1C = dyn_cast<Constant>(Source1)) | |||
1916 | LoadSrc1 = ConstantFoldLoadFromConstPtr(Source1C, LoadSizeType, DL); | |||
1917 | if (!LoadSrc1) | |||
1918 | LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); | |||
1919 | ||||
1920 | Value *LoadSrc2 = nullptr; | |||
1921 | if (auto *Source2C = dyn_cast<Constant>(Source2)) | |||
1922 | LoadSrc2 = ConstantFoldLoadFromConstPtr(Source2C, LoadSizeType, DL); | |||
1923 | if (!LoadSrc2) | |||
1924 | LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); | |||
1925 | ||||
1926 | if (NumLoads != 1) { | |||
1927 | if (LoadSizeType != MaxLoadType) { | |||
1928 | LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType); | |||
1929 | LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType); | |||
1930 | } | |||
1931 | // If we have multiple loads per block, we need to generate a composite | |||
1932 | // comparison using xor+or. | |||
1933 | Diff = Builder.CreateXor(LoadSrc1, LoadSrc2); | |||
1934 | Diff = Builder.CreateZExt(Diff, MaxLoadType); | |||
1935 | XorList.push_back(Diff); | |||
1936 | } else { | |||
1937 | // If there's only one load per block, we just compare the loaded values. | |||
1938 | Cmp = Builder.CreateICmpNE(LoadSrc1, LoadSrc2); | |||
1939 | } | |||
1940 | } | |||
1941 | ||||
1942 | auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> { | |||
1943 | std::vector<Value *> OutList; | |||
1944 | for (unsigned i = 0; i < InList.size() - 1; i = i + 2) { | |||
1945 | Value *Or = Builder.CreateOr(InList[i], InList[i + 1]); | |||
1946 | OutList.push_back(Or); | |||
1947 | } | |||
1948 | if (InList.size() % 2 != 0) | |||
1949 | OutList.push_back(InList.back()); | |||
1950 | return OutList; | |||
1951 | }; | |||
1952 | ||||
1953 | if (!Cmp) { | |||
1954 | // Pairwise OR the XOR results. | |||
1955 | OrList = pairWiseOr(XorList); | |||
1956 | ||||
1957 | // Pairwise OR the OR results until one result left. | |||
1958 | while (OrList.size() != 1) { | |||
1959 | OrList = pairWiseOr(OrList); | |||
1960 | } | |||
1961 | Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(OrList[0]->getType(), 0)); // Use OrList[0]'s type: Diff is null when no loads were emitted (the analyzer-flagged dereference). | |||
| ||||
1962 | } | |||
1963 | ||||
1964 | return Cmp; | |||
1965 | } | |||
1966 | ||||
1967 | void MemCmpExpansion::emitLoadCompareBlockMultipleLoads( | |||
1968 | unsigned Index, unsigned Size, unsigned &NumBytesProcessed) { | |||
1969 | Value *Cmp = getCompareLoadPairs(Index, Size, NumBytesProcessed); | |||
1970 | ||||
1971 | BasicBlock *NextBB = (Index == (LoadCmpBlocks.size() - 1)) | |||
1972 | ? EndBlock | |||
1973 | : LoadCmpBlocks[Index + 1]; | |||
1974 | // Early exit branch if difference found to ResultBlock. Otherwise, | |||
1975 | // continue to next LoadCmpBlock or EndBlock. | |||
1976 | BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp); | |||
1977 | Builder.Insert(CmpBr); | |||
1978 | ||||
1979 | // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0 | |||
1980 | // since early exit to ResultBlock was not taken (no difference was found in | |||
1981 | // any of the bytes). | |||
1982 | if (Index == LoadCmpBlocks.size() - 1) { | |||
1983 | Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0); | |||
1984 | PhiRes->addIncoming(Zero, LoadCmpBlocks[Index]); | |||
1985 | } | |||
1986 | } | |||
1987 | ||||
1988 | // This function creates the IR instructions for loading and comparing using the | |||
1989 | // given LoadSize. It loads the number of bytes specified by LoadSize from each | |||
1990 | // source of the memcmp parameters. It then does a subtract to see if there was | |||
1991 | // a difference in the loaded values. If a difference is found, it branches | |||
1992 | // with an early exit to the ResultBlock for calculating which source was | |||
1993 | // larger. Otherwise, it falls through to either the next LoadCmpBlock or the | |||
1994 | // EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with | |||
1995 | // a special case through emitLoadCompareByteBlock. The special handling can | |||
1996 | // simply subtract the loaded values and feed the difference to the result phi node. | |||
1997 | void MemCmpExpansion::emitLoadCompareBlock(unsigned Index, unsigned LoadSize, | |||
1998 | unsigned GEPIndex) { | |||
1999 | if (LoadSize == 1) { | |||
2000 | MemCmpExpansion::emitLoadCompareByteBlock(Index, GEPIndex); | |||
2001 | return; | |||
2002 | } | |||
2003 | ||||
2004 | Type *LoadSizeType = IntegerType::get(CI->getContext(), LoadSize * 8); | |||
2005 | Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); | |||
2006 | assert(LoadSize <= MaxLoadSize && "Unexpected load type"); | |||
2007 | ||||
2008 | Value *Source1 = CI->getArgOperand(0); | |||
2009 | Value *Source2 = CI->getArgOperand(1); | |||
2010 | ||||
2011 | Builder.SetInsertPoint(LoadCmpBlocks[Index]); | |||
2012 | // Cast source to LoadSizeType*. | |||
2013 | if (Source1->getType() != LoadSizeType) | |||
2014 | Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); | |||
2015 | if (Source2->getType() != LoadSizeType) | |||
2016 | Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); | |||
2017 | ||||
2018 | // Get the base address using the GEPIndex. | |||
2019 | if (GEPIndex != 0) { | |||
2020 | Source1 = Builder.CreateGEP(LoadSizeType, Source1, | |||
2021 | ConstantInt::get(LoadSizeType, GEPIndex)); | |||
2022 | Source2 = Builder.CreateGEP(LoadSizeType, Source2, | |||
2023 | ConstantInt::get(LoadSizeType, GEPIndex)); | |||
2024 | } | |||
2025 | ||||
2026 | // Load LoadSizeType from the base address. | |||
2027 | Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); | |||
2028 | Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); | |||
2029 | ||||
2030 | if (DL.isLittleEndian()) { | |||
2031 | Function *Bswap = Intrinsic::getDeclaration(CI->getModule(), | |||
2032 | Intrinsic::bswap, LoadSizeType); | |||
2033 | LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1); | |||
2034 | LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2); | |||
2035 | } | |||
2036 | ||||
2037 | if (LoadSizeType != MaxLoadType) { | |||
2038 | LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType); | |||
2039 | LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType); | |||
2040 | } | |||
2041 | ||||
2042 | // Add the loaded values to the phi nodes for calculating memcmp result only | |||
2043 | // if result is not used in a zero equality. | |||
2044 | if (!IsUsedForZeroCmp) { | |||
2045 | ResBlock.PhiSrc1->addIncoming(LoadSrc1, LoadCmpBlocks[Index]); | |||
2046 | ResBlock.PhiSrc2->addIncoming(LoadSrc2, LoadCmpBlocks[Index]); | |||
2047 | } | |||
2048 | ||||
2049 | Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, LoadSrc1, LoadSrc2); | |||
2050 | BasicBlock *NextBB = (Index == (LoadCmpBlocks.size() - 1)) | |||
2051 | ? EndBlock | |||
2052 | : LoadCmpBlocks[Index + 1]; | |||
2053 | // Early exit branch if difference found to ResultBlock. Otherwise, continue | |||
2054 | // to next LoadCmpBlock or EndBlock. | |||
2055 | BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp); | |||
2056 | Builder.Insert(CmpBr); | |||
2057 | ||||
2058 | // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0 | |||
2059 | // since early exit to ResultBlock was not taken (no difference was found in | |||
2060 | // any of the bytes). | |||
2061 | if (Index == LoadCmpBlocks.size() - 1) { | |||
2062 | Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0); | |||
2063 | PhiRes->addIncoming(Zero, LoadCmpBlocks[Index]); | |||
2064 | } | |||
2065 | } | |||
2066 | ||||
2067 | // This function populates the ResultBlock with a sequence to calculate the | |||
2068 | // memcmp result. It compares the two loaded source values and returns -1 if | |||
2069 | // src1 < src2 and 1 if src1 > src2. | |||
2070 | void MemCmpExpansion::emitMemCmpResultBlock() { | |||
2071 | // Special case: if the memcmp result is only used in a zero-equality test, the | |||
2072 | // exact value does not need to be calculated; we can simply return 1. | |||
2073 | if (IsUsedForZeroCmp) { | |||
2074 | BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt(); | |||
2075 | Builder.SetInsertPoint(ResBlock.BB, InsertPt); | |||
2076 | Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1); | |||
2077 | PhiRes->addIncoming(Res, ResBlock.BB); | |||
2078 | BranchInst *NewBr = BranchInst::Create(EndBlock); | |||
2079 | Builder.Insert(NewBr); | |||
2080 | return; | |||
2081 | } | |||
2082 | BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt(); | |||
2083 | Builder.SetInsertPoint(ResBlock.BB, InsertPt); | |||
2084 | ||||
2085 | Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1, | |||
2086 | ResBlock.PhiSrc2); | |||
2087 | ||||
2088 | Value *Res = | |||
2089 | Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1), | |||
2090 | ConstantInt::get(Builder.getInt32Ty(), 1)); | |||
2091 | ||||
2092 | BranchInst *NewBr = BranchInst::Create(EndBlock); | |||
2093 | Builder.Insert(NewBr); | |||
2094 | PhiRes->addIncoming(Res, ResBlock.BB); | |||
2095 | } | |||
2096 | ||||
2097 | unsigned MemCmpExpansion::calculateNumBlocks(unsigned Size) { | |||
2098 | unsigned NumBlocks = 0; | |||
2099 | bool HaveOneByteLoad = false; | |||
2100 | unsigned RemainingSize = Size; | |||
2101 | unsigned LoadSize = MaxLoadSize; | |||
2102 | while (RemainingSize) { | |||
2103 | if (LoadSize == 1) | |||
2104 | HaveOneByteLoad = true; | |||
2105 | NumBlocks += RemainingSize / LoadSize; | |||
2106 | RemainingSize = RemainingSize % LoadSize; | |||
2107 | LoadSize = LoadSize / 2; | |||
2108 | } | |||
2109 | NumBlocksNonOneByte = HaveOneByteLoad ? (NumBlocks - 1) : NumBlocks; | |||
2110 | ||||
2111 | if (IsUsedForZeroCmp) | |||
2112 | NumBlocks = NumBlocks / NumLoadsPerBlock + | |||
2113 | (NumBlocks % NumLoadsPerBlock != 0 ? 1 : 0); | |||
2114 | ||||
2115 | return NumBlocks; | |||
2116 | } | |||
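// Worked example (editorial): Size = 15 with MaxLoadSize = 8 gives one block
// each for loads of 8, 4, 2 and 1 bytes, so NumBlocks = 4 with a one-byte
// load and NumBlocksNonOneByte = 3. For a zero-equality memcmp with
// NumLoadsPerBlock = 2, those 4 loads are packed into ceil(4 / 2) = 2 blocks.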
2117 | ||||
2118 | void MemCmpExpansion::setupResultBlockPHINodes() { | |||
2119 | Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8); | |||
2120 | Builder.SetInsertPoint(ResBlock.BB); | |||
2121 | ResBlock.PhiSrc1 = | |||
2122 | Builder.CreatePHI(MaxLoadType, NumBlocksNonOneByte, "phi.src1"); | |||
2123 | ResBlock.PhiSrc2 = | |||
2124 | Builder.CreatePHI(MaxLoadType, NumBlocksNonOneByte, "phi.src2"); | |||
2125 | } | |||
2126 | ||||
2127 | void MemCmpExpansion::setupEndBlockPHINodes() { | |||
2128 | Builder.SetInsertPoint(&EndBlock->front()); | |||
2129 | PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res"); | |||
2130 | } | |||
2131 | ||||
2132 | Value *MemCmpExpansion::getMemCmpExpansionZeroCase(unsigned Size) { | |||
2133 | unsigned NumBytesProcessed = 0; | |||
2134 | // This loop populates each of the LoadCmpBlocks with the IR sequence to | |||
2135 | // handle multiple loads per block. | |||
2136 | for (unsigned i = 0; i < NumBlocks; ++i) | |||
| ||||
2137 | emitLoadCompareBlockMultipleLoads(i, Size, NumBytesProcessed); | |||
2138 | ||||
2139 | emitMemCmpResultBlock(); | |||
2140 | return PhiRes; | |||
2141 | } | |||
2142 | ||||
2143 | /// A memcmp expansion that compares equality with 0 and only has one block of | |||
2144 | /// load and compare can bypass the compare, branch, and phi IR that is required | |||
2145 | /// in the general case. | |||
2146 | Value *MemCmpExpansion::getMemCmpEqZeroOneBlock(unsigned Size) { | |||
2147 | unsigned NumBytesProcessed = 0; | |||
2148 | Value *Cmp = getCompareLoadPairs(0, Size, NumBytesProcessed); | |||
2149 | return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext())); | |||
2150 | } | |||
2151 | ||||
2152 | /// A memcmp expansion that only has one block of load and compare can bypass | |||
2153 | /// the compare, branch, and phi IR that is required in the general case. | |||
2154 | Value *MemCmpExpansion::getMemCmpOneBlock(unsigned Size) { | |||
2155 | assert(NumLoadsPerBlock == 1 && "Only handles one load pair per block"); | |||
2156 | ||||
2157 | Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8); | |||
2158 | Value *Source1 = CI->getArgOperand(0); | |||
2159 | Value *Source2 = CI->getArgOperand(1); | |||
2160 | ||||
2161 | // Cast source to LoadSizeType*. | |||
2162 | if (Source1->getType() != LoadSizeType) | |||
2163 | Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo()); | |||
2164 | if (Source2->getType() != LoadSizeType) | |||
2165 | Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo()); | |||
2166 | ||||
2167 | // Load LoadSizeType from the base address. | |||
2168 | Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1); | |||
2169 | Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2); | |||
2170 | ||||
2171 | if (DL.isLittleEndian() && Size != 1) { | |||
2172 | Function *Bswap = Intrinsic::getDeclaration(CI->getModule(), | |||
2173 | Intrinsic::bswap, LoadSizeType); | |||
2174 | LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1); | |||
2175 | LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2); | |||
2176 | } | |||
2177 | ||||
2178 | if (Size < 4) { | |||
2179 | // The i8 and i16 cases don't need compares. We zext the loaded values and | |||
2180 | // subtract them to get the suitable negative, zero, or positive i32 result. | |||
2181 | LoadSrc1 = Builder.CreateZExt(LoadSrc1, Builder.getInt32Ty()); | |||
2182 | LoadSrc2 = Builder.CreateZExt(LoadSrc2, Builder.getInt32Ty()); | |||
2183 | return Builder.CreateSub(LoadSrc1, LoadSrc2); | |||
2184 | } | |||
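| // Worked example for the small-size path above (illustrative): comparing | |||
| // bytes 0x01 and 0x02 yields zext(1) - zext(2) = -1; swapping the inputs | |||
| // yields +1; equal bytes yield 0. Because both operands were zero-extended | |||
| // to i32, the subtraction cannot wrap, so the sign is always meaningful. | |||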
2185 | ||||
2186 | // The result of memcmp is negative, zero, or positive, so produce that by | |||
2187 | // subtracting 2 extended compare bits: sub (ugt, ult). | |||
2188 | // If a target prefers to use selects to get -1/0/1, they should be able | |||
2189 | // to transform this later. The inverse transform (going from selects to math) | |||
2190 | // may not be possible in the DAG because the selects got converted into | |||
2191 | // branches before we got there. | |||
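| // Worked example (illustrative): for LoadSrc1 = 5 and LoadSrc2 = 9, ugt | |||
| // is 0 and ult is 1, giving 0 - 1 = -1; equal operands give 0 - 0 = 0; | |||
| // and LoadSrc1 > LoadSrc2 gives 1 - 0 = +1, i.e. memcmp's -1/0/1 contract | |||
| // without any branches. | |||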
2192 | Value *CmpUGT = Builder.CreateICmpUGT(LoadSrc1, LoadSrc2); | |||
2193 | Value *CmpULT = Builder.CreateICmpULT(LoadSrc1, LoadSrc2); | |||
2194 | Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty()); | |||
2195 | Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty()); | |||
2196 | return Builder.CreateSub(ZextUGT, ZextULT); | |||
2197 | } | |||
2198 | ||||
2199 | // This function expands the memcmp call into an inline expansion and returns | |||
2200 | // the memcmp result. | |||
2201 | Value *MemCmpExpansion::getMemCmpExpansion(uint64_t Size) { | |||
2202 | if (IsUsedForZeroCmp) | |||
2203 | return NumBlocks == 1 ? getMemCmpEqZeroOneBlock(Size) : | |||
2204 | getMemCmpExpansionZeroCase(Size); | |||
2205 | ||||
2206 | // TODO: Handle more than one load pair per block in getMemCmpOneBlock(). | |||
2207 | if (NumBlocks == 1 && NumLoadsPerBlock == 1) | |||
2208 | return getMemCmpOneBlock(Size); | |||
2209 | ||||
2210 | // This loop calls emitLoadCompareBlock for comparing Size bytes of the two | |||
2211 | // memcmp sources. It starts with loading using the maximum load size set by | |||
2212 | // the target. It processes any remaining bytes using a load size which is the | |||
2213 | // next smallest power of 2. | |||
2214 | unsigned LoadSize = MaxLoadSize; | |||
2215 | unsigned NumBytesToBeProcessed = Size; | |||
2216 | unsigned Index = 0; | |||
2217 | while (NumBytesToBeProcessed) { | |||
2218 | // Calculate how many blocks we can create with the current load size. | |||
2219 | unsigned NumBlocks = NumBytesToBeProcessed / LoadSize; | |||
2220 | unsigned GEPIndex = (Size - NumBytesToBeProcessed) / LoadSize; | |||
2221 | NumBytesToBeProcessed = NumBytesToBeProcessed % LoadSize; | |||
2222 | ||||
2223 | // For each NumBlocks, populate the instruction sequence for loading and | |||
2224 | // comparing LoadSize bytes. | |||
2225 | while (NumBlocks--) { | |||
2226 | emitLoadCompareBlock(Index, LoadSize, GEPIndex); | |||
2227 | Index++; | |||
2228 | GEPIndex++; | |||
2229 | } | |||
2230 | // Get the next LoadSize to use. | |||
2231 | LoadSize = LoadSize / 2; | |||
2232 | } | |||
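| // For example (illustrative): Size = 15 with MaxLoadSize = 8 emits one | |||
| // 8-byte, one 4-byte, one 2-byte, and one 1-byte block (GEP indices 0, 2, | |||
| // 6, and 14 in units of each load type), matching the IR example in the | |||
| // comment preceding expandMemCmp() below. | |||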
2233 | ||||
2234 | emitMemCmpResultBlock(); | |||
2235 | return PhiRes; | |||
2236 | } | |||
2237 | ||||
2238 | // This function checks to see if an expansion of memcmp can be generated. | |||
2239 | // It checks for a constant compare size that is less than the max inline size. | |||
2240 | // If an expansion cannot occur, it returns false to leave the call as a library call. | |||
2241 | // Otherwise, the library call is replaced with a new IR instruction sequence. | |||
2242 | /// We want to transform: | |||
2243 | /// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15) | |||
2244 | /// To: | |||
2245 | /// loadbb: | |||
2246 | /// %0 = bitcast i32* %buffer2 to i8* | |||
2247 | /// %1 = bitcast i32* %buffer1 to i8* | |||
2248 | /// %2 = bitcast i8* %1 to i64* | |||
2249 | /// %3 = bitcast i8* %0 to i64* | |||
2250 | /// %4 = load i64, i64* %2 | |||
2251 | /// %5 = load i64, i64* %3 | |||
2252 | /// %6 = call i64 @llvm.bswap.i64(i64 %4) | |||
2253 | /// %7 = call i64 @llvm.bswap.i64(i64 %5) | |||
2254 | /// %8 = sub i64 %6, %7 | |||
2255 | /// %9 = icmp ne i64 %8, 0 | |||
2256 | /// br i1 %9, label %res_block, label %loadbb1 | |||
2257 | /// res_block: ; preds = %loadbb2, | |||
2258 | /// %loadbb1, %loadbb | |||
2259 | /// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ] | |||
2260 | /// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ] | |||
2261 | /// %10 = icmp ult i64 %phi.src1, %phi.src2 | |||
2262 | /// %11 = select i1 %10, i32 -1, i32 1 | |||
2263 | /// br label %endblock | |||
2264 | /// loadbb1: ; preds = %loadbb | |||
2265 | /// %12 = bitcast i32* %buffer2 to i8* | |||
2266 | /// %13 = bitcast i32* %buffer1 to i8* | |||
2267 | /// %14 = bitcast i8* %13 to i32* | |||
2268 | /// %15 = bitcast i8* %12 to i32* | |||
2269 | /// %16 = getelementptr i32, i32* %14, i32 2 | |||
2270 | /// %17 = getelementptr i32, i32* %15, i32 2 | |||
2271 | /// %18 = load i32, i32* %16 | |||
2272 | /// %19 = load i32, i32* %17 | |||
2273 | /// %20 = call i32 @llvm.bswap.i32(i32 %18) | |||
2274 | /// %21 = call i32 @llvm.bswap.i32(i32 %19) | |||
2275 | /// %22 = zext i32 %20 to i64 | |||
2276 | /// %23 = zext i32 %21 to i64 | |||
2277 | /// %24 = sub i64 %22, %23 | |||
2278 | /// %25 = icmp ne i64 %24, 0 | |||
2279 | /// br i1 %25, label %res_block, label %loadbb2 | |||
2280 | /// loadbb2: ; preds = %loadbb1 | |||
2281 | /// %26 = bitcast i32* %buffer2 to i8* | |||
2282 | /// %27 = bitcast i32* %buffer1 to i8* | |||
2283 | /// %28 = bitcast i8* %27 to i16* | |||
2284 | /// %29 = bitcast i8* %26 to i16* | |||
2285 | /// %30 = getelementptr i16, i16* %28, i16 6 | |||
2286 | /// %31 = getelementptr i16, i16* %29, i16 6 | |||
2287 | /// %32 = load i16, i16* %30 | |||
2288 | /// %33 = load i16, i16* %31 | |||
2289 | /// %34 = call i16 @llvm.bswap.i16(i16 %32) | |||
2290 | /// %35 = call i16 @llvm.bswap.i16(i16 %33) | |||
2291 | /// %36 = zext i16 %34 to i64 | |||
2292 | /// %37 = zext i16 %35 to i64 | |||
2293 | /// %38 = sub i64 %36, %37 | |||
2294 | /// %39 = icmp ne i64 %38, 0 | |||
2295 | /// br i1 %39, label %res_block, label %loadbb3 | |||
2296 | /// loadbb3: ; preds = %loadbb2 | |||
2297 | /// %40 = bitcast i32* %buffer2 to i8* | |||
2298 | /// %41 = bitcast i32* %buffer1 to i8* | |||
2299 | /// %42 = getelementptr i8, i8* %41, i8 14 | |||
2300 | /// %43 = getelementptr i8, i8* %40, i8 14 | |||
2301 | /// %44 = load i8, i8* %42 | |||
2302 | /// %45 = load i8, i8* %43 | |||
2303 | /// %46 = zext i8 %44 to i32 | |||
2304 | /// %47 = zext i8 %45 to i32 | |||
2305 | /// %48 = sub i32 %46, %47 | |||
2306 | /// br label %endblock | |||
2307 | /// endblock: ; preds = %res_block, | |||
2308 | /// %loadbb3 | |||
2309 | /// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ] | |||
2310 | /// ret i32 %phi.res | |||
2311 | static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI, | |||
2312 | const TargetLowering *TLI, const DataLayout *DL) { | |||
2313 | NumMemCmpCalls++; | |||
2314 | ||||
2315 | // TTI call to check if target would like to expand memcmp. Also, get the | |||
2316 | // MaxLoadSize. | |||
2317 | unsigned MaxLoadSize; | |||
2318 | if (!TTI->enableMemCmpExpansion(MaxLoadSize)) | |||
2319 | return false; | |||
2320 | ||||
2321 | // Early exit from expansion if -Oz. | |||
2322 | if (CI->getFunction()->optForMinSize()) | |||
2323 | return false; | |||
2324 | ||||
2325 | // Early exit from expansion if size is not a constant. | |||
2326 | ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2)); | |||
2327 | if (!SizeCast) { | |||
2328 | NumMemCmpNotConstant++; | |||
2329 | return false; | |||
2330 | } | |||
2331 | ||||
2332 | // Scale the max size down if the target can load more bytes than we need. | |||
2333 | uint64_t SizeVal = SizeCast->getZExtValue(); | |||
2334 | if (MaxLoadSize > SizeVal) | |||
2335 | MaxLoadSize = 1 << SizeCast->getValue().logBase2(); | |||
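| // For example (illustrative): SizeVal = 5 with a target MaxLoadSize of 8 | |||
| // scales MaxLoadSize down to 1 << logBase2(5) = 4, the largest power of | |||
| // two not exceeding the compare size. | |||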
2336 | ||||
2337 | // Calculate how many load pairs are needed for the constant size. | |||
2338 | unsigned NumLoads = 0; | |||
2339 | unsigned RemainingSize = SizeVal; | |||
2340 | unsigned LoadSize = MaxLoadSize; | |||
2341 | while (RemainingSize) { | |||
2342 | NumLoads += RemainingSize / LoadSize; | |||
2343 | RemainingSize = RemainingSize % LoadSize; | |||
2344 | LoadSize = LoadSize / 2; | |||
2345 | } | |||
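| // For example (illustrative): SizeVal = 15 with MaxLoadSize = 8 counts | |||
| // 15/8 + 7/4 + 3/2 + 1/1 = 4 load pairs in total. | |||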
2346 | ||||
2347 | // Don't expand if this will require more loads than desired by the target. | |||
2348 | if (NumLoads > TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize())) { | |||
2349 | NumMemCmpGreaterThanMax++; | |||
2350 | return false; | |||
2351 | } | |||
2352 | ||||
2353 | NumMemCmpInlined++; | |||
2354 | ||||
2355 | // MemCmpHelper object creates and sets up basic blocks required for | |||
2356 | // expanding memcmp with size SizeVal. | |||
2357 | unsigned NumLoadsPerBlock = MemCmpNumLoadsPerBlock; | |||
2358 | MemCmpExpansion MemCmpHelper(CI, SizeVal, MaxLoadSize, NumLoadsPerBlock, *DL); | |||
2359 | ||||
2360 | Value *Res = MemCmpHelper.getMemCmpExpansion(SizeVal); | |||
2361 | ||||
2362 | // Replace call with result of expansion and erase call. | |||
2363 | CI->replaceAllUsesWith(Res); | |||
2364 | CI->eraseFromParent(); | |||
2365 | ||||
2366 | return true; | |||
2367 | } | |||
2368 | ||||
2369 | bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) { | |||
2370 | BasicBlock *BB = CI->getParent(); | |||
2371 | ||||
2372 | // Lower inline assembly if we can. | |||
2373 | // If we found an inline asm expression, and if the target knows how to | |||
2374 | // lower it to normal LLVM code, do so now. | |||
2375 | if (TLI && isa<InlineAsm>(CI->getCalledValue())) { | |||
2376 | if (TLI->ExpandInlineAsm(CI)) { | |||
2377 | // Avoid invalidating the iterator. | |||
2378 | CurInstIterator = BB->begin(); | |||
2379 | // Avoid processing instructions out of order, which could cause | |||
2380 | // reuse before a value is defined. | |||
2381 | SunkAddrs.clear(); | |||
2382 | return true; | |||
2383 | } | |||
2384 | // Sink address computing for memory operands into the block. | |||
2385 | if (optimizeInlineAsmInst(CI)) | |||
2386 | return true; | |||
2387 | } | |||
2388 | ||||
2389 | // Align the pointer arguments to this call if the target thinks it's a good | |||
2390 | // idea. | |||
2391 | unsigned MinSize, PrefAlign; | |||
2392 | if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) { | |||
2393 | for (auto &Arg : CI->arg_operands()) { | |||
2394 | // We want to align both objects whose address is used directly and | |||
2395 | // objects whose address is used in casts and GEPs, though it only makes | |||
2396 | // sense for GEPs if the offset is a multiple of the desired alignment and | |||
2397 | // if size - offset meets the size threshold. | |||
2398 | if (!Arg->getType()->isPointerTy()) | |||
2399 | continue; | |||
2400 | APInt Offset(DL->getPointerSizeInBits( | |||
2401 | cast<PointerType>(Arg->getType())->getAddressSpace()), | |||
2402 | 0); | |||
2403 | Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset); | |||
2404 | uint64_t Offset2 = Offset.getLimitedValue(); | |||
2405 | if ((Offset2 & (PrefAlign-1)) != 0) | |||
2406 | continue; | |||
2407 | AllocaInst *AI; | |||
2408 | if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign && | |||
2409 | DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2) | |||
2410 | AI->setAlignment(PrefAlign); | |||
2411 | // Global variables can only be aligned if they are defined in this | |||
2412 | // object (i.e. they are uniquely initialized in this object), and | |||
2413 | // over-aligning global variables that have an explicit section is | |||
2414 | // forbidden. | |||
2415 | GlobalVariable *GV; | |||
2416 | if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() && | |||
2417 | GV->getPointerAlignment(*DL) < PrefAlign && | |||
2418 | DL->getTypeAllocSize(GV->getValueType()) >= | |||
2419 | MinSize + Offset2) | |||
2420 | GV->setAlignment(PrefAlign); | |||
2421 | } | |||
2422 | // If this is a memcpy (or similar) then we may be able to improve the | |||
2423 | // alignment. | |||
2424 | if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) { | |||
2425 | unsigned Align = getKnownAlignment(MI->getDest(), *DL); | |||
2426 | if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) | |||
2427 | Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL)); | |||
2428 | if (Align > MI->getAlignment()) | |||
2429 | MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align)); | |||
2430 | } | |||
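| // For example (illustrative): a memcpy whose destination is known 16-byte | |||
| // aligned but whose source is only known 4-byte aligned gets Align = | |||
| // min(16, 4) = 4; the guard above only ever raises the recorded alignment, | |||
| // never lowers it. | |||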
2431 | } | |||
2432 | ||||
2433 | // If we have a cold call site, try to sink addressing computation into the | |||
2434 | // cold block. This interacts with our handling for loads and stores to | |||
2435 | // ensure that we can fold all uses of a potential addressing computation | |||
2436 | // into their uses. TODO: generalize this to work over profiling data | |||
2437 | if (!OptSize && CI->hasFnAttr(Attribute::Cold)) | |||
2438 | for (auto &Arg : CI->arg_operands()) { | |||
2439 | if (!Arg->getType()->isPointerTy()) | |||
2440 | continue; | |||
2441 | unsigned AS = Arg->getType()->getPointerAddressSpace(); | |||
2442 | return optimizeMemoryInst(CI, Arg, Arg->getType(), AS); | |||
2443 | } | |||
2444 | ||||
2445 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI); | |||
2446 | if (II) { | |||
2447 | switch (II->getIntrinsicID()) { | |||
2448 | default: break; | |||
2449 | case Intrinsic::objectsize: { | |||
2450 | // Lower all uses of llvm.objectsize.* | |||
2451 | ConstantInt *RetVal = | |||
2452 | lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true); | |||
2453 | // Substituting this can cause recursive simplifications, which can | |||
2454 | // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case | |||
2455 | // this happens. | |||
2456 | ||||
2457 | Value *CurValue = &*CurInstIterator; | |||
2458 | WeakTrackingVH IterHandle(CurValue); | |||
2459 | ||||
2460 | replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr); | |||
2461 | ||||
2462 | // If the iterator instruction was recursively deleted, start over at the | |||
2463 | // start of the block. | |||
2464 | if (IterHandle != CurValue) { | |||
2465 | CurInstIterator = BB->begin(); | |||
2466 | SunkAddrs.clear(); | |||
2467 | } | |||
2468 | return true; | |||
2469 | } | |||
2470 | case Intrinsic::aarch64_stlxr: | |||
2471 | case Intrinsic::aarch64_stxr: { | |||
2472 | ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0)); | |||
2473 | if (!ExtVal || !ExtVal->hasOneUse() || | |||
2474 | ExtVal->getParent() == CI->getParent()) | |||
2475 | return false; | |||
2476 | // Sink a zext feeding stlxr/stxr before it, so it can be folded into it. | |||
2477 | ExtVal->moveBefore(CI); | |||
2478 | // Mark this instruction as "inserted by CGP", so that other | |||
2479 | // optimizations don't touch it. | |||
2480 | InsertedInsts.insert(ExtVal); | |||
2481 | return true; | |||
2482 | } | |||
2483 | case Intrinsic::invariant_group_barrier: | |||
2484 | II->replaceAllUsesWith(II->getArgOperand(0)); | |||
2485 | II->eraseFromParent(); | |||
2486 | return true; | |||
2487 | ||||
2488 | case Intrinsic::cttz: | |||
2489 | case Intrinsic::ctlz: | |||
2490 | // If counting zeros is expensive, try to avoid it. | |||
2491 | return despeculateCountZeros(II, TLI, DL, ModifiedDT); | |||
2492 | } | |||
2493 | ||||
2494 | if (TLI) { | |||
2495 | SmallVector<Value*, 2> PtrOps; | |||
2496 | Type *AccessTy; | |||
2497 | if (TLI->getAddrModeArguments(II, PtrOps, AccessTy)) | |||
2498 | while (!PtrOps.empty()) { | |||
2499 | Value *PtrVal = PtrOps.pop_back_val(); | |||
2500 | unsigned AS = PtrVal->getType()->getPointerAddressSpace(); | |||
2501 | if (optimizeMemoryInst(II, PtrVal, AccessTy, AS)) | |||
2502 | return true; | |||
2503 | } | |||
2504 | } | |||
2505 | } | |||
2506 | ||||
2507 | // From here on out we're working with named functions. | |||
2508 | if (!CI->getCalledFunction()) return false; | |||
2509 | ||||
2510 | // Lower all default uses of _chk calls. This is very similar | |||
2511 | // to what InstCombineCalls does, but here we are only lowering calls | |||
2512 | // to fortified library functions (e.g. __memcpy_chk) that have the default | |||
2513 | // "don't know" as the objectsize. Anything else should be left alone. | |||
2514 | FortifiedLibCallSimplifier Simplifier(TLInfo, true); | |||
2515 | if (Value *V = Simplifier.optimizeCall(CI)) { | |||
2516 | CI->replaceAllUsesWith(V); | |||
2517 | CI->eraseFromParent(); | |||
2518 | return true; | |||
2519 | } | |||
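| // For example (illustrative): a fortified call such as | |||
| //   __memcpy_chk(dst, src, n, (size_t)-1) | |||
| // carries the "don't know" object size (-1), so it can be replaced with a | |||
| // plain memcpy(dst, src, n); calls with a real object size are left alone. | |||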
2520 | ||||
2521 | LibFunc Func; | |||
2522 | if (TLInfo->getLibFunc(ImmutableCallSite(CI), Func) && | |||
2523 | Func == LibFunc_memcmp && expandMemCmp(CI, TTI, TLI, DL)) { | |||
2524 | ModifiedDT = true; | |||
2525 | return true; | |||
2526 | } | |||
2527 | return false; | |||
2528 | } | |||
2529 | ||||
2530 | /// Look for opportunities to duplicate return instructions to the predecessor | |||
2531 | /// to enable tail call optimizations. The case it is currently looking for is: | |||
2532 | /// @code | |||
2533 | /// bb0: | |||
2534 | /// %tmp0 = tail call i32 @f0() | |||
2535 | /// br label %return | |||
2536 | /// bb1: | |||
2537 | /// %tmp1 = tail call i32 @f1() | |||
2538 | /// br label %return | |||
2539 | /// bb2: | |||
2540 | /// %tmp2 = tail call i32 @f2() | |||
2541 | /// br label %return | |||
2542 | /// return: | |||
2543 | /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] | |||
2544 | /// ret i32 %retval | |||
2545 | /// @endcode | |||
2546 | /// | |||
2547 | /// => | |||
2548 | /// | |||
2549 | /// @code | |||
2550 | /// bb0: | |||
2551 | /// %tmp0 = tail call i32 @f0() | |||
2552 | /// ret i32 %tmp0 | |||
2553 | /// bb1: | |||
2554 | /// %tmp1 = tail call i32 @f1() | |||
2555 | /// ret i32 %tmp1 | |||
2556 | /// bb2: | |||
2557 | /// %tmp2 = tail call i32 @f2() | |||
2558 | /// ret i32 %tmp2 | |||
2559 | /// @endcode | |||
2560 | bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) { | |||
2561 | if (!TLI) | |||
2562 | return false; | |||
2563 | ||||
2564 | ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator()); | |||
2565 | if (!RetI) | |||
2566 | return false; | |||
2567 | ||||
2568 | PHINode *PN = nullptr; | |||
2569 | BitCastInst *BCI = nullptr; | |||
2570 | Value *V = RetI->getReturnValue(); | |||
2571 | if (V) { | |||
2572 | BCI = dyn_cast<BitCastInst>(V); | |||
2573 | if (BCI) | |||
2574 | V = BCI->getOperand(0); | |||
2575 | ||||
2576 | PN = dyn_cast<PHINode>(V); | |||
2577 | if (!PN) | |||
2578 | return false; | |||
2579 | } | |||
2580 | ||||
2581 | if (PN && PN->getParent() != BB) | |||
2582 | return false; | |||
2583 | ||||
2584 | // Make sure there are no instructions between the PHI and return, or that the | |||
2585 | // return is the first instruction in the block. | |||
2586 | if (PN) { | |||
2587 | BasicBlock::iterator BI = BB->begin(); | |||
2588 | do { ++BI; } while (isa<DbgInfoIntrinsic>(BI)); | |||
2589 | if (&*BI == BCI) | |||
2590 | // Also skip over the bitcast. | |||
2591 | ++BI; | |||
2592 | if (&*BI != RetI) | |||
2593 | return false; | |||
2594 | } else { | |||
2595 | BasicBlock::iterator BI = BB->begin(); | |||
2596 | while (isa<DbgInfoIntrinsic>(BI)) ++BI; | |||
2597 | if (&*BI != RetI) | |||
2598 | return false; | |||
2599 | } | |||
2600 | ||||
2601 | /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail | |||
2602 | /// call. | |||
2603 | const Function *F = BB->getParent(); | |||
2604 | SmallVector<CallInst*, 4> TailCalls; | |||
2605 | if (PN) { | |||
2606 | for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) { | |||
2607 | CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I)); | |||
2608 | // Make sure the phi value is indeed produced by the tail call. | |||
2609 | if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) && | |||
2610 | TLI->mayBeEmittedAsTailCall(CI) && | |||
2611 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
2612 | TailCalls.push_back(CI); | |||
2613 | } | |||
2614 | } else { | |||
2615 | SmallPtrSet<BasicBlock*, 4> VisitedBBs; | |||
2616 | for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) { | |||
2617 | if (!VisitedBBs.insert(*PI).second) | |||
2618 | continue; | |||
2619 | ||||
2620 | BasicBlock::InstListType &InstList = (*PI)->getInstList(); | |||
2621 | BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin(); | |||
2622 | BasicBlock::InstListType::reverse_iterator RE = InstList.rend(); | |||
2623 | do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI)); | |||
2624 | if (RI == RE) | |||
2625 | continue; | |||
2626 | ||||
2627 | CallInst *CI = dyn_cast<CallInst>(&*RI); | |||
2628 | if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && | |||
2629 | attributesPermitTailCall(F, CI, RetI, *TLI)) | |||
2630 | TailCalls.push_back(CI); | |||
2631 | } | |||
2632 | } | |||
2633 | ||||
2634 | bool Changed = false; | |||
2635 | for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) { | |||
2636 | CallInst *CI = TailCalls[i]; | |||
2637 | CallSite CS(CI); | |||
2638 | ||||
2639 | // Conservatively require the attributes of the call to match those of the | |||
2640 | // return. Ignore noalias because it doesn't affect the call sequence. | |||
2641 | AttributeList CalleeAttrs = CS.getAttributes(); | |||
2642 | if (AttrBuilder(F->getAttributes(), AttributeList::ReturnIndex) | |||
2643 | .removeAttribute(Attribute::NoAlias) != | |||
2644 | AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex) | |||
2645 | .removeAttribute(Attribute::NoAlias)) | |||
2646 | continue; | |||
2647 | ||||
2648 | // Make sure the call instruction is followed by an unconditional branch to | |||
2649 | // the return block. | |||
2650 | BasicBlock *CallBB = CI->getParent(); | |||
2651 | BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator()); | |||
2652 | if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB) | |||
2653 | continue; | |||
2654 | ||||
2655 | // Duplicate the return into CallBB. | |||
2656 | (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB); | |||
2657 | ModifiedDT = Changed = true; | |||
2658 | ++NumRetsDup; | |||
2659 | } | |||
2660 | ||||
2661 | // If we eliminated all predecessors of the block, delete the block now. | |||
2662 | if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) | |||
2663 | BB->eraseFromParent(); | |||
2664 | ||||
2665 | return Changed; | |||
2666 | } | |||
2667 | ||||
2668 | //===----------------------------------------------------------------------===// | |||
2669 | // Memory Optimization | |||
2670 | //===----------------------------------------------------------------------===// | |||
2671 | ||||
2672 | namespace { | |||
2673 | ||||
2674 | /// This is an extended version of TargetLowering::AddrMode | |||
2675 | /// which holds actual Value*'s for register values. | |||
2676 | struct ExtAddrMode : public TargetLowering::AddrMode { | |||
2677 | Value *BaseReg = nullptr; | |||
2678 | Value *ScaledReg = nullptr; | |||
2679 | Value *OriginalValue = nullptr; | |||
2680 | ||||
2681 | enum FieldName { | |||
2682 | NoField = 0x00, | |||
2683 | BaseRegField = 0x01, | |||
2684 | BaseGVField = 0x02, | |||
2685 | BaseOffsField = 0x04, | |||
2686 | ScaledRegField = 0x08, | |||
2687 | ScaleField = 0x10, | |||
2688 | MultipleFields = 0xff | |||
2689 | }; | |||
2690 | ||||
2691 | ExtAddrMode() = default; | |||
2692 | ||||
2693 | void print(raw_ostream &OS) const; | |||
2694 | void dump() const; | |||
2695 | ||||
2696 | FieldName compare(const ExtAddrMode &other) { | |||
2697 | // First check that the types are the same on each field, as differing types | |||
2698 | // is something we can't cope with later on. | |||
2699 | if (BaseReg && other.BaseReg && | |||
2700 | BaseReg->getType() != other.BaseReg->getType()) | |||
2701 | return MultipleFields; | |||
2702 | if (BaseGV && other.BaseGV && | |||
2703 | BaseGV->getType() != other.BaseGV->getType()) | |||
2704 | return MultipleFields; | |||
2705 | if (ScaledReg && other.ScaledReg && | |||
2706 | ScaledReg->getType() != other.ScaledReg->getType()) | |||
2707 | return MultipleFields; | |||
2708 | ||||
2709 | // Check each field to see if it differs. | |||
2710 | unsigned Result = NoField; | |||
2711 | if (BaseReg != other.BaseReg) | |||
2712 | Result |= BaseRegField; | |||
2713 | if (BaseGV != other.BaseGV) | |||
2714 | Result |= BaseGVField; | |||
2715 | if (BaseOffs != other.BaseOffs) | |||
2716 | Result |= BaseOffsField; | |||
2717 | if (ScaledReg != other.ScaledReg) | |||
2718 | Result |= ScaledRegField; | |||
2719 | // Don't count 0 as being a different scale, because that actually means | |||
2720 | // unscaled (which will already be counted by having no ScaledReg). | |||
2721 | if (Scale && other.Scale && Scale != other.Scale) | |||
2722 | Result |= ScaleField; | |||
2723 | ||||
2724 | if (countPopulation(Result) > 1) | |||
2725 | return MultipleFields; | |||
2726 | else | |||
2727 | return static_cast<FieldName>(Result); | |||
2728 | } | |||
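| // For example (illustrative): [Base:%p + 8] and [Base:%p + 16] differ only | |||
| // in BaseOffs, so compare() returns BaseOffsField; if they also differed in | |||
| // ScaledReg, the popcount check above would return MultipleFields instead. | |||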
2729 | ||||
2730 | // AddrModes with a base reg or gv where the reg/gv is just the original | |||
2731 | // value are trivial. | |||
2732 | bool isTrivial() { | |||
2733 | bool Trivial = (BaseGV && BaseGV == OriginalValue) || | |||
2734 | (BaseReg && BaseReg == OriginalValue); | |||
2735 | // If the AddrMode is trivial it shouldn't have an offset or be scaled. | |||
2736 | if (Trivial) { | |||
2737 | assert(BaseOffs == 0); | |||
2738 | assert(Scale == 0); | |||
2739 | } | |||
2740 | return Trivial; | |||
2741 | } | |||
2742 | }; | |||
2743 | ||||
2744 | } // end anonymous namespace | |||
2745 | ||||
2746 | #ifndef NDEBUG | |||
2747 | static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) { | |||
2748 | AM.print(OS); | |||
2749 | return OS; | |||
2750 | } | |||
2751 | #endif | |||
2752 | ||||
2753 | #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) | |||
2754 | void ExtAddrMode::print(raw_ostream &OS) const { | |||
2755 | bool NeedPlus = false; | |||
2756 | OS << "["; | |||
2757 | if (BaseGV) { | |||
2758 | OS << (NeedPlus ? " + " : "") | |||
2759 | << "GV:"; | |||
2760 | BaseGV->printAsOperand(OS, /*PrintType=*/false); | |||
2761 | NeedPlus = true; | |||
2762 | } | |||
2763 | ||||
2764 | if (BaseOffs) { | |||
2765 | OS << (NeedPlus ? " + " : "") | |||
2766 | << BaseOffs; | |||
2767 | NeedPlus = true; | |||
2768 | } | |||
2769 | ||||
2770 | if (BaseReg) { | |||
2771 | OS << (NeedPlus ? " + " : "") | |||
2772 | << "Base:"; | |||
2773 | BaseReg->printAsOperand(OS, /*PrintType=*/false); | |||
2774 | NeedPlus = true; | |||
2775 | } | |||
2776 | if (Scale) { | |||
2777 | OS << (NeedPlus ? " + " : "") | |||
2778 | << Scale << "*"; | |||
2779 | ScaledReg->printAsOperand(OS, /*PrintType=*/false); | |||
2780 | } | |||
2781 | ||||
2782 | OS << ']'; | |||
2783 | } | |||
2784 | ||||
2785 | LLVM_DUMP_METHOD void ExtAddrMode::dump() const { | |||
2786 | print(dbgs()); | |||
2787 | dbgs() << '\n'; | |||
2788 | } | |||
2789 | #endif | |||
2790 | ||||
2791 | namespace { | |||
2792 | ||||
2793 | /// \brief This class provides transaction based operation on the IR. | |||
2794 | /// Every change made through this class is recorded in the internal state and | |||
2795 | /// can be undone (rollback) until commit is called. | |||
2796 | class TypePromotionTransaction { | |||
2797 | /// \brief This represents the common interface of the individual transaction. | |||
2798 | /// Each class implements the logic for doing one specific modification on | |||
2799 | /// the IR via the TypePromotionTransaction. | |||
2800 | class TypePromotionAction { | |||
2801 | protected: | |||
2802 | /// The Instruction modified. | |||
2803 | Instruction *Inst; | |||
2804 | ||||
2805 | public: | |||
2806 | /// \brief Constructor of the action. | |||
2807 | /// The constructor performs the related action on the IR. | |||
2808 | TypePromotionAction(Instruction *Inst) : Inst(Inst) {} | |||
2809 | ||||
2810 | virtual ~TypePromotionAction() = default; | |||
2811 | ||||
2812 | /// \brief Undo the modification done by this action. | |||
2813 | /// When this method is called, the IR must be in the same state as it was | |||
2814 | /// before this action was applied. | |||
2815 | /// \pre Undoing the action works if and only if the IR is in the exact same | |||
2816 | /// state as it was directly after this action was applied. | |||
2817 | virtual void undo() = 0; | |||
2818 | ||||
2819 | /// \brief Advocate every change made by this action. | |||
2820 | /// When the results on the IR of the action are to be kept, it is important | |||
2821 | /// to call this function, otherwise hidden information may be kept forever. | |||
2822 | virtual void commit() { | |||
2823 | // Nothing to be done, this action is not doing anything. | |||
2824 | } | |||
2825 | }; | |||
2826 | ||||
2827 | /// \brief Utility to remember the position of an instruction. | |||
2828 | class InsertionHandler { | |||
2829 | /// Position of an instruction. | |||
2830 | /// Either an instruction: | |||
2831 | /// - Is the first in a basic block: BB is used. | |||
2832 | /// - Has a previous instruction: PrevInst is used. | |||
2833 | union { | |||
2834 | Instruction *PrevInst; | |||
2835 | BasicBlock *BB; | |||
2836 | } Point; | |||
2837 | ||||
2838 | /// Remember whether or not the instruction had a previous instruction. | |||
2839 | bool HasPrevInstruction; | |||
2840 | ||||
2841 | public: | |||
2842 | /// \brief Record the position of \p Inst. | |||
2843 | InsertionHandler(Instruction *Inst) { | |||
2844 | BasicBlock::iterator It = Inst->getIterator(); | |||
2845 | HasPrevInstruction = (It != (Inst->getParent()->begin())); | |||
2846 | if (HasPrevInstruction) | |||
2847 | Point.PrevInst = &*--It; | |||
2848 | else | |||
2849 | Point.BB = Inst->getParent(); | |||
2850 | } | |||
2851 | ||||
2852 | /// \brief Insert \p Inst at the recorded position. | |||
2853 | void insert(Instruction *Inst) { | |||
2854 | if (HasPrevInstruction) { | |||
2855 | if (Inst->getParent()) | |||
2856 | Inst->removeFromParent(); | |||
2857 | Inst->insertAfter(Point.PrevInst); | |||
2858 | } else { | |||
2859 | Instruction *Position = &*Point.BB->getFirstInsertionPt(); | |||
2860 | if (Inst->getParent()) | |||
2861 | Inst->moveBefore(Position); | |||
2862 | else | |||
2863 | Inst->insertBefore(Position); | |||
2864 | } | |||
2865 | } | |||
2866 | }; | |||
2867 | ||||
2868 | /// \brief Move an instruction before another. | |||
2869 | class InstructionMoveBefore : public TypePromotionAction { | |||
2870 | /// Original position of the instruction. | |||
2871 | InsertionHandler Position; | |||
2872 | ||||
2873 | public: | |||
2874 | /// \brief Move \p Inst before \p Before. | |||
2875 | InstructionMoveBefore(Instruction *Inst, Instruction *Before) | |||
2876 | : TypePromotionAction(Inst), Position(Inst) { | |||
2877 | DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n"); | |||
2878 | Inst->moveBefore(Before); | |||
2879 | } | |||
2880 | ||||
2881 | /// \brief Move the instruction back to its original position. | |||
2882 | void undo() override { | |||
2883 | DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n"); | |||
2884 | Position.insert(Inst); | |||
2885 | } | |||
2886 | }; | |||
2887 | ||||
2888 | /// \brief Set the operand of an instruction with a new value. | |||
2889 | class OperandSetter : public TypePromotionAction { | |||
2890 | /// Original operand of the instruction. | |||
2891 | Value *Origin; | |||
2892 | ||||
2893 | /// Index of the modified instruction. | |||
2894 | unsigned Idx; | |||
2895 | ||||
2896 | public: | |||
2897 | /// \brief Set \p Idx operand of \p Inst with \p NewVal. | |||
2898 | OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal) | |||
2899 | : TypePromotionAction(Inst), Idx(Idx) { | |||
2900 | DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n" | |||
2901 |              << "for:" << *Inst << "\n" | |||
2902 |              << "with:" << *NewVal << "\n"); | |||
2903 | Origin = Inst->getOperand(Idx); | |||
2904 | Inst->setOperand(Idx, NewVal); | |||
2905 | } | |||
2906 | ||||
2907 | /// \brief Restore the original value of the instruction. | |||
2908 | void undo() override { | |||
2909 | DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n" | |||
2910 |              << "for: " << *Inst << "\n" | |||
2911 |              << "with: " << *Origin << "\n"); | |||
2912 | Inst->setOperand(Idx, Origin); | |||
2913 | } | |||
2914 | }; | |||
2915 | ||||
2916 | /// \brief Hide the operands of an instruction. | |||
2917 | /// Do as if this instruction was not using any of its operands. | |||
2918 | class OperandsHider : public TypePromotionAction { | |||
2919 | /// The list of original operands. | |||
2920 | SmallVector<Value *, 4> OriginalValues; | |||
2921 | ||||
2922 | public: | |||
2923 | /// \brief Remove \p Inst from the uses of the operands of \p Inst. | |||
2924 | OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) { | |||
2925 | DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n"); | |||
2926 | unsigned NumOpnds = Inst->getNumOperands(); | |||
2927 | OriginalValues.reserve(NumOpnds); | |||
2928 | for (unsigned It = 0; It < NumOpnds; ++It) { | |||
2929 | // Save the current operand. | |||
2930 | Value *Val = Inst->getOperand(It); | |||
2931 | OriginalValues.push_back(Val); | |||
2932 | // Set a dummy one. | |||
2933 | // We could use OperandSetter here, but that would imply an overhead | |||
2934 | // that we are not willing to pay. | |||
2935 | Inst->setOperand(It, UndefValue::get(Val->getType())); | |||
2936 | } | |||
2937 | } | |||
2938 | ||||
2939 | /// \brief Restore the original list of uses. | |||
2940 | void undo() override { | |||
2941 | DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n"); | |||
2942 | for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It) | |||
2943 | Inst->setOperand(It, OriginalValues[It]); | |||
2944 | } | |||
2945 | }; | |||
2946 | ||||
2947 | /// \brief Build a truncate instruction. | |||
2948 | class TruncBuilder : public TypePromotionAction { | |||
2949 | Value *Val; | |||
2950 | ||||
2951 | public: | |||
2952 | /// \brief Build a truncate instruction of \p Opnd producing a \p Ty | |||
2953 | /// result. | |||
2954 | /// trunc Opnd to Ty. | |||
2955 | TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) { | |||
2956 | IRBuilder<> Builder(Opnd); | |||
2957 | Val = Builder.CreateTrunc(Opnd, Ty, "promoted"); | |||
2958 | DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n"); | |||
2959 | } | |||
2960 | ||||
2961 | /// \brief Get the built value. | |||
2962 | Value *getBuiltValue() { return Val; } | |||
2963 | ||||
2964 | /// \brief Remove the built instruction. | |||
2965 | void undo() override { | |||
2966 | DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n"); | |||
2967 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2968 | IVal->eraseFromParent(); | |||
2969 | } | |||
2970 | }; | |||
2971 | ||||
2972 | /// \brief Build a sign extension instruction. | |||
2973 | class SExtBuilder : public TypePromotionAction { | |||
2974 | Value *Val; | |||
2975 | ||||
2976 | public: | |||
2977 | /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty | |||
2978 | /// result. | |||
2979 | /// sext Opnd to Ty. | |||
2980 | SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
2981 | : TypePromotionAction(InsertPt) { | |||
2982 | IRBuilder<> Builder(InsertPt); | |||
2983 | Val = Builder.CreateSExt(Opnd, Ty, "promoted"); | |||
2984 | DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n"); | |||
2985 | } | |||
2986 | ||||
2987 | /// \brief Get the built value. | |||
2988 | Value *getBuiltValue() { return Val; } | |||
2989 | ||||
2990 | /// \brief Remove the built instruction. | |||
2991 | void undo() override { | |||
2992 | DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n"); | |||
2993 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
2994 | IVal->eraseFromParent(); | |||
2995 | } | |||
2996 | }; | |||
2997 | ||||
2998 | /// \brief Build a zero extension instruction. | |||
2999 | class ZExtBuilder : public TypePromotionAction { | |||
3000 | Value *Val; | |||
3001 | ||||
3002 | public: | |||
3003 | /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty | |||
3004 | /// result. | |||
3005 | /// zext Opnd to Ty. | |||
3006 | ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty) | |||
3007 | : TypePromotionAction(InsertPt) { | |||
3008 | IRBuilder<> Builder(InsertPt); | |||
3009 | Val = Builder.CreateZExt(Opnd, Ty, "promoted"); | |||
3010 | DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n"); | |||
3011 | } | |||
3012 | ||||
3013 | /// \brief Get the built value. | |||
3014 | Value *getBuiltValue() { return Val; } | |||
3015 | ||||
3016 | /// \brief Remove the built instruction. | |||
3017 | void undo() override { | |||
3018 | DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n"); | |||
3019 | if (Instruction *IVal = dyn_cast<Instruction>(Val)) | |||
3020 | IVal->eraseFromParent(); | |||
3021 | } | |||
3022 | }; | |||
3023 | ||||
3024 | /// \brief Mutate an instruction to another type. | |||
3025 | class TypeMutator : public TypePromotionAction { | |||
3026 | /// Record the original type. | |||
3027 | Type *OrigTy; | |||
3028 | ||||
3029 | public: | |||
3030 | /// \brief Mutate the type of \p Inst into \p NewTy. | |||
3031 | TypeMutator(Instruction *Inst, Type *NewTy) | |||
3032 | : TypePromotionAction(Inst), OrigTy(Inst->getType()) { | |||
3033 | DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy | |||
3034 |              << "\n"); | |||
3035 | Inst->mutateType(NewTy); | |||
3036 | } | |||
3037 | ||||
3038 | /// \brief Mutate the instruction back to its original type. | |||
3039 | void undo() override { | |||
3040 | DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy | |||
3041 |              << "\n"); | |||
3042 | Inst->mutateType(OrigTy); | |||
3043 | } | |||
3044 | }; | |||
3045 | ||||
3046 | /// \brief Replace the uses of an instruction by another instruction. | |||
3047 | class UsesReplacer : public TypePromotionAction { | |||
3048 | /// Helper structure to keep track of the replaced uses. | |||
3049 | struct InstructionAndIdx { | |||
3050 | /// The instruction using the instruction. | |||
3051 | Instruction *Inst; | |||
3052 | ||||
3053 | /// The index where this instruction is used for Inst. | |||
3054 | unsigned Idx; | |||
3055 | ||||
3056 | InstructionAndIdx(Instruction *Inst, unsigned Idx) | |||
3057 | : Inst(Inst), Idx(Idx) {} | |||
3058 | }; | |||
3059 | ||||
3060 | /// Keep track of the original uses (pair Instruction, Index). | |||
3061 | SmallVector<InstructionAndIdx, 4> OriginalUses; | |||
3062 | ||||
3063 | using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator; | |||
3064 | ||||
3065 | public: | |||
3066 | /// \brief Replace all the use of \p Inst by \p New. | |||
3067 | UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) { | |||
3068 | DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New | |||
3069 |              << "\n"); | |||
3070 | // Record the original uses. | |||
3071 | for (Use &U : Inst->uses()) { | |||
3072 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
3073 | OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo())); | |||
3074 | } | |||
3075 | // Now, we can replace the uses. | |||
3076 | Inst->replaceAllUsesWith(New); | |||
3077 | } | |||
3078 | ||||
3079 | /// \brief Reassign the original uses of Inst to Inst. | |||
3080 | void undo() override { | |||
3081 | DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n"); | |||
3082 | for (use_iterator UseIt = OriginalUses.begin(), | |||
3083 | EndIt = OriginalUses.end(); | |||
3084 | UseIt != EndIt; ++UseIt) { | |||
3085 | UseIt->Inst->setOperand(UseIt->Idx, Inst); | |||
3086 | } | |||
3087 | } | |||
3088 | }; | |||
3089 | ||||
3090 | /// \brief Remove an instruction from the IR. | |||
3091 | class InstructionRemover : public TypePromotionAction { | |||
3092 | /// Original position of the instruction. | |||
3093 | InsertionHandler Inserter; | |||
3094 | ||||
3095 | /// Helper structure to hide all the links to the instruction. In other | |||
3096 | /// words, this helps to act as if the instruction was removed. | |||
3097 | OperandsHider Hider; | |||
3098 | ||||
3099 | /// Keep track of the uses replaced, if any. | |||
3100 | UsesReplacer *Replacer = nullptr; | |||
3101 | ||||
3102 | /// Keep track of instructions removed. | |||
3103 | SetOfInstrs &RemovedInsts; | |||
3104 | ||||
3105 | public: | |||
3106 | /// \brief Remove all references of \p Inst and optionally replace all its | |||
3107 | /// uses with New. | |||
3108 | /// \p RemovedInsts Keep track of the instructions removed by this Action. | |||
3109 | /// \pre If !Inst->use_empty(), then New != nullptr | |||
3110 | InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts, | |||
3111 | Value *New = nullptr) | |||
3112 | : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst), | |||
3113 | RemovedInsts(RemovedInsts) { | |||
3114 | if (New) | |||
3115 | Replacer = new UsesReplacer(Inst, New); | |||
3116 | DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n"); | |||
3117 | RemovedInsts.insert(Inst); | |||
3118 | /// The instructions removed here will be freed after completing | |||
3119 | /// optimizeBlock() for all blocks as we need to keep track of the | |||
3120 | /// removed instructions during promotion. | |||
3121 | Inst->removeFromParent(); | |||
3122 | } | |||
3123 | ||||
3124 | ~InstructionRemover() override { delete Replacer; } | |||
3125 | ||||
3126 | /// \brief Resurrect the instruction and reassign it to the proper uses if | |||
3127 | /// a new value was provided when building this action. | |||
3128 | void undo() override { | |||
3129 | DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n"); | |||
3130 | Inserter.insert(Inst); | |||
3131 | if (Replacer) | |||
3132 | Replacer->undo(); | |||
3133 | Hider.undo(); | |||
3134 | RemovedInsts.erase(Inst); | |||
3135 | } | |||
3136 | }; | |||
3137 | ||||
3138 | public: | |||
3139 | /// Restoration point. | |||
3140 | /// The restoration point is a pointer to an action instead of an iterator | |||
3141 | /// because the iterator may be invalidated but not the pointer. | |||
3142 | using ConstRestorationPt = const TypePromotionAction *; | |||
3143 | ||||
3144 | TypePromotionTransaction(SetOfInstrs &RemovedInsts) | |||
3145 | : RemovedInsts(RemovedInsts) {} | |||
3146 | ||||
3147 | /// Advocate every change made in this transaction. | |||
3148 | void commit(); | |||
3149 | ||||
3150 | /// Undo all the changes made after the given point. | |||
3151 | void rollback(ConstRestorationPt Point); | |||
3152 | ||||
3153 | /// Get the current restoration point. | |||
3154 | ConstRestorationPt getRestorationPoint() const; | |||
3155 | ||||
3156 | /// \name API for IR modification with state keeping to support rollback. | |||
3157 | /// @{ | |||
3158 | /// Same as Instruction::setOperand. | |||
3159 | void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal); | |||
3160 | ||||
3161 | /// Same as Instruction::eraseFromParent. | |||
3162 | void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr); | |||
3163 | ||||
3164 | /// Same as Value::replaceAllUsesWith. | |||
3165 | void replaceAllUsesWith(Instruction *Inst, Value *New); | |||
3166 | ||||
3167 | /// Same as Value::mutateType. | |||
3168 | void mutateType(Instruction *Inst, Type *NewTy); | |||
3169 | ||||
3170 | /// Same as IRBuilder::createTrunc. | |||
3171 | Value *createTrunc(Instruction *Opnd, Type *Ty); | |||
3172 | ||||
3173 | /// Same as IRBuilder::createSExt. | |||
3174 | Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
3175 | ||||
3176 | /// Same as IRBuilder::createZExt. | |||
3177 | Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty); | |||
3178 | ||||
3179 | /// Same as Instruction::moveBefore. | |||
3180 | void moveBefore(Instruction *Inst, Instruction *Before); | |||
3181 | /// @} | |||
3182 | ||||
3183 | private: | |||
3184 | /// The ordered list of actions made so far. | |||
3185 | SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions; | |||
3186 | ||||
3187 | using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator; | |||
3188 | ||||
3189 | SetOfInstrs &RemovedInsts; | |||
3190 | }; | |||
3191 | ||||
3192 | } // end anonymous namespace | |||
3193 | ||||
3194 | void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx, | |||
3195 | Value *NewVal) { | |||
3196 | Actions.push_back(llvm::make_unique<TypePromotionTransaction::OperandSetter>( | |||
3197 | Inst, Idx, NewVal)); | |||
3198 | } | |||
3199 | ||||
3200 | void TypePromotionTransaction::eraseInstruction(Instruction *Inst, | |||
3201 | Value *NewVal) { | |||
3202 | Actions.push_back( | |||
3203 | llvm::make_unique<TypePromotionTransaction::InstructionRemover>( | |||
3204 | Inst, RemovedInsts, NewVal)); | |||
3205 | } | |||
3206 | ||||
3207 | void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst, | |||
3208 | Value *New) { | |||
3209 | Actions.push_back( | |||
3210 | llvm::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New)); | |||
3211 | } | |||
3212 | ||||
3213 | void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) { | |||
3214 | Actions.push_back( | |||
3215 | llvm::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy)); | |||
3216 | } | |||
3217 | ||||
3218 | Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, | |||
3219 | Type *Ty) { | |||
3220 | std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty)); | |||
3221 | Value *Val = Ptr->getBuiltValue(); | |||
3222 | Actions.push_back(std::move(Ptr)); | |||
3223 | return Val; | |||
3224 | } | |||
3225 | ||||
3226 | Value *TypePromotionTransaction::createSExt(Instruction *Inst, | |||
3227 | Value *Opnd, Type *Ty) { | |||
3228 | std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty)); | |||
3229 | Value *Val = Ptr->getBuiltValue(); | |||
3230 | Actions.push_back(std::move(Ptr)); | |||
3231 | return Val; | |||
3232 | } | |||
3233 | ||||
3234 | Value *TypePromotionTransaction::createZExt(Instruction *Inst, | |||
3235 | Value *Opnd, Type *Ty) { | |||
3236 | std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty)); | |||
3237 | Value *Val = Ptr->getBuiltValue(); | |||
3238 | Actions.push_back(std::move(Ptr)); | |||
3239 | return Val; | |||
3240 | } | |||
3241 | ||||
3242 | void TypePromotionTransaction::moveBefore(Instruction *Inst, | |||
3243 | Instruction *Before) { | |||
3244 | Actions.push_back( | |||
3245 | llvm::make_unique<TypePromotionTransaction::InstructionMoveBefore>( | |||
3246 | Inst, Before)); | |||
3247 | } | |||
3248 | ||||
3249 | TypePromotionTransaction::ConstRestorationPt | |||
3250 | TypePromotionTransaction::getRestorationPoint() const { | |||
3251 | return !Actions.empty() ? Actions.back().get() : nullptr; | |||
3252 | } | |||
3253 | ||||
3254 | void TypePromotionTransaction::commit() { | |||
3255 | for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt; | |||
3256 | ++It) | |||
3257 | (*It)->commit(); | |||
3258 | Actions.clear(); | |||
3259 | } | |||
3260 | ||||
3261 | void TypePromotionTransaction::rollback( | |||
3262 | TypePromotionTransaction::ConstRestorationPt Point) { | |||
3263 | while (!Actions.empty() && Point != Actions.back().get()) { | |||
3264 | std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val(); | |||
3265 | Curr->undo(); | |||
3266 | } | |||
3267 | } | |||
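| // Usage sketch (illustrative; Inst, User, WideTy and StillProfitable are | |||
| // hypothetical names, not call sites in this file): | |||
| //   TypePromotionTransaction TPT(RemovedInsts); | |||
| //   TypePromotionTransaction::ConstRestorationPt Pt = | |||
| //       TPT.getRestorationPoint(); | |||
| //   TPT.mutateType(Inst, WideTy);  // recorded action #1 | |||
| //   TPT.setOperand(User, 0, Inst); // recorded action #2 | |||
| //   if (!StillProfitable)          // hypothetical predicate | |||
| //     TPT.rollback(Pt);            // undoes #2, then #1 | |||
| //   else | |||
| //     TPT.commit();                // keeps both changes | |||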
3268 | ||||
3269 | namespace { | |||
3270 | ||||
3271 | /// \brief A helper class for matching addressing modes. | |||
3272 | /// | |||
3273 | /// This encapsulates the logic for matching the target-legal addressing modes. | |||
3274 | class AddressingModeMatcher { | |||
3275 | SmallVectorImpl<Instruction*> &AddrModeInsts; | |||
3276 | const TargetLowering &TLI; | |||
3277 | const TargetRegisterInfo &TRI; | |||
3278 | const DataLayout &DL; | |||
3279 | ||||
3280 | /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and | |||
3281 | /// the memory instruction that we're computing this address for. | |||
3282 | Type *AccessTy; | |||
3283 | unsigned AddrSpace; | |||
3284 | Instruction *MemoryInst; | |||
3285 | ||||
3286 | /// This is the addressing mode that we're building up. This is | |||
3287 | /// part of the return value of this addressing mode matching stuff. | |||
3288 | ExtAddrMode &AddrMode; | |||
3289 | ||||
3290 | /// The instructions inserted by other CodeGenPrepare optimizations. | |||
3291 | const SetOfInstrs &InsertedInsts; | |||
3292 | ||||
3293 | /// A map from the instructions to their type before promotion. | |||
3294 | InstrToOrigTy &PromotedInsts; | |||
3295 | ||||
3296 | /// The ongoing transaction where every action should be registered. | |||
3297 | TypePromotionTransaction &TPT; | |||
3298 | ||||
3299 | /// This is set to true when we should not do profitability checks. | |||
3300 | /// When true, IsProfitableToFoldIntoAddressingMode always returns true. | |||
3301 | bool IgnoreProfitability; | |||
3302 | ||||
3303 | AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI, | |||
3304 | const TargetLowering &TLI, | |||
3305 | const TargetRegisterInfo &TRI, | |||
3306 | Type *AT, unsigned AS, | |||
3307 | Instruction *MI, ExtAddrMode &AM, | |||
3308 | const SetOfInstrs &InsertedInsts, | |||
3309 | InstrToOrigTy &PromotedInsts, | |||
3310 | TypePromotionTransaction &TPT) | |||
3311 | : AddrModeInsts(AMI), TLI(TLI), TRI(TRI), | |||
3312 | DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS), | |||
3313 | MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts), | |||
3314 | PromotedInsts(PromotedInsts), TPT(TPT) { | |||
3315 | IgnoreProfitability = false; | |||
3316 | } | |||
3317 | ||||
3318 | public: | |||
3319 | /// Find the maximal addressing mode that a load/store of V can fold, | |||
3320 | /// given an access type of AccessTy. This returns a list of involved | |||
3321 | /// instructions in AddrModeInsts. | |||
3322 | /// \p InsertedInsts The instructions inserted by other CodeGenPrepare | |||
3323 | /// optimizations. | |||
3324 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3325 | /// \p TPT The ongoing transaction where every action should be registered. | |||
3326 | static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS, | |||
3327 | Instruction *MemoryInst, | |||
3328 | SmallVectorImpl<Instruction*> &AddrModeInsts, | |||
3329 | const TargetLowering &TLI, | |||
3330 | const TargetRegisterInfo &TRI, | |||
3331 | const SetOfInstrs &InsertedInsts, | |||
3332 | InstrToOrigTy &PromotedInsts, | |||
3333 | TypePromotionTransaction &TPT) { | |||
3334 | ExtAddrMode Result; | |||
3335 | ||||
3336 | bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, | |||
3337 | AccessTy, AS, | |||
3338 | MemoryInst, Result, InsertedInsts, | |||
3339 | PromotedInsts, TPT).matchAddr(V, 0); | |||
3340 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
3341 | return Result; | |||
3342 | } | |||
3343 | ||||
3344 | private: | |||
3345 | bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth); | |||
3346 | bool matchAddr(Value *V, unsigned Depth); | |||
3347 | bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth, | |||
3348 | bool *MovedAway = nullptr); | |||
3349 | bool isProfitableToFoldIntoAddressingMode(Instruction *I, | |||
3350 | ExtAddrMode &AMBefore, | |||
3351 | ExtAddrMode &AMAfter); | |||
3352 | bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2); | |||
3353 | bool isPromotionProfitable(unsigned NewCost, unsigned OldCost, | |||
3354 | Value *PromotedOperand) const; | |||
3355 | }; | |||
3356 | ||||
3357 | /// \brief A helper class for combining addressing modes. | |||
3358 | class AddressingModeCombiner { | |||
3359 | private: | |||
3360 | /// The addressing modes we've collected. | |||
3361 | SmallVector<ExtAddrMode, 16> AddrModes; | |||
3362 | ||||
3363 | /// The field in which the AddrModes differ, when we have more than one. | |||
3364 | ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField; | |||
3365 | ||||
3366 | /// Are the AddrModes that we have all just equal to their original values? | |||
3367 | bool AllAddrModesTrivial = true; | |||
3368 | ||||
3369 | public: | |||
3370 | /// \brief Get the combined AddrMode | |||
3371 | const ExtAddrMode &getAddrMode() const { | |||
3372 | return AddrModes[0]; | |||
3373 | } | |||
3374 | ||||
3375 | /// \brief Add a new AddrMode if it's compatible with the AddrModes we already | |||
3376 | /// have. | |||
3377 | /// \return True iff we succeeded in doing so. | |||
3378 | bool addNewAddrMode(ExtAddrMode &NewAddrMode) { | |||
3379 |     // Take note of whether we have any non-trivial AddrModes, as we need to | |||
3380 |     // detect when all AddrModes are trivial, since then we would introduce a | |||
3381 |     // phi or select which just duplicates what's already there. | |||
3382 | AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial(); | |||
3383 | ||||
3384 | // If this is the first addrmode then everything is fine. | |||
3385 | if (AddrModes.empty()) { | |||
3386 | AddrModes.emplace_back(NewAddrMode); | |||
3387 | return true; | |||
3388 | } | |||
3389 | ||||
3390 | // Figure out how different this is from the other address modes, which we | |||
3391 | // can do just by comparing against the first one given that we only care | |||
3392 | // about the cumulative difference. | |||
3393 | ExtAddrMode::FieldName ThisDifferentField = | |||
3394 | AddrModes[0].compare(NewAddrMode); | |||
3395 | if (DifferentField == ExtAddrMode::NoField) | |||
3396 | DifferentField = ThisDifferentField; | |||
3397 | else if (DifferentField != ThisDifferentField) | |||
3398 | DifferentField = ExtAddrMode::MultipleFields; | |||
3399 | ||||
3400 | // If this AddrMode is the same as all the others then everything is fine | |||
3401 | // (which should only happen when there is actually only one AddrMode). | |||
3402 | if (DifferentField == ExtAddrMode::NoField) { | |||
3403 |       assert(AddrModes.size() == 1); | |||
3404 | return true; | |||
3405 | } | |||
3406 | ||||
3407 | // If NewAddrMode differs in only one dimension then we can handle it by | |||
3408 | // inserting a phi/select later on. | |||
3409 | if (DifferentField != ExtAddrMode::MultipleFields) { | |||
3410 | AddrModes.emplace_back(NewAddrMode); | |||
3411 | return true; | |||
3412 | } | |||
3413 | ||||
3414 | // We couldn't combine NewAddrMode with the rest, so return failure. | |||
3415 | AddrModes.clear(); | |||
3416 | return false; | |||
3417 | } | |||
3418 | ||||
3419 | /// \brief Combine the addressing modes we've collected into a single | |||
3420 | /// addressing mode. | |||
3421 | /// \return True iff we successfully combined them or we only had one so | |||
3422 | /// didn't need to combine them anyway. | |||
3423 | bool combineAddrModes() { | |||
3424 | // If we have no AddrModes then they can't be combined. | |||
3425 | if (AddrModes.size() == 0) | |||
3426 | return false; | |||
3427 | ||||
3428 | // A single AddrMode can trivially be combined. | |||
3429 | if (AddrModes.size() == 1) | |||
3430 | return true; | |||
3431 | ||||
3432 | // If the AddrModes we collected are all just equal to the value they are | |||
3433 | // derived from then combining them wouldn't do anything useful. | |||
3434 | if (AllAddrModesTrivial) | |||
3435 | return false; | |||
3436 | ||||
3437 | // TODO: Combine multiple AddrModes by inserting a select or phi for the | |||
3438 | // field in which the AddrModes differ. | |||
3439 | return false; | |||
3440 | } | |||
3441 | }; | |||
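// Illustrative usage sketch (hypothetical caller, not code from this file):
// collect one ExtAddrMode per candidate address value, then try to combine.
// "CandidateAddrModes" is an assumed container for illustration only.
//
//   AddressingModeCombiner AMC;
//   for (ExtAddrMode &AM : CandidateAddrModes)
//     if (!AMC.addNewAddrMode(AM))
//       break;                   // differs in more than one field; give up
//   if (AMC.combineAddrModes())
//     const ExtAddrMode &Final = AMC.getAddrMode();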
3442 | ||||
3443 | } // end anonymous namespace | |||
3444 | ||||
3445 | /// Try adding ScaleReg*Scale to the current addressing mode. | |||
3446 | /// Return true and update AddrMode if this addr mode is legal for the target, | |||
3447 | /// false if not. | |||
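/// For example (illustrative IR, assuming the target accepts the resulting
/// scale): with AddrMode already holding {ScaledReg = %x, Scale = 4},
///   %a = mul i64 %x, 4
///   %b = mul i64 %x, 3
///   %s = add i64 %a, %b
/// matching %x with Scale 3 folds into a single {ScaledReg = %x, Scale = 7}.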
3448 | bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale, | |||
3449 | unsigned Depth) { | |||
3450 | // If Scale is 1, then this is the same as adding ScaleReg to the addressing | |||
3451 | // mode. Just process that directly. | |||
3452 | if (Scale == 1) | |||
3453 | return matchAddr(ScaleReg, Depth); | |||
3454 | ||||
3455 | // If the scale is 0, it takes nothing to add this. | |||
3456 | if (Scale == 0) | |||
3457 | return true; | |||
3458 | ||||
3459 | // If we already have a scale of this value, we can add to it, otherwise, we | |||
3460 | // need an available scale field. | |||
3461 | if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg) | |||
3462 | return false; | |||
3463 | ||||
3464 | ExtAddrMode TestAddrMode = AddrMode; | |||
3465 | ||||
3466 | // Add scale to turn X*4+X*3 -> X*7. This could also do things like | |||
3467 | // [A+B + A*7] -> [B+A*8]. | |||
3468 | TestAddrMode.Scale += Scale; | |||
3469 | TestAddrMode.ScaledReg = ScaleReg; | |||
3470 | ||||
3471 | // If the new address isn't legal, bail out. | |||
3472 | if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) | |||
3473 | return false; | |||
3474 | ||||
3475 | // It was legal, so commit it. | |||
3476 | AddrMode = TestAddrMode; | |||
3477 | ||||
3478 | // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now | |||
3479 | // to see if ScaleReg is actually X+C. If so, we can turn this into adding | |||
3480 | // X*Scale + C*Scale to addr mode. | |||
3481 | ConstantInt *CI = nullptr; Value *AddLHS = nullptr; | |||
3482 | if (isa<Instruction>(ScaleReg) && // not a constant expr. | |||
3483 | match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) { | |||
3484 | TestAddrMode.ScaledReg = AddLHS; | |||
3485 | TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale; | |||
3486 | ||||
3487 | // If this addressing mode is legal, commit it and remember that we folded | |||
3488 | // this instruction. | |||
3489 | if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) { | |||
3490 | AddrModeInsts.push_back(cast<Instruction>(ScaleReg)); | |||
3491 | AddrMode = TestAddrMode; | |||
3492 | return true; | |||
3493 | } | |||
3494 | } | |||
3495 | ||||
3496 | // Otherwise, not (x+c)*scale, just return what we have. | |||
3497 | return true; | |||
3498 | } | |||
3499 | ||||
3500 | /// This is a little filter, which returns true if an addressing computation | |||
3501 | /// involving I might be folded into a load/store accessing it. | |||
3502 | /// This doesn't need to be perfect, but needs to accept at least | |||
3503 | /// the set of instructions that MatchOperationAddr can. | |||
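/// For example (illustrative IR), each instruction below passes this filter:
///   %g = getelementptr i8, i8* %base, i64 %idx
///   %i = ptrtoint i8* %g to i64
///   %a = add i64 %i, 16
///   %m = mul i64 %a, 4            ; constant RHS, so still foldable
///   %p = inttoptr i64 %m to i8*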
3504 | static bool MightBeFoldableInst(Instruction *I) { | |||
3505 | switch (I->getOpcode()) { | |||
3506 | case Instruction::BitCast: | |||
3507 | case Instruction::AddrSpaceCast: | |||
3508 | // Don't touch identity bitcasts. | |||
3509 | if (I->getType() == I->getOperand(0)->getType()) | |||
3510 | return false; | |||
3511 | return I->getType()->isPointerTy() || I->getType()->isIntegerTy(); | |||
3512 | case Instruction::PtrToInt: | |||
3513 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
3514 | return true; | |||
3515 | case Instruction::IntToPtr: | |||
3516 | // We know the input is intptr_t, so this is foldable. | |||
3517 | return true; | |||
3518 | case Instruction::Add: | |||
3519 | return true; | |||
3520 | case Instruction::Mul: | |||
3521 | case Instruction::Shl: | |||
3522 | // Can only handle X*C and X << C. | |||
3523 | return isa<ConstantInt>(I->getOperand(1)); | |||
3524 | case Instruction::GetElementPtr: | |||
3525 | return true; | |||
3526 | default: | |||
3527 | return false; | |||
3528 | } | |||
3529 | } | |||
3530 | ||||
3531 | /// \brief Check whether or not \p Val is a legal instruction for \p TLI. | |||
3532 | /// \note \p Val is assumed to be the product of some type promotion. | |||
3533 | /// Therefore if \p Val has an undefined state in \p TLI, this is assumed | |||
3534 | /// to be legal, as the non-promoted value would have had the same state. | |||
3535 | static bool isPromotedInstructionLegal(const TargetLowering &TLI, | |||
3536 | const DataLayout &DL, Value *Val) { | |||
3537 | Instruction *PromotedInst = dyn_cast<Instruction>(Val); | |||
3538 | if (!PromotedInst) | |||
3539 | return false; | |||
3540 | int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode()); | |||
3541 | // If the ISDOpcode is undefined, it was undefined before the promotion. | |||
3542 | if (!ISDOpcode) | |||
3543 | return true; | |||
3544 | // Otherwise, check if the promoted instruction is legal or not. | |||
3545 | return TLI.isOperationLegalOrCustom( | |||
3546 | ISDOpcode, TLI.getValueType(DL, PromotedInst->getType())); | |||
3547 | } | |||
3548 | ||||
3549 | namespace { | |||
3550 | ||||
3551 | /// \brief Helper class to perform type promotion. | |||
3552 | class TypePromotionHelper { | |||
3553 | /// \brief Utility function to check whether or not a sign or zero extension | |||
3554 | /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by | |||
3555 | /// either using the operands of \p Inst or promoting \p Inst. | |||
3556 | /// The type of the extension is defined by \p IsSExt. | |||
3557 | /// In other words, check if: | |||
3558 | /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType. | |||
3559 | /// #1 Promotion applies: | |||
3560 | /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...). | |||
3561 | /// #2 Operand reuses: | |||
3562 | /// ext opnd1 to ConsideredExtType. | |||
3563 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3564 | static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType, | |||
3565 | const InstrToOrigTy &PromotedInsts, bool IsSExt); | |||
3566 | ||||
3567 | /// \brief Utility function to determine if \p OpIdx should be promoted when | |||
3568 | /// promoting \p Inst. | |||
3569 | static bool shouldExtOperand(const Instruction *Inst, int OpIdx) { | |||
3570 | return !(isa<SelectInst>(Inst) && OpIdx == 0); | |||
3571 | } | |||
3572 | ||||
3573 | /// \brief Utility function to promote the operand of \p Ext when this | |||
3574 | /// operand is a promotable trunc or sext or zext. | |||
3575 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3576 | /// \p CreatedInstsCost[out] contains the cost of all instructions | |||
3577 | /// created to promote the operand of Ext. | |||
3578 | /// Newly added extensions are inserted in \p Exts. | |||
3579 | /// Newly added truncates are inserted in \p Truncs. | |||
3580 | /// Should never be called directly. | |||
3581 | /// \return The promoted value which is used instead of Ext. | |||
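  /// For example (illustrative IR):
  ///   %z = zext i8 %opnd to i16
  ///   %s = sext i16 %z to i32
  /// is rewritten into a single extension of the original operand:
  ///   %z2 = zext i8 %opnd to i32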
3582 | static Value *promoteOperandForTruncAndAnyExt( | |||
3583 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3584 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3585 | SmallVectorImpl<Instruction *> *Exts, | |||
3586 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI); | |||
3587 | ||||
3588 | /// \brief Utility function to promote the operand of \p Ext when this | |||
3589 | /// operand is promotable and is not a supported trunc or sext. | |||
3590 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
3591 | /// \p CreatedInstsCost[out] contains the cost of all the instructions | |||
3592 | /// created to promote the operand of Ext. | |||
3593 | /// Newly added extensions are inserted in \p Exts. | |||
3594 | /// Newly added truncates are inserted in \p Truncs. | |||
3595 | /// Should never be called directly. | |||
3596 | /// \return The promoted value which is used instead of Ext. | |||
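  /// For example (illustrative IR), promoting through a nuw add:
  ///   %a = add nuw i32 %x, 1
  ///   %z = zext i32 %a to i64
  /// becomes:
  ///   %px = zext i32 %x to i64
  ///   %pa = add nuw i64 %px, 1    ; former uses of %z now use %pa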
3597 | static Value *promoteOperandForOther(Instruction *Ext, | |||
3598 | TypePromotionTransaction &TPT, | |||
3599 | InstrToOrigTy &PromotedInsts, | |||
3600 | unsigned &CreatedInstsCost, | |||
3601 | SmallVectorImpl<Instruction *> *Exts, | |||
3602 | SmallVectorImpl<Instruction *> *Truncs, | |||
3603 | const TargetLowering &TLI, bool IsSExt); | |||
3604 | ||||
3605 | /// \see promoteOperandForOther. | |||
3606 | static Value *signExtendOperandForOther( | |||
3607 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3608 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3609 | SmallVectorImpl<Instruction *> *Exts, | |||
3610 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
3611 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
3612 | Exts, Truncs, TLI, true); | |||
3613 | } | |||
3614 | ||||
3615 | /// \see promoteOperandForOther. | |||
3616 | static Value *zeroExtendOperandForOther( | |||
3617 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3618 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3619 | SmallVectorImpl<Instruction *> *Exts, | |||
3620 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
3621 | return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost, | |||
3622 | Exts, Truncs, TLI, false); | |||
3623 | } | |||
3624 | ||||
3625 | public: | |||
3626 | /// Type for the utility function that promotes the operand of Ext. | |||
3627 | using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT, | |||
3628 | InstrToOrigTy &PromotedInsts, | |||
3629 | unsigned &CreatedInstsCost, | |||
3630 | SmallVectorImpl<Instruction *> *Exts, | |||
3631 | SmallVectorImpl<Instruction *> *Truncs, | |||
3632 | const TargetLowering &TLI); | |||
3633 | ||||
3634 |   /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate | |||
3635 | /// action to promote the operand of \p Ext instead of using Ext. | |||
3636 | /// \return NULL if no promotable action is possible with the current | |||
3637 | /// sign extension. | |||
3638 | /// \p InsertedInsts keeps track of all the instructions inserted by the | |||
3639 | /// other CodeGenPrepare optimizations. This information is important | |||
3640 | /// because we do not want to promote these instructions as CodeGenPrepare | |||
3641 | /// will reinsert them later. Thus creating an infinite loop: create/remove. | |||
3642 | /// \p PromotedInsts maps the instructions to their type before promotion. | |||
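  /// For example (illustrative): for a zext whose operand is a trunc of %x,
  /// e.g. "%t = trunc i32 %x to i16" followed by "%z = zext i16 %t to i32",
  /// this returns promoteOperandForTruncAndAnyExt, which can collapse the
  /// pair into a direct use of %x.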
3643 | static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
3644 | const TargetLowering &TLI, | |||
3645 | const InstrToOrigTy &PromotedInsts); | |||
3646 | }; | |||
3647 | ||||
3648 | } // end anonymous namespace | |||
3649 | ||||
3650 | bool TypePromotionHelper::canGetThrough(const Instruction *Inst, | |||
3651 | Type *ConsideredExtType, | |||
3652 | const InstrToOrigTy &PromotedInsts, | |||
3653 | bool IsSExt) { | |||
3654 | // The promotion helper does not know how to deal with vector types yet. | |||
3655 | // To be able to fix that, we would need to fix the places where we | |||
3656 | // statically extend, e.g., constants and such. | |||
3657 | if (Inst->getType()->isVectorTy()) | |||
3658 | return false; | |||
3659 | ||||
3660 | // We can always get through zext. | |||
3661 | if (isa<ZExtInst>(Inst)) | |||
3662 | return true; | |||
3663 | ||||
3664 | // sext(sext) is ok too. | |||
3665 | if (IsSExt && isa<SExtInst>(Inst)) | |||
3666 | return true; | |||
3667 | ||||
3668 |   // We can get through a binary operator if it is legal. In other words, the | |||
3669 | // binary operator must have a nuw or nsw flag. | |||
3670 | const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst); | |||
3671 | if (BinOp && isa<OverflowingBinaryOperator>(BinOp) && | |||
3672 | ((!IsSExt && BinOp->hasNoUnsignedWrap()) || | |||
3673 | (IsSExt && BinOp->hasNoSignedWrap()))) | |||
3674 | return true; | |||
3675 | ||||
3676 | // Check if we can do the following simplification. | |||
3677 | // ext(trunc(opnd)) --> ext(opnd) | |||
3678 | if (!isa<TruncInst>(Inst)) | |||
3679 | return false; | |||
3680 | ||||
3681 | Value *OpndVal = Inst->getOperand(0); | |||
3682 | // Check if we can use this operand in the extension. | |||
3683 | // If the type is larger than the result type of the extension, we cannot. | |||
3684 | if (!OpndVal->getType()->isIntegerTy() || | |||
3685 | OpndVal->getType()->getIntegerBitWidth() > | |||
3686 | ConsideredExtType->getIntegerBitWidth()) | |||
3687 | return false; | |||
3688 | ||||
3689 | // If the operand of the truncate is not an instruction, we will not have | |||
3690 | // any information on the dropped bits. | |||
3691 |   // (Actually we could for constants, but it is not worth the extra logic.) | |||
3692 | Instruction *Opnd = dyn_cast<Instruction>(OpndVal); | |||
3693 | if (!Opnd) | |||
3694 | return false; | |||
3695 | ||||
3696 |   // Check if the source of the truncate is narrow enough. | |||
3697 |   // I.e., check that the trunc just drops extended bits of the same kind as | |||
3698 |   // the extension. | |||
3699 | // #1 get the type of the operand and check the kind of the extended bits. | |||
3700 | const Type *OpndType; | |||
3701 | InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd); | |||
3702 | if (It != PromotedInsts.end() && It->second.getInt() == IsSExt) | |||
3703 | OpndType = It->second.getPointer(); | |||
3704 | else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd))) | |||
3705 | OpndType = Opnd->getOperand(0)->getType(); | |||
3706 | else | |||
3707 | return false; | |||
3708 | ||||
3709 | // #2 check that the truncate just drops extended bits. | |||
3710 | return Inst->getType()->getIntegerBitWidth() >= | |||
3711 | OpndType->getIntegerBitWidth(); | |||
3712 | } | |||
3713 | ||||
3714 | TypePromotionHelper::Action TypePromotionHelper::getAction( | |||
3715 | Instruction *Ext, const SetOfInstrs &InsertedInsts, | |||
3716 | const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) { | |||
3717 |   assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) && | |||
3718 |          "Unexpected instruction type"); | |||
3719 | Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0)); | |||
3720 | Type *ExtTy = Ext->getType(); | |||
3721 | bool IsSExt = isa<SExtInst>(Ext); | |||
3722 | // If the operand of the extension is not an instruction, we cannot | |||
3723 | // get through. | |||
3724 |   // If it is, check that we can get through. | |||
3725 | if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt)) | |||
3726 | return nullptr; | |||
3727 | ||||
3728 | // Do not promote if the operand has been added by codegenprepare. | |||
3729 | // Otherwise, it means we are undoing an optimization that is likely to be | |||
3730 |   // redone, thus causing a potential infinite loop. | |||
3731 | if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd)) | |||
3732 | return nullptr; | |||
3733 | ||||
3734 |   // SExt, ZExt or Trunc instructions. | |||
3735 |   // Return the related handler. | |||
3736 | if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) || | |||
3737 | isa<ZExtInst>(ExtOpnd)) | |||
3738 | return promoteOperandForTruncAndAnyExt; | |||
3739 | ||||
3740 | // Regular instruction. | |||
3741 | // Abort early if we will have to insert non-free instructions. | |||
3742 | if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType())) | |||
3743 | return nullptr; | |||
3744 | return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther; | |||
3745 | } | |||
3746 | ||||
3747 | Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt( | |||
3748 | Instruction *SExt, TypePromotionTransaction &TPT, | |||
3749 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3750 | SmallVectorImpl<Instruction *> *Exts, | |||
3751 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) { | |||
3752 | // By construction, the operand of SExt is an instruction. Otherwise we cannot | |||
3753 | // get through it and this method should not be called. | |||
3754 | Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0)); | |||
3755 | Value *ExtVal = SExt; | |||
3756 | bool HasMergedNonFreeExt = false; | |||
3757 | if (isa<ZExtInst>(SExtOpnd)) { | |||
3758 | // Replace s|zext(zext(opnd)) | |||
3759 | // => zext(opnd). | |||
3760 | HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd); | |||
3761 | Value *ZExt = | |||
3762 | TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType()); | |||
3763 | TPT.replaceAllUsesWith(SExt, ZExt); | |||
3764 | TPT.eraseInstruction(SExt); | |||
3765 | ExtVal = ZExt; | |||
3766 | } else { | |||
3767 | // Replace z|sext(trunc(opnd)) or sext(sext(opnd)) | |||
3768 | // => z|sext(opnd). | |||
3769 | TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0)); | |||
3770 | } | |||
3771 | CreatedInstsCost = 0; | |||
3772 | ||||
3773 | // Remove dead code. | |||
3774 | if (SExtOpnd->use_empty()) | |||
3775 | TPT.eraseInstruction(SExtOpnd); | |||
3776 | ||||
3777 | // Check if the extension is still needed. | |||
3778 | Instruction *ExtInst = dyn_cast<Instruction>(ExtVal); | |||
3779 | if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) { | |||
3780 | if (ExtInst) { | |||
3781 | if (Exts) | |||
3782 | Exts->push_back(ExtInst); | |||
3783 | CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt; | |||
3784 | } | |||
3785 | return ExtVal; | |||
3786 | } | |||
3787 | ||||
3788 | // At this point we have: ext ty opnd to ty. | |||
3789 | // Reassign the uses of ExtInst to the opnd and remove ExtInst. | |||
3790 | Value *NextVal = ExtInst->getOperand(0); | |||
3791 | TPT.eraseInstruction(ExtInst, NextVal); | |||
3792 | return NextVal; | |||
3793 | } | |||
3794 | ||||
3795 | Value *TypePromotionHelper::promoteOperandForOther( | |||
3796 | Instruction *Ext, TypePromotionTransaction &TPT, | |||
3797 | InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost, | |||
3798 | SmallVectorImpl<Instruction *> *Exts, | |||
3799 | SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI, | |||
3800 | bool IsSExt) { | |||
3801 | // By construction, the operand of Ext is an instruction. Otherwise we cannot | |||
3802 | // get through it and this method should not be called. | |||
3803 | Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0)); | |||
3804 | CreatedInstsCost = 0; | |||
3805 | if (!ExtOpnd->hasOneUse()) { | |||
3806 | // ExtOpnd will be promoted. | |||
3807 | // All its uses, but Ext, will need to use a truncated value of the | |||
3808 | // promoted version. | |||
3809 | // Create the truncate now. | |||
3810 | Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType()); | |||
3811 | if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) { | |||
3812 | // Insert it just after the definition. | |||
3813 | ITrunc->moveAfter(ExtOpnd); | |||
3814 | if (Truncs) | |||
3815 | Truncs->push_back(ITrunc); | |||
3816 | } | |||
3817 | ||||
3818 | TPT.replaceAllUsesWith(ExtOpnd, Trunc); | |||
3819 | // Restore the operand of Ext (which has been replaced by the previous call | |||
3820 | // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext. | |||
3821 | TPT.setOperand(Ext, 0, ExtOpnd); | |||
3822 | } | |||
3823 | ||||
3824 | // Get through the Instruction: | |||
3825 | // 1. Update its type. | |||
3826 | // 2. Replace the uses of Ext by Inst. | |||
3827 | // 3. Extend each operand that needs to be extended. | |||
3828 | ||||
3829 | // Remember the original type of the instruction before promotion. | |||
3830 | // This is useful to know that the high bits are sign extended bits. | |||
3831 | PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>( | |||
3832 | ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt))); | |||
3833 | // Step #1. | |||
3834 | TPT.mutateType(ExtOpnd, Ext->getType()); | |||
3835 | // Step #2. | |||
3836 | TPT.replaceAllUsesWith(Ext, ExtOpnd); | |||
3837 | // Step #3. | |||
3838 | Instruction *ExtForOpnd = Ext; | |||
3839 | ||||
3840 |   DEBUG(dbgs() << "Propagate Ext to operands\n"); | |||
3841 | for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx; | |||
3842 | ++OpIdx) { | |||
3843 |     DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n'); | |||
3844 | if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() || | |||
3845 | !shouldExtOperand(ExtOpnd, OpIdx)) { | |||
3846 |       DEBUG(dbgs() << "No need to propagate\n"); | |||
3847 | continue; | |||
3848 | } | |||
3849 | // Check if we can statically extend the operand. | |||
3850 | Value *Opnd = ExtOpnd->getOperand(OpIdx); | |||
3851 | if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) { | |||
3852 |       DEBUG(dbgs() << "Statically extend\n"); | |||
3853 | unsigned BitWidth = Ext->getType()->getIntegerBitWidth(); | |||
3854 | APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth) | |||
3855 | : Cst->getValue().zext(BitWidth); | |||
3856 | TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal)); | |||
3857 | continue; | |||
3858 | } | |||
3859 |     // UndefValues are typed, so we have to statically sign extend them. | |||
3860 | if (isa<UndefValue>(Opnd)) { | |||
3861 |       DEBUG(dbgs() << "Statically extend\n"); | |||
3862 | TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType())); | |||
3863 | continue; | |||
3864 | } | |||
3865 | ||||
3866 |     // Otherwise we have to explicitly sign extend the operand. | |||
3867 | // Check if Ext was reused to extend an operand. | |||
3868 | if (!ExtForOpnd) { | |||
3869 | // If yes, create a new one. | |||
3870 |       DEBUG(dbgs() << "More operands to ext\n"); | |||
3871 | Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType()) | |||
3872 | : TPT.createZExt(Ext, Opnd, Ext->getType()); | |||
3873 | if (!isa<Instruction>(ValForExtOpnd)) { | |||
3874 | TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd); | |||
3875 | continue; | |||
3876 | } | |||
3877 | ExtForOpnd = cast<Instruction>(ValForExtOpnd); | |||
3878 | } | |||
3879 | if (Exts) | |||
3880 | Exts->push_back(ExtForOpnd); | |||
3881 | TPT.setOperand(ExtForOpnd, 0, Opnd); | |||
3882 | ||||
3883 | // Move the sign extension before the insertion point. | |||
3884 | TPT.moveBefore(ExtForOpnd, ExtOpnd); | |||
3885 | TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd); | |||
3886 | CreatedInstsCost += !TLI.isExtFree(ExtForOpnd); | |||
3887 |     // If more extensions are required, new instructions will have to be created. | |||
3888 | ExtForOpnd = nullptr; | |||
3889 | } | |||
3890 | if (ExtForOpnd == Ext) { | |||
3891 |     DEBUG(dbgs() << "Extension is useless now\n"); | |||
3892 | TPT.eraseInstruction(Ext); | |||
3893 | } | |||
3894 | return ExtOpnd; | |||
3895 | } | |||
3896 | ||||
3897 | /// Check whether or not promoting an instruction to a wider type is profitable. | |||
3898 | /// \p NewCost gives the cost of extension instructions created by the | |||
3899 | /// promotion. | |||
3900 | /// \p OldCost gives the cost of extension instructions before the promotion | |||
3901 | /// plus the number of instructions that have been matched in the addressing | |||
3902 | /// mode by the promotion. | |||
3903 | /// \p PromotedOperand is the value that has been promoted. | |||
3904 | /// \return True if the promotion is profitable, false otherwise. | |||
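/// For example (illustrative): if promoting an ext creates one new extension
/// (NewCost = 1) while the original ext cost 1 and the promotion let one more
/// instruction fold into the addressing mode (OldCost = 1 + 1 = 2), then
/// NewCost < OldCost and the promotion pays off.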
3905 | bool AddressingModeMatcher::isPromotionProfitable( | |||
3906 | unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const { | |||
3907 |   DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n'); | |||
3908 | // The cost of the new extensions is greater than the cost of the | |||
3909 | // old extension plus what we folded. | |||
3910 | // This is not profitable. | |||
3911 | if (NewCost > OldCost) | |||
3912 | return false; | |||
3913 | if (NewCost < OldCost) | |||
3914 | return true; | |||
3915 |   // The promotion is neutral, but it may help folding the extension into | |||
3916 |   // loads, for instance. | |||
3917 | // Check that we did not create an illegal instruction. | |||
3918 | return isPromotedInstructionLegal(TLI, DL, PromotedOperand); | |||
3919 | } | |||
3920 | ||||
3921 | /// Given an instruction or constant expr, see if we can fold the operation | |||
3922 | /// into the addressing mode. If so, update the addressing mode and return | |||
3923 | /// true, otherwise return false without modifying AddrMode. | |||
3924 | /// If \p MovedAway is not NULL, it contains the information of whether or | |||
3925 | /// not AddrInst has to be folded into the addressing mode on success. | |||
3926 | /// If \p MovedAway == true, \p AddrInst will not be part of the addressing | |||
3927 | /// mode because it has been moved away. | |||
3928 | /// Thus AddrInst must not be added in the matched instructions. | |||
3929 | /// This state can happen when AddrInst is a sext, since it may be moved away. | |||
3930 | /// Therefore, AddrInst may not be valid when MovedAway is true and it must | |||
3931 | /// not be referenced anymore. | |||
3932 | bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode, | |||
3933 | unsigned Depth, | |||
3934 | bool *MovedAway) { | |||
3935 | // Avoid exponential behavior on extremely deep expression trees. | |||
3936 | if (Depth >= 5) return false; | |||
3937 | ||||
3938 | // By default, all matched instructions stay in place. | |||
3939 | if (MovedAway) | |||
3940 | *MovedAway = false; | |||
3941 | ||||
3942 | switch (Opcode) { | |||
3943 | case Instruction::PtrToInt: | |||
3944 | // PtrToInt is always a noop, as we know that the int type is pointer sized. | |||
3945 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3946 | case Instruction::IntToPtr: { | |||
3947 | auto AS = AddrInst->getType()->getPointerAddressSpace(); | |||
3948 | auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS)); | |||
3949 | // This inttoptr is a no-op if the integer type is pointer sized. | |||
3950 | if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy) | |||
3951 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3952 | return false; | |||
3953 | } | |||
3954 | case Instruction::BitCast: | |||
3955 | // BitCast is always a noop, and we can handle it as long as it is | |||
3956 | // int->int or pointer->pointer (we don't want int<->fp or something). | |||
3957 | if ((AddrInst->getOperand(0)->getType()->isPointerTy() || | |||
3958 | AddrInst->getOperand(0)->getType()->isIntegerTy()) && | |||
3959 | // Don't touch identity bitcasts. These were probably put here by LSR, | |||
3960 | // and we don't want to mess around with them. Assume it knows what it | |||
3961 | // is doing. | |||
3962 | AddrInst->getOperand(0)->getType() != AddrInst->getType()) | |||
3963 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3964 | return false; | |||
3965 | case Instruction::AddrSpaceCast: { | |||
3966 | unsigned SrcAS | |||
3967 | = AddrInst->getOperand(0)->getType()->getPointerAddressSpace(); | |||
3968 | unsigned DestAS = AddrInst->getType()->getPointerAddressSpace(); | |||
3969 | if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) | |||
3970 | return matchAddr(AddrInst->getOperand(0), Depth); | |||
3971 | return false; | |||
3972 | } | |||
3973 | case Instruction::Add: { | |||
3974 | // Check to see if we can merge in the RHS then the LHS. If so, we win. | |||
3975 | ExtAddrMode BackupAddrMode = AddrMode; | |||
3976 | unsigned OldSize = AddrModeInsts.size(); | |||
3977 | // Start a transaction at this point. | |||
3978 | // The LHS may match but not the RHS. | |||
3979 | // Therefore, we need a higher level restoration point to undo partially | |||
3980 | // matched operation. | |||
3981 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
3982 | TPT.getRestorationPoint(); | |||
3983 | ||||
3984 | if (matchAddr(AddrInst->getOperand(1), Depth+1) && | |||
3985 | matchAddr(AddrInst->getOperand(0), Depth+1)) | |||
3986 | return true; | |||
3987 | ||||
3988 | // Restore the old addr mode info. | |||
3989 | AddrMode = BackupAddrMode; | |||
3990 | AddrModeInsts.resize(OldSize); | |||
3991 | TPT.rollback(LastKnownGood); | |||
3992 | ||||
3993 | // Otherwise this was over-aggressive. Try merging in the LHS then the RHS. | |||
3994 | if (matchAddr(AddrInst->getOperand(0), Depth+1) && | |||
3995 | matchAddr(AddrInst->getOperand(1), Depth+1)) | |||
3996 | return true; | |||
3997 | ||||
3998 | // Otherwise we definitely can't merge the ADD in. | |||
3999 | AddrMode = BackupAddrMode; | |||
4000 | AddrModeInsts.resize(OldSize); | |||
4001 | TPT.rollback(LastKnownGood); | |||
4002 | break; | |||
4003 | } | |||
4004 | //case Instruction::Or: | |||
4005 | // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD. | |||
4006 | //break; | |||
4007 | case Instruction::Mul: | |||
4008 | case Instruction::Shl: { | |||
4009 | // Can only handle X*C and X << C. | |||
4010 | ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1)); | |||
4011 | if (!RHS) | |||
4012 | return false; | |||
4013 | int64_t Scale = RHS->getSExtValue(); | |||
4014 | if (Opcode == Instruction::Shl) | |||
4015 | Scale = 1LL << Scale; | |||
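    // e.g. (illustrative): "shl i64 %x, 3" is matched as %x scaled by 8.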
4016 | ||||
4017 | return matchScaledValue(AddrInst->getOperand(0), Scale, Depth); | |||
4018 | } | |||
4019 | case Instruction::GetElementPtr: { | |||
4020 |     // Scan the GEP. We check whether it contains constant offsets and at most | |||
4021 | // one variable offset. | |||
4022 | int VariableOperand = -1; | |||
4023 | unsigned VariableScale = 0; | |||
4024 | ||||
4025 | int64_t ConstantOffset = 0; | |||
4026 | gep_type_iterator GTI = gep_type_begin(AddrInst); | |||
4027 | for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { | |||
4028 | if (StructType *STy = GTI.getStructTypeOrNull()) { | |||
4029 | const StructLayout *SL = DL.getStructLayout(STy); | |||
4030 | unsigned Idx = | |||
4031 | cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue(); | |||
4032 | ConstantOffset += SL->getElementOffset(Idx); | |||
4033 | } else { | |||
4034 | uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType()); | |||
4035 | if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) { | |||
4036 | ConstantOffset += CI->getSExtValue()*TypeSize; | |||
4037 | } else if (TypeSize) { // Scales of zero don't do anything. | |||
4038 | // We only allow one variable index at the moment. | |||
4039 | if (VariableOperand != -1) | |||
4040 | return false; | |||
4041 | ||||
4042 | // Remember the variable index. | |||
4043 | VariableOperand = i; | |||
4044 | VariableScale = TypeSize; | |||
4045 | } | |||
4046 | } | |||
4047 | } | |||
4048 | ||||
4049 | // A common case is for the GEP to only do a constant offset. In this case, | |||
4050 | // just add it to the disp field and check validity. | |||
4051 | if (VariableOperand == -1) { | |||
4052 | AddrMode.BaseOffs += ConstantOffset; | |||
4053 | if (ConstantOffset == 0 || | |||
4054 | TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) { | |||
4055 | // Check to see if we can fold the base pointer in too. | |||
4056 | if (matchAddr(AddrInst->getOperand(0), Depth+1)) | |||
4057 | return true; | |||
4058 | } | |||
4059 | AddrMode.BaseOffs -= ConstantOffset; | |||
4060 | return false; | |||
4061 | } | |||
4062 | ||||
4063 | // Save the valid addressing mode in case we can't match. | |||
4064 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4065 | unsigned OldSize = AddrModeInsts.size(); | |||
4066 | ||||
4067 | // See if the scale and offset amount is valid for this target. | |||
4068 | AddrMode.BaseOffs += ConstantOffset; | |||
4069 | ||||
4070 | // Match the base operand of the GEP. | |||
4071 | if (!matchAddr(AddrInst->getOperand(0), Depth+1)) { | |||
4072 | // If it couldn't be matched, just stuff the value in a register. | |||
4073 | if (AddrMode.HasBaseReg) { | |||
4074 | AddrMode = BackupAddrMode; | |||
4075 | AddrModeInsts.resize(OldSize); | |||
4076 | return false; | |||
4077 | } | |||
4078 | AddrMode.HasBaseReg = true; | |||
4079 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
4080 | } | |||
4081 | ||||
4082 | // Match the remaining variable portion of the GEP. | |||
4083 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale, | |||
4084 | Depth)) { | |||
4085 | // If it couldn't be matched, try stuffing the base into a register | |||
4086 | // instead of matching it, and retrying the match of the scale. | |||
4087 | AddrMode = BackupAddrMode; | |||
4088 | AddrModeInsts.resize(OldSize); | |||
4089 | if (AddrMode.HasBaseReg) | |||
4090 | return false; | |||
4091 | AddrMode.HasBaseReg = true; | |||
4092 | AddrMode.BaseReg = AddrInst->getOperand(0); | |||
4093 | AddrMode.BaseOffs += ConstantOffset; | |||
4094 | if (!matchScaledValue(AddrInst->getOperand(VariableOperand), | |||
4095 | VariableScale, Depth)) { | |||
4096 | // If even that didn't work, bail. | |||
4097 | AddrMode = BackupAddrMode; | |||
4098 | AddrModeInsts.resize(OldSize); | |||
4099 | return false; | |||
4100 | } | |||
4101 | } | |||
4102 | ||||
4103 | return true; | |||
4104 | } | |||
4105 | case Instruction::SExt: | |||
4106 | case Instruction::ZExt: { | |||
4107 | Instruction *Ext = dyn_cast<Instruction>(AddrInst); | |||
4108 | if (!Ext) | |||
4109 | return false; | |||
4110 | ||||
4111 | // Try to move this ext out of the way of the addressing mode. | |||
4112 | // Ask for a method for doing so. | |||
4113 | TypePromotionHelper::Action TPH = | |||
4114 | TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts); | |||
4115 | if (!TPH) | |||
4116 | return false; | |||
4117 | ||||
4118 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4119 | TPT.getRestorationPoint(); | |||
4120 | unsigned CreatedInstsCost = 0; | |||
4121 | unsigned ExtCost = !TLI.isExtFree(Ext); | |||
4122 | Value *PromotedOperand = | |||
4123 | TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI); | |||
4124 | // SExt has been moved away. | |||
4125 | // Thus either it will be rematched later in the recursive calls or it is | |||
4126 | // gone. Anyway, we must not fold it into the addressing mode at this point. | |||
4127 | // E.g., | |||
4128 | // op = add opnd, 1 | |||
4129 | // idx = ext op | |||
4130 | // addr = gep base, idx | |||
4131 | // is now: | |||
4132 | // promotedOpnd = ext opnd <- no match here | |||
4133 | // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls) | |||
4134 | // addr = gep base, op <- match | |||
4135 | if (MovedAway) | |||
4136 | *MovedAway = true; | |||
4137 | ||||
4138 |     assert(PromotedOperand && | |||
4139 |            "TypePromotionHelper should have filtered out those cases"); | |||
4140 | ||||
4141 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4142 | unsigned OldSize = AddrModeInsts.size(); | |||
4143 | ||||
4144 | if (!matchAddr(PromotedOperand, Depth) || | |||
4145 | // The total of the new cost is equal to the cost of the created | |||
4146 | // instructions. | |||
4147 | // The total of the old cost is equal to the cost of the extension plus | |||
4148 | // what we have saved in the addressing mode. | |||
4149 | !isPromotionProfitable(CreatedInstsCost, | |||
4150 | ExtCost + (AddrModeInsts.size() - OldSize), | |||
4151 | PromotedOperand)) { | |||
4152 | AddrMode = BackupAddrMode; | |||
4153 | AddrModeInsts.resize(OldSize); | |||
4154 |       DEBUG(dbgs() << "Sign extension does not pay off: rollback\n"); | |||
4155 | TPT.rollback(LastKnownGood); | |||
4156 | return false; | |||
4157 | } | |||
4158 | return true; | |||
4159 | } | |||
4160 | } | |||
4161 | return false; | |||
4162 | } | |||
4163 | ||||
4164 | /// If we can, try to add the value of 'Addr' into the current addressing mode. | |||
4165 | /// If Addr can't be added to AddrMode this returns false and leaves AddrMode | |||
4166 | /// unmodified. This assumes that Addr is either a pointer type or intptr_t | |||
4167 | /// for the target. | |||
4168 | /// | |||
4169 | bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) { | |||
4170 | // Start a transaction at this point that we will rollback if the matching | |||
4171 | // fails. | |||
4172 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4173 | TPT.getRestorationPoint(); | |||
4174 | if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) { | |||
4175 | // Fold in immediates if legal for the target. | |||
4176 | AddrMode.BaseOffs += CI->getSExtValue(); | |||
4177 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4178 | return true; | |||
4179 | AddrMode.BaseOffs -= CI->getSExtValue(); | |||
4180 | } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) { | |||
4181 | // If this is a global variable, try to fold it into the addressing mode. | |||
4182 | if (!AddrMode.BaseGV) { | |||
4183 | AddrMode.BaseGV = GV; | |||
4184 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4185 | return true; | |||
4186 | AddrMode.BaseGV = nullptr; | |||
4187 | } | |||
4188 | } else if (Instruction *I = dyn_cast<Instruction>(Addr)) { | |||
4189 | ExtAddrMode BackupAddrMode = AddrMode; | |||
4190 | unsigned OldSize = AddrModeInsts.size(); | |||
4191 | ||||
4192 | // Check to see if it is possible to fold this operation. | |||
4193 | bool MovedAway = false; | |||
4194 | if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) { | |||
4195 | // This instruction may have been moved away. If so, there is nothing | |||
4196 | // to check here. | |||
4197 | if (MovedAway) | |||
4198 | return true; | |||
4199 | // Okay, it's possible to fold this. Check to see if it is actually | |||
4200 | // *profitable* to do so. We use a simple cost model to avoid increasing | |||
4201 | // register pressure too much. | |||
4202 | if (I->hasOneUse() || | |||
4203 | isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) { | |||
4204 | AddrModeInsts.push_back(I); | |||
4205 | return true; | |||
4206 | } | |||
4207 | ||||
4208 | // It isn't profitable to do this, roll back. | |||
4209 | //cerr << "NOT FOLDING: " << *I; | |||
4210 | AddrMode = BackupAddrMode; | |||
4211 | AddrModeInsts.resize(OldSize); | |||
4212 | TPT.rollback(LastKnownGood); | |||
4213 | } | |||
4214 | } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) { | |||
4215 | if (matchOperationAddr(CE, CE->getOpcode(), Depth)) | |||
4216 | return true; | |||
4217 | TPT.rollback(LastKnownGood); | |||
4218 | } else if (isa<ConstantPointerNull>(Addr)) { | |||
4219 | // Null pointer gets folded without affecting the addressing mode. | |||
4220 | return true; | |||
4221 | } | |||
4222 | ||||
4223 |   // Worst case, the target should support [reg] addressing modes. :) | |||
4224 | if (!AddrMode.HasBaseReg) { | |||
4225 | AddrMode.HasBaseReg = true; | |||
4226 | AddrMode.BaseReg = Addr; | |||
4227 | // Still check for legality in case the target supports [imm] but not [i+r]. | |||
4228 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4229 | return true; | |||
4230 | AddrMode.HasBaseReg = false; | |||
4231 | AddrMode.BaseReg = nullptr; | |||
4232 | } | |||
4233 | ||||
4234 | // If the base register is already taken, see if we can do [r+r]. | |||
4235 | if (AddrMode.Scale == 0) { | |||
4236 | AddrMode.Scale = 1; | |||
4237 | AddrMode.ScaledReg = Addr; | |||
4238 | if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) | |||
4239 | return true; | |||
4240 | AddrMode.Scale = 0; | |||
4241 | AddrMode.ScaledReg = nullptr; | |||
4242 | } | |||
4243 | // Couldn't match. | |||
4244 | TPT.rollback(LastKnownGood); | |||
4245 | return false; | |||
4246 | } | |||
4247 | ||||
4248 | /// Check to see if all uses of OpVal by the specified inline asm call are due | |||
4249 | /// to memory operands. If so, return true, otherwise return false. | |||
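/// For example (illustrative IR): with
///   call void asm "movb $$0, $0", "=*m"(i8* %p)
/// %p only appears as an indirect memory operand, so it may be foldable.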
4250 | static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal, | |||
4251 | const TargetLowering &TLI, | |||
4252 | const TargetRegisterInfo &TRI) { | |||
4253 | const Function *F = CI->getFunction(); | |||
4254 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
4255 | TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, | |||
4256 | ImmutableCallSite(CI)); | |||
4257 | ||||
4258 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
4259 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
4260 | ||||
4261 | // Compute the constraint code and ConstraintType to use. | |||
4262 | TLI.ComputeConstraintToUse(OpInfo, SDValue()); | |||
4263 | ||||
4264 | // If this asm operand is our Value*, and if it isn't an indirect memory | |||
4265 | // operand, we can't fold it! | |||
4266 | if (OpInfo.CallOperandVal == OpVal && | |||
4267 | (OpInfo.ConstraintType != TargetLowering::C_Memory || | |||
4268 | !OpInfo.isIndirect)) | |||
4269 | return false; | |||
4270 | } | |||
4271 | ||||
4272 | return true; | |||
4273 | } | |||
4274 | ||||
4275 | // Max number of memory uses to look at before aborting the search to conserve | |||
4276 | // compile time. | |||
4277 | static constexpr int MaxMemoryUsesToScan = 20; | |||
4278 | ||||
4279 | /// Recursively walk all the uses of I until we find a memory use. | |||
4280 | /// If we find an obviously non-foldable instruction, return true. | |||
4281 | /// Add the ultimately found memory instructions to MemoryUses. | |||
4282 | static bool FindAllMemoryUses( | |||
4283 | Instruction *I, | |||
4284 | SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses, | |||
4285 | SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI, | |||
4286 | const TargetRegisterInfo &TRI, int SeenInsts = 0) { | |||
4287 | // If we already considered this instruction, we're done. | |||
4288 | if (!ConsideredInsts.insert(I).second) | |||
4289 | return false; | |||
4290 | ||||
4291 | // If this is an obviously unfoldable instruction, bail out. | |||
4292 | if (!MightBeFoldableInst(I)) | |||
4293 | return true; | |||
4294 | ||||
4295 | const bool OptSize = I->getFunction()->optForSize(); | |||
4296 | ||||
4297 | // Loop over all the uses, recursively processing them. | |||
4298 | for (Use &U : I->uses()) { | |||
4299 | // Conservatively return true if we're seeing a large number or a deep chain | |||
4300 | // of users. This avoids excessive compilation times in pathological cases. | |||
4301 | if (SeenInsts++ >= MaxMemoryUsesToScan) | |||
4302 | return true; | |||
4303 | ||||
4304 | Instruction *UserI = cast<Instruction>(U.getUser()); | |||
4305 | if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) { | |||
4306 | MemoryUses.push_back(std::make_pair(LI, U.getOperandNo())); | |||
4307 | continue; | |||
4308 | } | |||
4309 | ||||
4310 | if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) { | |||
4311 | unsigned opNo = U.getOperandNo(); | |||
4312 | if (opNo != StoreInst::getPointerOperandIndex()) | |||
4313 | return true; // Storing addr, not into addr. | |||
4314 | MemoryUses.push_back(std::make_pair(SI, opNo)); | |||
4315 | continue; | |||
4316 | } | |||
4317 | ||||
4318 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) { | |||
4319 | unsigned opNo = U.getOperandNo(); | |||
4320 | if (opNo != AtomicRMWInst::getPointerOperandIndex()) | |||
4321 | return true; // Storing addr, not into addr. | |||
4322 | MemoryUses.push_back(std::make_pair(RMW, opNo)); | |||
4323 | continue; | |||
4324 | } | |||
4325 | ||||
4326 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) { | |||
4327 | unsigned opNo = U.getOperandNo(); | |||
4328 | if (opNo != AtomicCmpXchgInst::getPointerOperandIndex()) | |||
4329 | return true; // Storing addr, not into addr. | |||
4330 | MemoryUses.push_back(std::make_pair(CmpX, opNo)); | |||
4331 | continue; | |||
4332 | } | |||
4333 | ||||
4334 | if (CallInst *CI = dyn_cast<CallInst>(UserI)) { | |||
4335 | // If this is a cold call, we can sink the addressing calculation into | |||
4336 |       // the cold path. See optimizeCallInst. | |||
4337 | if (!OptSize && CI->hasFnAttr(Attribute::Cold)) | |||
4338 | continue; | |||
4339 | ||||
4340 | InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue()); | |||
4341 | if (!IA) return true; | |||
4342 | ||||
4343 | // If this is a memory operand, we're cool, otherwise bail out. | |||
4344 | if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI)) | |||
4345 | return true; | |||
4346 | continue; | |||
4347 | } | |||
4348 | ||||
4349 | if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, | |||
4350 | SeenInsts)) | |||
4351 | return true; | |||
4352 | } | |||
4353 | ||||
4354 | return false; | |||
4355 | } | |||
4356 | ||||
4357 | /// Return true if Val is already known to be live at the use site that we're | |||
4358 | /// folding it into. If so, there is no cost to include it in the addressing | |||
4359 | /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the | |||
4360 | /// instruction already. | |||
4361 | bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1, | |||
4362 | Value *KnownLive2) { | |||
4363 | // If Val is either of the known-live values, we know it is live! | |||
4364 | if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2) | |||
4365 | return true; | |||
4366 | ||||
4367 | // All values other than instructions and arguments (e.g. constants) are live. | |||
4368 | if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true; | |||
4369 | ||||
4370 |   // If Val is a constant sized alloca in the entry block, it is live; this is | |||
4371 | // true because it is just a reference to the stack/frame pointer, which is | |||
4372 | // live for the whole function. | |||
4373 | if (AllocaInst *AI = dyn_cast<AllocaInst>(Val)) | |||
4374 | if (AI->isStaticAlloca()) | |||
4375 | return true; | |||
4376 | ||||
4377 | // Check to see if this value is already used in the memory instruction's | |||
4378 | // block. If so, it's already live into the block at the very least, so we | |||
4379 | // can reasonably fold it. | |||
4380 | return Val->isUsedInBasicBlock(MemoryInst->getParent()); | |||
4381 | } | |||
4382 | ||||
4383 | /// It is possible for the addressing mode of the machine to fold the specified | |||
4384 | /// instruction into a load or store that ultimately uses it. | |||
4385 | /// However, the specified instruction has multiple uses. | |||
4386 | /// Given this, it may actually increase register pressure to fold it | |||
4387 | /// into the load. For example, consider this code: | |||
4388 | /// | |||
4389 | /// X = ... | |||
4390 | /// Y = X+1 | |||
4391 | /// use(Y) -> nonload/store | |||
4392 | /// Z = Y+1 | |||
4393 | /// load Z | |||
4394 | /// | |||
4395 | /// In this case, Y has multiple uses, and can be folded into the load of Z | |||
4396 | /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to | |||
4397 | /// be live at the use(Y) line. If we don't fold Y into load Z, we use one | |||
4398 | /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the | |||
4399 | /// number of computations either. | |||
4400 | /// | |||
4401 | /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If | |||
4402 | /// X was live across 'load Z' for other reasons, we actually *would* want to | |||
4403 | /// fold the addressing mode in the Z case. This would make Y die earlier. | |||
4404 | bool AddressingModeMatcher:: | |||
4405 | isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore, | |||
4406 | ExtAddrMode &AMAfter) { | |||
4407 | if (IgnoreProfitability) return true; | |||
4408 | ||||
4409 | // AMBefore is the addressing mode before this instruction was folded into it, | |||
4410 | // and AMAfter is the addressing mode after the instruction was folded. Get | |||
4411 | // the set of registers referenced by AMAfter and subtract out those | |||
4412 | // referenced by AMBefore: this is the set of values which folding in this | |||
4413 | // address extends the lifetime of. | |||
4414 | // | |||
4415 | // Note that there are only two potential values being referenced here, | |||
4416 | // BaseReg and ScaleReg (global addresses are always available, as are any | |||
4417 | // folded immediates). | |||
4418 | Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg; | |||
4419 | ||||
4420 | // If the BaseReg or ScaledReg was referenced by the previous addrmode, their | |||
4421 | // lifetime wasn't extended by adding this instruction. | |||
4422 | if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
4423 | BaseReg = nullptr; | |||
4424 | if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg)) | |||
4425 | ScaledReg = nullptr; | |||
4426 | ||||
4427 |   // If folding this instruction (and its subexprs) didn't extend any live | |||
4428 | // ranges, we're ok with it. | |||
4429 | if (!BaseReg && !ScaledReg) | |||
4430 | return true; | |||
4431 | ||||
4432 | // If all uses of this instruction can have the address mode sunk into them, | |||
4433 | // we can remove the addressing mode and effectively trade one live register | |||
4434 | // for another (at worst.) In this context, folding an addressing mode into | |||
4435 | // the use is just a particularly nice way of sinking it. | |||
4436 | SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses; | |||
4437 | SmallPtrSet<Instruction*, 16> ConsideredInsts; | |||
4438 | if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI)) | |||
4439 | return false; // Has a non-memory, non-foldable use! | |||
4440 | ||||
4441 | // Now that we know that all uses of this instruction are part of a chain of | |||
4442 | // computation involving only operations that could theoretically be folded | |||
4443 | // into a memory use, loop over each of these memory operation uses and see | |||
4444 | // if they could *actually* fold the instruction. The assumption is that | |||
4445 | // addressing modes are cheap and that duplicating the computation involved | |||
4446 | // many times is worthwhile, even on a fastpath. For sinking candidates | |||
4447 | // (i.e. cold call sites), this serves as a way to prevent excessive code | |||
4448 |   // growth since most architectures have some reasonably small and fast way to | |||
4449 |   // compute an effective address. (e.g., LEA on x86) | |||
4450 | SmallVector<Instruction*, 32> MatchedAddrModeInsts; | |||
4451 | for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) { | |||
4452 | Instruction *User = MemoryUses[i].first; | |||
4453 | unsigned OpNo = MemoryUses[i].second; | |||
4454 | ||||
4455 | // Get the access type of this use. If the use isn't a pointer, we don't | |||
4456 | // know what it accesses. | |||
4457 | Value *Address = User->getOperand(OpNo); | |||
4458 | PointerType *AddrTy = dyn_cast<PointerType>(Address->getType()); | |||
4459 | if (!AddrTy) | |||
4460 | return false; | |||
4461 | Type *AddressAccessTy = AddrTy->getElementType(); | |||
4462 | unsigned AS = AddrTy->getAddressSpace(); | |||
4463 | ||||
4464 | // Do a match against the root of this address, ignoring profitability. This | |||
4465 | // will tell us if the addressing mode for the memory operation will | |||
4466 | // *actually* cover the shared instruction. | |||
4467 | ExtAddrMode Result; | |||
4468 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4469 | TPT.getRestorationPoint(); | |||
4470 | AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, | |||
4471 | AddressAccessTy, AS, | |||
4472 | MemoryInst, Result, InsertedInsts, | |||
4473 | PromotedInsts, TPT); | |||
4474 | Matcher.IgnoreProfitability = true; | |||
4475 | bool Success = Matcher.matchAddr(Address, 0); | |||
4476 | (void)Success; assert(Success && "Couldn't select *anything*?"); | |||
4477 | ||||
4478 | // The match was only to check profitability; the changes made are not | |||
4479 | // part of the original matcher. Therefore, they should be dropped, | |||
4480 | // otherwise the original matcher will not present the right state. | |||
4481 | TPT.rollback(LastKnownGood); | |||
4482 | ||||
4483 | // If the match didn't cover I, then it won't be shared by it. | |||
4484 | if (!is_contained(MatchedAddrModeInsts, I)) | |||
4485 | return false; | |||
4486 | ||||
4487 | MatchedAddrModeInsts.clear(); | |||
4488 | } | |||
4489 | ||||
4490 | return true; | |||
4491 | } | |||
4492 | ||||
4493 | /// Return true if the specified value is defined in a | |||
4494 | /// different basic block than BB. | |||
4495 | static bool IsNonLocalValue(Value *V, BasicBlock *BB) { | |||
4496 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
4497 | return I->getParent() != BB; | |||
4498 | return false; | |||
4499 | } | |||
4500 | ||||
4501 | /// Sink addressing mode computation immediately before MemoryInst if doing so | |||
4502 | /// can be done without increasing register pressure. The need for the | |||
4503 | /// register pressure constraint means this can end up being an all or nothing | |||
4504 | /// decision for all uses of the same addressing computation. | |||
4505 | /// | |||
4506 | /// Load and Store Instructions often have addressing modes that can do | |||
4507 | /// significant amounts of computation. As such, instruction selection will try | |||
4508 | /// to get the load or store to do as much computation as possible for the | |||
4509 | /// program. The problem is that isel can only see within a single block. As | |||
4510 | /// such, we sink as much legal addressing mode work into the block as possible. | |||
4511 | /// | |||
4512 | /// This method is used to optimize both load/store and inline asms with memory | |||
4513 | /// operands. It's also used to sink addressing computations feeding into cold | |||
4514 | /// call sites into their (cold) basic block. | |||
4515 | /// | |||
4516 | /// The motivation for handling sinking into cold blocks is that doing so can | |||
4517 | /// both enable other address mode sinking (by satisfying the register pressure | |||
4518 | /// constraint above), and reduce register pressure globally (by removing the | |||
4519 | /// addressing mode computation from the fast path entirely). | |||
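///
/// A minimal sketch of the effect (hypothetical IR, names are illustrative):
/// \code
///   entry:
///     %addr = getelementptr i32* %base, i64 %idx
///     br label %use
///   use:
///     %v = load i32* %addr
/// \endcode
/// becomes
/// \code
///   use:
///     %sunkaddr = getelementptr i32* %base, i64 %idx
///     %v = load i32* %sunkaddr
/// \endcode
/// so instruction selection can fold the whole address computation into the
/// load's addressing mode.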
4520 | bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, | |||
4521 | Type *AccessTy, unsigned AddrSpace) { | |||
4522 | Value *Repl = Addr; | |||
4523 | ||||
4524 | // Try to collapse single-value PHI nodes. This is necessary to undo | |||
4525 | // unprofitable PRE transformations. | |||
4526 | SmallVector<Value*, 8> worklist; | |||
4527 | SmallPtrSet<Value*, 16> Visited; | |||
4528 | worklist.push_back(Addr); | |||
4529 | ||||
4530 | // Use a worklist to iteratively look through PHI and select nodes, and | |||
4531 | // ensure that the addressing modes obtained from the non-PHI/select roots of | |||
4532 | // the graph are compatible. | |||
4533 | bool PhiOrSelectSeen = false; | |||
4534 | SmallVector<Instruction*, 16> AddrModeInsts; | |||
4535 | AddressingModeCombiner AddrModes; | |||
4536 | TypePromotionTransaction TPT(RemovedInsts); | |||
4537 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4538 | TPT.getRestorationPoint(); | |||
4539 | while (!worklist.empty()) { | |||
4540 | Value *V = worklist.back(); | |||
4541 | worklist.pop_back(); | |||
4542 | ||||
4543 | // We allow traversing cyclic Phi nodes. | |||
4544 | // On success, after this loop, every traversal through the Phi nodes | |||
4545 | // computes an address of the form | |||
4546 | //     BaseGV + Base + Scale * Index + Offset | |||
4547 | // where Scale and Offset are constants and BaseGV, Base and Index | |||
4548 | // are exactly the same Values in all cases. | |||
4549 | // This means that BaseGV, Scale and Offset dominate our memory instruction | |||
4550 | // and have the same values as they had in the address computation | |||
4551 | // represented as a Phi, so we can safely sink it to the memory instruction. | |||
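// A hypothetical illustration (names are made up): with
//   %a1 = getelementptr i8* %base, i64 %idx   ; in %pred1
//   %a2 = getelementptr i8* %base, i64 %idx   ; in %pred2
//   %p  = phi i8* [ %a1, %pred1 ], [ %a2, %pred2 ]
// both incoming values decompose to the same BaseGV/Base/Scale/Index/Offset
// components, so the address can be recomputed next to the memory
// instruction itself.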
4552 | if (!Visited.insert(V).second) | |||
4553 | continue; | |||
4554 | ||||
4555 | // For a PHI node, push all of its incoming values. | |||
4556 | if (PHINode *P = dyn_cast<PHINode>(V)) { | |||
4557 | for (Value *IncValue : P->incoming_values()) | |||
4558 | worklist.push_back(IncValue); | |||
4559 | PhiOrSelectSeen = true; | |||
4560 | continue; | |||
4561 | } | |||
4562 | // Similar for select. | |||
4563 | if (SelectInst *SI = dyn_cast<SelectInst>(V)) { | |||
4564 | worklist.push_back(SI->getFalseValue()); | |||
4565 | worklist.push_back(SI->getTrueValue()); | |||
4566 | PhiOrSelectSeen = true; | |||
4567 | continue; | |||
4568 | } | |||
4569 | ||||
4570 | // For non-PHIs, determine the addressing mode being computed. Note that | |||
4571 | // the result may differ depending on what other uses our candidate | |||
4572 | // addressing instructions might have. | |||
4573 | AddrModeInsts.clear(); | |||
4574 | ExtAddrMode NewAddrMode = AddressingModeMatcher::Match( | |||
4575 | V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI, | |||
4576 | InsertedInsts, PromotedInsts, TPT); | |||
4577 | NewAddrMode.OriginalValue = V; | |||
4578 | ||||
4579 | if (!AddrModes.addNewAddrMode(NewAddrMode)) | |||
4580 | break; | |||
4581 | } | |||
4582 | ||||
4583 | // Try to combine the AddrModes we've collected. If we couldn't collect any, | |||
4584 | // or we have multiple but either couldn't combine them or combining them | |||
4585 | // wouldn't do anything useful, bail out now. | |||
4586 | if (!AddrModes.combineAddrModes()) { | |||
4587 | TPT.rollback(LastKnownGood); | |||
4588 | return false; | |||
4589 | } | |||
4590 | TPT.commit(); | |||
4591 | ||||
4592 | // Get the combined AddrMode (or the only AddrMode, if we only had one). | |||
4593 | ExtAddrMode AddrMode = AddrModes.getAddrMode(); | |||
4594 | ||||
4595 | // If all the instructions matched are already in this BB, don't do anything. | |||
4596 | // If we saw a Phi node then it is definitely not local, and if we saw a select | |||
4597 | // then we want to push the address calculation past it even if it's already | |||
4598 | // in this BB. | |||
4599 | if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) { | |||
4600 | return IsNonLocalValue(V, MemoryInst->getParent()); | |||
4601 | })) { | |||
4602 | DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n"); | |||
4603 | return false; | |||
4604 | } | |||
4605 | ||||
4606 | // Insert this computation right after this user. Since our caller is | |||
4607 | // scanning from the top of the BB to the bottom, reuses of the expr are | |||
4608 | // guaranteed to happen later. | |||
4609 | IRBuilder<> Builder(MemoryInst); | |||
4610 | ||||
4611 | // Now that we've determined the addressing expression we want to use and | |||
4612 | // know that we have to sink it into this block, check to see if we have | |||
4613 | // already done this for some other load/store instr in this block. If so, | |||
4614 | // reuse the computation. | |||
4615 | Value *&SunkAddr = SunkAddrs[Addr]; | |||
4616 | if (SunkAddr) { | |||
4617 | DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for " | |||
4618 | << *MemoryInst << "\n"); | |||
4619 | if (SunkAddr->getType() != Addr->getType()) | |||
4620 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
4621 | } else if (AddrSinkUsingGEPs || | |||
4622 | (!AddrSinkUsingGEPs.getNumOccurrences() && TM && | |||
4623 | SubtargetInfo->useAA())) { | |||
4624 | // By default, we use the GEP-based method when AA is used later. This | |||
4625 | // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities. | |||
4626 | DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " | |||
4627 | << *MemoryInst << "\n"); | |||
4628 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
4629 | Value *ResultPtr = nullptr, *ResultIndex = nullptr; | |||
4630 | ||||
4631 | // First, find the pointer. | |||
4632 | if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) { | |||
4633 | ResultPtr = AddrMode.BaseReg; | |||
4634 | AddrMode.BaseReg = nullptr; | |||
4635 | } | |||
4636 | ||||
4637 | if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) { | |||
4638 | // We can't add more than one pointer together, nor can we scale a | |||
4639 | // pointer (both of which seem meaningless). | |||
4640 | if (ResultPtr || AddrMode.Scale != 1) | |||
4641 | return false; | |||
4642 | ||||
4643 | ResultPtr = AddrMode.ScaledReg; | |||
4644 | AddrMode.Scale = 0; | |||
4645 | } | |||
4646 | ||||
4647 | // It is only safe to sign extend the BaseReg if we know that the math | |||
4648 | // required to create it did not overflow before we extend it. Since | |||
4649 | // the original IR value was tossed in favor of a constant back when | |||
4650 | // the AddrMode was created we need to bail out gracefully if widths | |||
4651 | // do not match instead of extending it. | |||
4652 | // | |||
4653 | // (See below for code to add the scale.) | |||
4654 | if (AddrMode.Scale) { | |||
4655 | Type *ScaledRegTy = AddrMode.ScaledReg->getType(); | |||
4656 | if (cast<IntegerType>(IntPtrTy)->getBitWidth() > | |||
4657 | cast<IntegerType>(ScaledRegTy)->getBitWidth()) | |||
4658 | return false; | |||
4659 | } | |||
4660 | ||||
4661 | if (AddrMode.BaseGV) { | |||
4662 | if (ResultPtr) | |||
4663 | return false; | |||
4664 | ||||
4665 | ResultPtr = AddrMode.BaseGV; | |||
4666 | } | |||
4667 | ||||
4668 | // If the real base value actually came from an inttoptr, then the matcher | |||
4669 | // will look through it and provide only the integer value. In that case, | |||
4670 | // use it here. | |||
4671 | if (!DL->isNonIntegralPointerType(Addr->getType())) { | |||
4672 | if (!ResultPtr && AddrMode.BaseReg) { | |||
4673 | ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), | |||
4674 | "sunkaddr"); | |||
4675 | AddrMode.BaseReg = nullptr; | |||
4676 | } else if (!ResultPtr && AddrMode.Scale == 1) { | |||
4677 | ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), | |||
4678 | "sunkaddr"); | |||
4679 | AddrMode.Scale = 0; | |||
4680 | } | |||
4681 | } | |||
4682 | ||||
4683 | if (!ResultPtr && | |||
4684 | !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) { | |||
4685 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
4686 | } else if (!ResultPtr) { | |||
4687 | return false; | |||
4688 | } else { | |||
4689 | Type *I8PtrTy = | |||
4690 | Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace()); | |||
4691 | Type *I8Ty = Builder.getInt8Ty(); | |||
4692 | ||||
4693 | // Start with the base register. Do this first so that subsequent address | |||
4694 | // matching finds it last, which will prevent it from trying to match it | |||
4695 | // as the scaled value in case it happens to be a mul. That would be | |||
4696 | // problematic if we've sunk a different mul for the scale, because then | |||
4697 | // we'd end up sinking both muls. | |||
4698 | if (AddrMode.BaseReg) { | |||
4699 | Value *V = AddrMode.BaseReg; | |||
4700 | if (V->getType() != IntPtrTy) | |||
4701 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
4702 | ||||
4703 | ResultIndex = V; | |||
4704 | } | |||
4705 | ||||
4706 | // Add the scale value. | |||
4707 | if (AddrMode.Scale) { | |||
4708 | Value *V = AddrMode.ScaledReg; | |||
4709 | if (V->getType() == IntPtrTy) { | |||
4710 | // done. | |||
4711 | } else { | |||
4712 | assert(cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
4713 | cast<IntegerType>(V->getType())->getBitWidth() && | |||
4714 | "We can't transform if ScaledReg is too narrow"); | |||
4715 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
4716 | } | |||
4717 | ||||
4718 | if (AddrMode.Scale != 1) | |||
4719 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
4720 | "sunkaddr"); | |||
4721 | if (ResultIndex) | |||
4722 | ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr"); | |||
4723 | else | |||
4724 | ResultIndex = V; | |||
4725 | } | |||
4726 | ||||
4727 | // Add in the Base Offset if present. | |||
4728 | if (AddrMode.BaseOffs) { | |||
4729 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
4730 | if (ResultIndex) { | |||
4731 | // We need to add this separately from the scale above to help with | |||
4732 | // SDAG consecutive load/store merging. | |||
4733 | if (ResultPtr->getType() != I8PtrTy) | |||
4734 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
4735 | ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); | |||
4736 | } | |||
4737 | ||||
4738 | ResultIndex = V; | |||
4739 | } | |||
4740 | ||||
4741 | if (!ResultIndex) { | |||
4742 | SunkAddr = ResultPtr; | |||
4743 | } else { | |||
4744 | if (ResultPtr->getType() != I8PtrTy) | |||
4745 | ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy); | |||
4746 | SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr"); | |||
4747 | } | |||
4748 | ||||
4749 | if (SunkAddr->getType() != Addr->getType()) | |||
4750 | SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType()); | |||
4751 | } | |||
4752 | } else { | |||
4753 | // We'd require a ptrtoint/inttoptr down the line, which we can't do for | |||
4754 | // non-integral pointers, so in that case bail out now. | |||
4755 | Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr; | |||
4756 | Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr; | |||
4757 | PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy); | |||
4758 | PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy); | |||
4759 | if (DL->isNonIntegralPointerType(Addr->getType()) || | |||
4760 | (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) || | |||
4761 | (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) || | |||
4762 | (AddrMode.BaseGV && | |||
4763 | DL->isNonIntegralPointerType(AddrMode.BaseGV->getType()))) | |||
4764 | return false; | |||
4765 | ||||
4766 | DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " | |||
4767 | << *MemoryInst << "\n"); | |||
4768 | Type *IntPtrTy = DL->getIntPtrType(Addr->getType()); | |||
4769 | Value *Result = nullptr; | |||
4770 | ||||
4771 | // Start with the base register. Do this first so that subsequent address | |||
4772 | // matching finds it last, which will prevent it from trying to match it | |||
4773 | // as the scaled value in case it happens to be a mul. That would be | |||
4774 | // problematic if we've sunk a different mul for the scale, because then | |||
4775 | // we'd end up sinking both muls. | |||
4776 | if (AddrMode.BaseReg) { | |||
4777 | Value *V = AddrMode.BaseReg; | |||
4778 | if (V->getType()->isPointerTy()) | |||
4779 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
4780 | if (V->getType() != IntPtrTy) | |||
4781 | V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr"); | |||
4782 | Result = V; | |||
4783 | } | |||
4784 | ||||
4785 | // Add the scale value. | |||
4786 | if (AddrMode.Scale) { | |||
4787 | Value *V = AddrMode.ScaledReg; | |||
4788 | if (V->getType() == IntPtrTy) { | |||
4789 | // done. | |||
4790 | } else if (V->getType()->isPointerTy()) { | |||
4791 | V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr"); | |||
4792 | } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() < | |||
4793 | cast<IntegerType>(V->getType())->getBitWidth()) { | |||
4794 | V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr"); | |||
4795 | } else { | |||
4796 | // It is only safe to sign extend the BaseReg if we know that the math | |||
4797 | // required to create it did not overflow before we extend it. Since | |||
4798 | // the original IR value was tossed in favor of a constant back when | |||
4799 | // the AddrMode was created we need to bail out gracefully if widths | |||
4800 | // do not match instead of extending it. | |||
4801 | Instruction *I = dyn_cast_or_null<Instruction>(Result); | |||
4802 | if (I && (Result != AddrMode.BaseReg)) | |||
4803 | I->eraseFromParent(); | |||
4804 | return false; | |||
4805 | } | |||
4806 | if (AddrMode.Scale != 1) | |||
4807 | V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale), | |||
4808 | "sunkaddr"); | |||
4809 | if (Result) | |||
4810 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
4811 | else | |||
4812 | Result = V; | |||
4813 | } | |||
4814 | ||||
4815 | // Add in the BaseGV if present. | |||
4816 | if (AddrMode.BaseGV) { | |||
4817 | Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr"); | |||
4818 | if (Result) | |||
4819 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
4820 | else | |||
4821 | Result = V; | |||
4822 | } | |||
4823 | ||||
4824 | // Add in the Base Offset if present. | |||
4825 | if (AddrMode.BaseOffs) { | |||
4826 | Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs); | |||
4827 | if (Result) | |||
4828 | Result = Builder.CreateAdd(Result, V, "sunkaddr"); | |||
4829 | else | |||
4830 | Result = V; | |||
4831 | } | |||
4832 | ||||
4833 | if (!Result) | |||
4834 | SunkAddr = Constant::getNullValue(Addr->getType()); | |||
4835 | else | |||
4836 | SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr"); | |||
4837 | } | |||
4838 | ||||
4839 | MemoryInst->replaceUsesOfWith(Repl, SunkAddr); | |||
4840 | ||||
4841 | // If we have no uses, recursively delete the value and all dead instructions | |||
4842 | // using it. | |||
4843 | if (Repl->use_empty()) { | |||
4844 | // This can cause recursive deletion, which can invalidate our iterator. | |||
4845 | // Use a WeakTrackingVH to hold onto it in case this happens. | |||
4846 | Value *CurValue = &*CurInstIterator; | |||
4847 | WeakTrackingVH IterHandle(CurValue); | |||
4848 | BasicBlock *BB = CurInstIterator->getParent(); | |||
4849 | ||||
4850 | RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); | |||
4851 | ||||
4852 | if (IterHandle != CurValue) { | |||
4853 | // If the iterator instruction was recursively deleted, start over at the | |||
4854 | // start of the block. | |||
4855 | CurInstIterator = BB->begin(); | |||
4856 | SunkAddrs.clear(); | |||
4857 | } | |||
4858 | } | |||
4859 | ++NumMemoryInsts; | |||
4860 | return true; | |||
4861 | } | |||
4862 | ||||
4863 | /// If there are any memory operands, use OptimizeMemoryInst to sink their | |||
4864 | /// address computations into the block when possible / profitable. | |||
4865 | bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) { | |||
4866 | bool MadeChange = false; | |||
4867 | ||||
4868 | const TargetRegisterInfo *TRI = | |||
4869 | TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo(); | |||
4870 | TargetLowering::AsmOperandInfoVector TargetConstraints = | |||
4871 | TLI->ParseConstraints(*DL, TRI, CS); | |||
4872 | unsigned ArgNo = 0; | |||
4873 | for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { | |||
4874 | TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; | |||
4875 | ||||
4876 | // Compute the constraint code and ConstraintType to use. | |||
4877 | TLI->ComputeConstraintToUse(OpInfo, SDValue()); | |||
4878 | ||||
4879 | if (OpInfo.ConstraintType == TargetLowering::C_Memory && | |||
4880 | OpInfo.isIndirect) { | |||
4881 | Value *OpVal = CS->getArgOperand(ArgNo++); | |||
4882 | MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u); | |||
4883 | } else if (OpInfo.Type == InlineAsm::isInput) | |||
4884 | ArgNo++; | |||
4885 | } | |||
4886 | ||||
4887 | return MadeChange; | |||
4888 | } | |||
4889 | ||||
4890 | /// \brief Check if all the uses of \p Val are equivalent (or free) zero or | |||
4891 | /// sign extensions. | |||
4892 | static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) { | |||
4893 | assert(!Val->use_empty() && "Input must have at least one use"); | |||
4894 | const Instruction *FirstUser = cast<Instruction>(*Val->user_begin()); | |||
4895 | bool IsSExt = isa<SExtInst>(FirstUser); | |||
4896 | Type *ExtTy = FirstUser->getType(); | |||
4897 | for (const User *U : Val->users()) { | |||
4898 | const Instruction *UI = cast<Instruction>(U); | |||
4899 | if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI))) | |||
4900 | return false; | |||
4901 | Type *CurTy = UI->getType(); | |||
4902 | // Same input and output types: Same instruction after CSE. | |||
4903 | if (CurTy == ExtTy) | |||
4904 | continue; | |||
4905 | ||||
4906 | // If IsSExt is true, we are in this situation: | |||
4907 | // a = Val | |||
4908 | // b = sext ty1 a to ty2 | |||
4909 | // c = sext ty1 a to ty3 | |||
4910 | // Assuming ty2 is shorter than ty3, this could be turned into: | |||
4911 | // a = Val | |||
4912 | // b = sext ty1 a to ty2 | |||
4913 | // c = sext ty2 b to ty3 | |||
4914 | // However, the last sext is not free. | |||
4915 | if (IsSExt) | |||
4916 | return false; | |||
4917 | ||||
4918 | // This is a ZExt, maybe this is free to extend from one type to another. | |||
4919 | // In that case, we would not account for a different use. | |||
4920 | Type *NarrowTy; | |||
4921 | Type *LargeTy; | |||
4922 | if (ExtTy->getScalarType()->getIntegerBitWidth() > | |||
4923 | CurTy->getScalarType()->getIntegerBitWidth()) { | |||
4924 | NarrowTy = CurTy; | |||
4925 | LargeTy = ExtTy; | |||
4926 | } else { | |||
4927 | NarrowTy = ExtTy; | |||
4928 | LargeTy = CurTy; | |||
4929 | } | |||
4930 | ||||
4931 | if (!TLI.isZExtFree(NarrowTy, LargeTy)) | |||
4932 | return false; | |||
4933 | } | |||
4934 | // All uses are the same or can be derived from one another for free. | |||
4935 | return true; | |||
4936 | } | |||
4937 | ||||
4938 | /// \brief Try to speculatively promote extensions in \p Exts and continue | |||
4939 | /// promoting through newly promoted operands recursively as far as doing so is | |||
4940 | /// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts. | |||
4941 | /// When some promotion happened, \p TPT contains the proper state to revert | |||
4942 | /// them. | |||
4943 | /// | |||
4944 | /// \return true if some promotion happened, false otherwise. | |||
4945 | bool CodeGenPrepare::tryToPromoteExts( | |||
4946 | TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts, | |||
4947 | SmallVectorImpl<Instruction *> &ProfitablyMovedExts, | |||
4948 | unsigned CreatedInstsCost) { | |||
4949 | bool Promoted = false; | |||
4950 | ||||
4951 | // Iterate over all the extensions to try to promote them. | |||
4952 | for (auto I : Exts) { | |||
4953 | // Early check if we directly have ext(load). | |||
4954 | if (isa<LoadInst>(I->getOperand(0))) { | |||
4955 | ProfitablyMovedExts.push_back(I); | |||
4956 | continue; | |||
4957 | } | |||
4958 | ||||
4959 | // Check whether or not we want to do any promotion. The reason we have | |||
4960 | // this check inside the for loop is to catch the case where an extension | |||
4961 | // is directly fed by a load, because in such a case the extension can be moved | |||
4962 | // up without any promotion on its operands. | |||
4963 | if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion) | |||
4964 | return false; | |||
4965 | ||||
4966 | // Get the action to perform the promotion. | |||
4967 | TypePromotionHelper::Action TPH = | |||
4968 | TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts); | |||
4969 | // Check if we can promote. | |||
4970 | if (!TPH) { | |||
4971 | // Save the current extension as we cannot move up through its operand. | |||
4972 | ProfitablyMovedExts.push_back(I); | |||
4973 | continue; | |||
4974 | } | |||
4975 | ||||
4976 | // Save the current state. | |||
4977 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
4978 | TPT.getRestorationPoint(); | |||
4979 | SmallVector<Instruction *, 4> NewExts; | |||
4980 | unsigned NewCreatedInstsCost = 0; | |||
4981 | unsigned ExtCost = !TLI->isExtFree(I); | |||
4982 | // Promote. | |||
4983 | Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost, | |||
4984 | &NewExts, nullptr, *TLI); | |||
4985 | assert(PromotedVal && | |||
4986 | "TypePromotionHelper should have filtered out those cases"); | |||
4987 | ||||
4988 | // Only one extension can be merged into a load. | |||
4989 | // Therefore, if we have more than 1 new extension we heuristically | |||
4990 | // cut this search path, because it means we degrade the code quality. | |||
4991 | // With exactly 2, the transformation is neutral, because we will merge | |||
4992 | // one extension but leave one. However, we optimistically keep going, | |||
4993 | // because the new extension may be removed too. | |||
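// Rough worked example (hypothetical): promoting zext(add(load, C)) creates
// one new zext of the load (NewCreatedInstsCost = 1) while the original,
// non-free zext goes away (ExtCost = 1), so TotalCreatedInstsCost below
// nets out to 0 and the promotion is treated as neutral.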
4994 | long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost; | |||
4995 | // FIXME: It would be possible to propagate a negative value instead of | |||
4996 | // conservatively ceiling it to 0. | |||
4997 | TotalCreatedInstsCost = | |||
4998 | std::max((long long)0, (TotalCreatedInstsCost - ExtCost)); | |||
4999 | if (!StressExtLdPromotion && | |||
5000 | (TotalCreatedInstsCost > 1 || | |||
5001 | !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) { | |||
5002 | // This promotion is not profitable, rollback to the previous state, and | |||
5003 | // save the current extension in ProfitablyMovedExts as the latest | |||
5004 | // speculative promotion turned out to be unprofitable. | |||
5005 | TPT.rollback(LastKnownGood); | |||
5006 | ProfitablyMovedExts.push_back(I); | |||
5007 | continue; | |||
5008 | } | |||
5009 | // Continue promoting NewExts as far as doing so is profitable. | |||
5010 | SmallVector<Instruction *, 2> NewlyMovedExts; | |||
5011 | (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost); | |||
5012 | bool NewPromoted = false; | |||
5013 | for (auto ExtInst : NewlyMovedExts) { | |||
5014 | Instruction *MovedExt = cast<Instruction>(ExtInst); | |||
5015 | Value *ExtOperand = MovedExt->getOperand(0); | |||
5016 | // If we have reached a load, we need this extra profitability check | |||
5017 | // as it could potentially be merged into an ext(load). | |||
5018 | if (isa<LoadInst>(ExtOperand) && | |||
5019 | !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost || | |||
5020 | (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI)))) | |||
5021 | continue; | |||
5022 | ||||
5023 | ProfitablyMovedExts.push_back(MovedExt); | |||
5024 | NewPromoted = true; | |||
5025 | } | |||
5026 | ||||
5027 | // If none of speculative promotions for NewExts is profitable, rollback | |||
5028 | // and save the current extension (I) as the last profitable extension. | |||
5029 | if (!NewPromoted) { | |||
5030 | TPT.rollback(LastKnownGood); | |||
5031 | ProfitablyMovedExts.push_back(I); | |||
5032 | continue; | |||
5033 | } | |||
5034 | // The promotion is profitable. | |||
5035 | Promoted = true; | |||
5036 | } | |||
5037 | return Promoted; | |||
5038 | } | |||
5039 | ||||
5040 | /// Merge redundant sexts when one dominates the other. | |||
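/// A small sketch (hypothetical IR): given %s1 dominating %s2 with
/// \code
///   %s1 = sext i32 %x to i64
///   ...
///   %s2 = sext i32 %x to i64
/// \endcode
/// the uses of %s2 are rewritten to %s1 and %s2 is removed.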
5041 | bool CodeGenPrepare::mergeSExts(Function &F) { | |||
5042 | DominatorTree DT(F); | |||
5043 | bool Changed = false; | |||
5044 | for (auto &Entry : ValToSExtendedUses) { | |||
5045 | SExts &Insts = Entry.second; | |||
5046 | SExts CurPts; | |||
5047 | for (Instruction *Inst : Insts) { | |||
5048 | if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) || | |||
5049 | Inst->getOperand(0) != Entry.first) | |||
5050 | continue; | |||
5051 | bool inserted = false; | |||
5052 | for (auto &Pt : CurPts) { | |||
5053 | if (DT.dominates(Inst, Pt)) { | |||
5054 | Pt->replaceAllUsesWith(Inst); | |||
5055 | RemovedInsts.insert(Pt); | |||
5056 | Pt->removeFromParent(); | |||
5057 | Pt = Inst; | |||
5058 | inserted = true; | |||
5059 | Changed = true; | |||
5060 | break; | |||
5061 | } | |||
5062 | if (!DT.dominates(Pt, Inst)) | |||
5063 | // Give up if we need to merge in a common dominator as the | |||
5064 | // experiments show it is not profitable. | |||
5065 | continue; | |||
5066 | Inst->replaceAllUsesWith(Pt); | |||
5067 | RemovedInsts.insert(Inst); | |||
5068 | Inst->removeFromParent(); | |||
5069 | inserted = true; | |||
5070 | Changed = true; | |||
5071 | break; | |||
5072 | } | |||
5073 | if (!inserted) | |||
5074 | CurPts.push_back(Inst); | |||
5075 | } | |||
5076 | } | |||
5077 | return Changed; | |||
5078 | } | |||
5079 | ||||
5080 | /// Return true if an ext(load) can be formed from an extension in | |||
5081 | /// \p MovedExts. | |||
5082 | bool CodeGenPrepare::canFormExtLd( | |||
5083 | const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI, | |||
5084 | Instruction *&Inst, bool HasPromoted) { | |||
5085 | for (auto *MovedExtInst : MovedExts) { | |||
5086 | if (isa<LoadInst>(MovedExtInst->getOperand(0))) { | |||
5087 | LI = cast<LoadInst>(MovedExtInst->getOperand(0)); | |||
5088 | Inst = MovedExtInst; | |||
5089 | break; | |||
5090 | } | |||
5091 | } | |||
5092 | if (!LI) | |||
5093 | return false; | |||
5094 | ||||
5095 | // If they're already in the same block, there's nothing to do. | |||
5096 | // Make the cheap checks first if we did not promote. | |||
5097 | // If we promoted, we need to check if it is indeed profitable. | |||
5098 | if (!HasPromoted && LI->getParent() == Inst->getParent()) | |||
5099 | return false; | |||
5100 | ||||
5101 | return TLI->isExtLoad(LI, Inst, *DL); | |||
5102 | } | |||
5103 | ||||
5104 | /// Move a zext or sext fed by a load into the same basic block as the load, | |||
5105 | /// unless conditions are unfavorable. This allows SelectionDAG to fold the | |||
5106 | /// extend into the load. | |||
5107 | /// | |||
5108 | /// E.g., | |||
5109 | /// \code | |||
5110 | /// %ld = load i32* %addr | |||
5111 | /// %add = add nuw i32 %ld, 4 | |||
5112 | /// %zext = zext i32 %add to i64 | |||
5113 | /// \endcode | |||
5114 | /// => | |||
5115 | /// \code | |||
5116 | /// %ld = load i32* %addr | |||
5117 | /// %zext = zext i32 %ld to i64 | |||
5118 | /// %add = add nuw i64 %zext, 4 | |||
5119 | /// \endcode | |||
5120 | /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which | |||
5121 | /// allows us to match zext(load i32*) to i64. | |||
5122 | /// | |||
5123 | /// Also, try to promote the computations used to obtain a sign extended | |||
5124 | /// value used in memory accesses. | |||
5125 | /// E.g., | |||
5126 | /// \code | |||
5127 | /// a = add nsw i32 b, 3 | |||
5128 | /// d = sext i32 a to i64 | |||
5129 | /// e = getelementptr ..., i64 d | |||
5130 | /// \endcode | |||
5131 | /// => | |||
5132 | /// \code | |||
5133 | /// f = sext i32 b to i64 | |||
5134 | /// a = add nsw i64 f, 3 | |||
5135 | /// e = getelementptr ..., i64 a | |||
5136 | /// \endcode | |||
5137 | /// | |||
5138 | /// \p Inst [in/out] the extension; it may be modified during the process if | |||
5139 | /// some promotions apply. | |||
5140 | bool CodeGenPrepare::optimizeExt(Instruction *&Inst) { | |||
5141 | // ExtLoad formation and address type promotion infrastructure requires TLI to | |||
5142 | // be effective. | |||
5143 | if (!TLI) | |||
5144 | return false; | |||
5145 | ||||
5146 | bool AllowPromotionWithoutCommonHeader = false; | |||
5147 | /// See if it is an interesting sext operation for the address type | |||
5148 | /// promotion before trying to promote it, e.g., the ones with the right | |||
5149 | /// type and used in memory accesses. | |||
5150 | bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion( | |||
5151 | *Inst, AllowPromotionWithoutCommonHeader); | |||
5152 | TypePromotionTransaction TPT(RemovedInsts); | |||
5153 | TypePromotionTransaction::ConstRestorationPt LastKnownGood = | |||
5154 | TPT.getRestorationPoint(); | |||
5155 | SmallVector<Instruction *, 1> Exts; | |||
5156 | SmallVector<Instruction *, 2> SpeculativelyMovedExts; | |||
5157 | Exts.push_back(Inst); | |||
5158 | ||||
5159 | bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts); | |||
5160 | ||||
5161 | // Look for a load being extended. | |||
5162 | LoadInst *LI = nullptr; | |||
5163 | Instruction *ExtFedByLoad; | |||
5164 | ||||
5165 | // Try to promote a chain of computation if it allows to form an extended | |||
5166 | // load. | |||
5167 | if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) { | |||
5168 | assert(LI && ExtFedByLoad && "Expect a valid load and extension"); | |||
5169 | TPT.commit(); | |||
5170 | // Move the extend into the same block as the load | |||
5171 | ExtFedByLoad->moveAfter(LI); | |||
5172 | // CGP does not check if the zext would be speculatively executed when moved | |||
5173 | // to the same basic block as the load. Preserving its original location | |||
5174 | // would pessimize the debugging experience, as well as negatively impact | |||
5175 | // the quality of sample pgo. We don't want to use "line 0" as that has a | |||
5176 | // size cost in the line-table section and logically the zext can be seen as | |||
5177 | // part of the load. Therefore we conservatively reuse the same debug | |||
5178 | // location for the load and the zext. | |||
5179 | ExtFedByLoad->setDebugLoc(LI->getDebugLoc()); | |||
5180 | ++NumExtsMoved; | |||
5181 | Inst = ExtFedByLoad; | |||
5182 | return true; | |||
5183 | } | |||
5184 | ||||
5185 | // Continue promoting SExts if known as considerable depending on targets. | |||
5186 | if (ATPConsiderable && | |||
5187 | performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader, | |||
5188 | HasPromoted, TPT, SpeculativelyMovedExts)) | |||
5189 | return true; | |||
5190 | ||||
5191 | TPT.rollback(LastKnownGood); | |||
5192 | return false; | |||
5193 | } | |||
5194 | ||||
5195 | // Perform address type promotion if doing so is profitable. | |||
5196 | // If AllowPromotionWithoutCommonHeader == false, we should find other sext | |||
5197 | // instructions that sign extended the same initial value. However, if | |||
5198 | // AllowPromotionWithoutCommonHeader == true, we expect that promoting the | |||
5199 | // extension is profitable by itself. | |||
5200 | bool CodeGenPrepare::performAddressTypePromotion( | |||
5201 | Instruction *&Inst, bool AllowPromotionWithoutCommonHeader, | |||
5202 | bool HasPromoted, TypePromotionTransaction &TPT, | |||
5203 | SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) { | |||
5204 | bool Promoted = false; | |||
5205 | SmallPtrSet<Instruction *, 1> UnhandledExts; | |||
5206 | bool AllSeenFirst = true; | |||
5207 | for (auto I : SpeculativelyMovedExts) { | |||
5208 | Value *HeadOfChain = I->getOperand(0); | |||
5209 | DenseMap<Value *, Instruction *>::iterator AlreadySeen = | |||
5210 | SeenChainsForSExt.find(HeadOfChain); | |||
5211 | // If there is an unhandled SExt which has the same header, try to promote | |||
5212 | // it as well. | |||
5213 | if (AlreadySeen != SeenChainsForSExt.end()) { | |||
5214 | if (AlreadySeen->second != nullptr) | |||
5215 | UnhandledExts.insert(AlreadySeen->second); | |||
5216 | AllSeenFirst = false; | |||
5217 | } | |||
5218 | } | |||
5219 | ||||
5220 | if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader && | |||
5221 | SpeculativelyMovedExts.size() == 1)) { | |||
5222 | TPT.commit(); | |||
5223 | if (HasPromoted) | |||
5224 | Promoted = true; | |||
5225 | for (auto I : SpeculativelyMovedExts) { | |||
5226 | Value *HeadOfChain = I->getOperand(0); | |||
5227 | SeenChainsForSExt[HeadOfChain] = nullptr; | |||
5228 | ValToSExtendedUses[HeadOfChain].push_back(I); | |||
5229 | } | |||
5230 | // Update Inst as promotion happened. | |||
5231 | Inst = SpeculativelyMovedExts.pop_back_val(); | |||
5232 | } else { | |||
5233 | // This is the first chain visited from the header; keep the current chain | |||
5234 | // as unhandled. Defer promoting it until we encounter another SExt | |||
5235 | // chain derived from the same header. | |||
5236 | for (auto I : SpeculativelyMovedExts) { | |||
5237 | Value *HeadOfChain = I->getOperand(0); | |||
5238 | SeenChainsForSExt[HeadOfChain] = Inst; | |||
5239 | } | |||
5240 | return false; | |||
5241 | } | |||
5242 | ||||
5243 | if (!AllSeenFirst && !UnhandledExts.empty()) | |||
5244 | for (auto VisitedSExt : UnhandledExts) { | |||
5245 | if (RemovedInsts.count(VisitedSExt)) | |||
5246 | continue; | |||
5247 | TypePromotionTransaction TPT(RemovedInsts); | |||
5248 | SmallVector<Instruction *, 1> Exts; | |||
5249 | SmallVector<Instruction *, 2> Chains; | |||
5250 | Exts.push_back(VisitedSExt); | |||
5251 | bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains); | |||
5252 | TPT.commit(); | |||
5253 | if (HasPromoted) | |||
5254 | Promoted = true; | |||
5255 | for (auto I : Chains) { | |||
5256 | Value *HeadOfChain = I->getOperand(0); | |||
5257 | // Mark this as handled. | |||
5258 | SeenChainsForSExt[HeadOfChain] = nullptr; | |||
5259 | ValToSExtendedUses[HeadOfChain].push_back(I); | |||
5260 | } | |||
5261 | } | |||
5262 | return Promoted; | |||
5263 | } | |||
5264 | ||||
5265 | bool CodeGenPrepare::optimizeExtUses(Instruction *I) { | |||
5266 | BasicBlock *DefBB = I->getParent(); | |||
5267 | ||||
5268 | // If the result of a {s|z}ext and its source are both live out, rewrite all | |||
5269 | // other uses of the source with result of extension. | |||
5270 | Value *Src = I->getOperand(0); | |||
5271 | if (Src->hasOneUse()) | |||
5272 | return false; | |||
5273 | ||||
5274 | // Only do this xform if truncating is free. | |||
5275 | if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType())) | |||
5276 | return false; | |||
5277 | ||||
5278 | // Only safe to perform the optimization if the source is also defined in | |||
5279 | // this block. | |||
5280 | if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent()) | |||
5281 | return false; | |||
5282 | ||||
5283 | bool DefIsLiveOut = false; | |||
5284 | for (User *U : I->users()) { | |||
5285 | Instruction *UI = cast<Instruction>(U); | |||
5286 | ||||
5287 | // Figure out which BB this ext is used in. | |||
5288 | BasicBlock *UserBB = UI->getParent(); | |||
5289 | if (UserBB == DefBB) continue; | |||
5290 | DefIsLiveOut = true; | |||
5291 | break; | |||
5292 | } | |||
5293 | if (!DefIsLiveOut) | |||
5294 | return false; | |||
5295 | ||||
5296 | // Make sure none of the uses are PHI nodes. | |||
5297 | for (User *U : Src->users()) { | |||
5298 | Instruction *UI = cast<Instruction>(U); | |||
5299 | BasicBlock *UserBB = UI->getParent(); | |||
5300 | if (UserBB == DefBB) continue; | |||
5301 | // Be conservative. We don't want this xform to end up introducing | |||
5302 | // reloads just before load / store instructions. | |||
5303 | if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI)) | |||
5304 | return false; | |||
5305 | } | |||
5306 | ||||
5307 | // InsertedTruncs - Insert only one trunc in each block. | |||
5308 | DenseMap<BasicBlock*, Instruction*> InsertedTruncs; | |||
5309 | ||||
5310 | bool MadeChange = false; | |||
5311 | for (Use &U : Src->uses()) { | |||
5312 | Instruction *User = cast<Instruction>(U.getUser()); | |||
5313 | ||||
5314 | // Figure out which BB this ext is used in. | |||
5315 | BasicBlock *UserBB = User->getParent(); | |||
5316 | if (UserBB == DefBB) continue; | |||
5317 | ||||
5318 | // Both src and def are live in this block. Rewrite the use. | |||
5319 | Instruction *&InsertedTrunc = InsertedTruncs[UserBB]; | |||
5320 | ||||
5321 | if (!InsertedTrunc) { | |||
5322 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
5323 | assert(InsertPt != UserBB->end()); | |||
5324 | InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt); | |||
5325 | InsertedInsts.insert(InsertedTrunc); | |||
5326 | } | |||
5327 | ||||
5328 | // Replace a use of the {s|z}ext source with a use of the result. | |||
5329 | U = InsertedTrunc; | |||
5330 | ++NumExtUses; | |||
5331 | MadeChange = true; | |||
5332 | } | |||
5333 | ||||
5334 | return MadeChange; | |||
5335 | } | |||
5336 | ||||
5337 | // Find loads whose uses only use some of the loaded value's bits. Add an "and" | |||
5338 | // just after the load if the target can fold this into one extload instruction, | |||
5339 | // with the hope of eliminating some of the other later "and" instructions using | |||
5340 | // the loaded value. "and"s that are made trivially redundant by the insertion | |||
5341 | // of the new "and" are removed by this function, while others (e.g. those whose | |||
5342 | // path from the load goes through a phi) are left for isel to potentially | |||
5343 | // remove. | |||
5344 | // | |||
5345 | // For example: | |||
5346 | // | |||
5347 | // b0: | |||
5348 | // x = load i32 | |||
5349 | // ... | |||
5350 | // b1: | |||
5351 | // y = and x, 0xff | |||
5352 | // z = use y | |||
5353 | // | |||
5354 | // becomes: | |||
5355 | // | |||
5356 | // b0: | |||
5357 | // x = load i32 | |||
5358 | // x' = and x, 0xff | |||
5359 | // ... | |||
5360 | // b1: | |||
5361 | // z = use x' | |||
5362 | // | |||
5363 | // whereas: | |||
5364 | // | |||
5365 | // b0: | |||
5366 | // x1 = load i32 | |||
5367 | // ... | |||
5368 | // b1: | |||
5369 | // x2 = load i32 | |||
5370 | // ... | |||
5371 | // b2: | |||
5372 | // x = phi x1, x2 | |||
5373 | // y = and x, 0xff | |||
5374 | // | |||
5375 | // becomes (after a call to optimizeLoadExt for each load): | |||
5376 | // | |||
5377 | // b0: | |||
5378 | // x1 = load i32 | |||
5379 | // x1' = and x1, 0xff | |||
5380 | // ... | |||
5381 | // b1: | |||
5382 | // x2 = load i32 | |||
5383 | // x2' = and x2, 0xff | |||
5384 | // ... | |||
5385 | // b2: | |||
5386 | // x = phi x1', x2' | |||
5387 | // y = and x, 0xff | |||
5388 | bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) { | |||
5389 | if (!Load->isSimple() || | |||
5390 | !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy())) | |||
5391 | return false; | |||
5392 | ||||
5393 | // Skip loads we've already transformed. | |||
5394 | if (Load->hasOneUse() && | |||
5395 | InsertedInsts.count(cast<Instruction>(*Load->user_begin()))) | |||
5396 | return false; | |||
5397 | ||||
5398 | // Look at all uses of Load, looking through phis, to determine how many bits | |||
5399 | // of the loaded value are needed. | |||
5400 | SmallVector<Instruction *, 8> WorkList; | |||
5401 | SmallPtrSet<Instruction *, 16> Visited; | |||
5402 | SmallVector<Instruction *, 8> AndsToMaybeRemove; | |||
5403 | for (auto *U : Load->users()) | |||
5404 | WorkList.push_back(cast<Instruction>(U)); | |||
5405 | ||||
5406 | EVT LoadResultVT = TLI->getValueType(*DL, Load->getType()); | |||
5407 | unsigned BitWidth = LoadResultVT.getSizeInBits(); | |||
5408 | APInt DemandBits(BitWidth, 0); | |||
5409 | APInt WidestAndBits(BitWidth, 0); | |||
5410 | ||||
5411 | while (!WorkList.empty()) { | |||
5412 | Instruction *I = WorkList.back(); | |||
5413 | WorkList.pop_back(); | |||
5414 | ||||
5415 | // Break use-def graph loops. | |||
5416 | if (!Visited.insert(I).second) | |||
5417 | continue; | |||
5418 | ||||
5419 | // For a PHI node, push all of its users. | |||
5420 | if (auto *Phi = dyn_cast<PHINode>(I)) { | |||
5421 | for (auto *U : Phi->users()) | |||
5422 | WorkList.push_back(cast<Instruction>(U)); | |||
5423 | continue; | |||
5424 | } | |||
5425 | ||||
5426 | switch (I->getOpcode()) { | |||
5427 | case Instruction::And: { | |||
5428 | auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1)); | |||
5429 | if (!AndC) | |||
5430 | return false; | |||
5431 | APInt AndBits = AndC->getValue(); | |||
5432 | DemandBits |= AndBits; | |||
5433 | // Keep track of the widest and mask we see. | |||
5434 | if (AndBits.ugt(WidestAndBits)) | |||
5435 | WidestAndBits = AndBits; | |||
5436 | if (AndBits == WidestAndBits && I->getOperand(0) == Load) | |||
5437 | AndsToMaybeRemove.push_back(I); | |||
5438 | break; | |||
5439 | } | |||
5440 | ||||
5441 | case Instruction::Shl: { | |||
5442 | auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1)); | |||
5443 | if (!ShlC) | |||
5444 | return false; | |||
5445 | uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1); | |||
5446 | DemandBits.setLowBits(BitWidth - ShiftAmt); | |||
5447 | break; | |||
5448 | } | |||
5449 | ||||
5450 | case Instruction::Trunc: { | |||
5451 | EVT TruncVT = TLI->getValueType(*DL, I->getType()); | |||
5452 | unsigned TruncBitWidth = TruncVT.getSizeInBits(); | |||
5453 | DemandBits.setLowBits(TruncBitWidth); | |||
5454 | break; | |||
5455 | } | |||
5456 | ||||
5457 | default: | |||
5458 | return false; | |||
5459 | } | |||
5460 | } | |||
5461 | ||||
5462 | uint32_t ActiveBits = DemandBits.getActiveBits(); | |||
5463 | // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the | |||
5464 | // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example, | |||
5465 | // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but | |||
5466 | // (and (load x) 1) is not matched as a single instruction, rather as a LDR | |||
5467 | // followed by an AND. | |||
5468 | // TODO: Look into removing this restriction by fixing backends to either | |||
5469 | // return false for isLoadExtLegal for i1 or have them select this pattern to | |||
5470 | // a single instruction. | |||
5471 | // | |||
5472 | // Also avoid hoisting if we didn't see any ands with the exact DemandBits | |||
5473 | // mask, since these are the only ands that will be removed by isel. | |||
5474 | if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) || | |||
5475 | WidestAndBits != DemandBits) | |||
5476 | return false; | |||
5477 | ||||
5478 | LLVMContext &Ctx = Load->getType()->getContext(); | |||
5479 | Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits); | |||
5480 | EVT TruncVT = TLI->getValueType(*DL, TruncTy); | |||
5481 | ||||
5482 | // Reject cases that won't be matched as extloads. | |||
5483 | if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() || | |||
5484 | !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT)) | |||
5485 | return false; | |||
5486 | ||||
5487 | IRBuilder<> Builder(Load->getNextNode()); | |||
5488 | auto *NewAnd = dyn_cast<Instruction>( | |||
5489 | Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits))); | |||
5490 | // Mark this instruction as "inserted by CGP", so that other | |||
5491 | // optimizations don't touch it. | |||
5492 | InsertedInsts.insert(NewAnd); | |||
5493 | ||||
5494 | // Replace all uses of load with new and (except for the use of load in the | |||
5495 | // new and itself). | |||
5496 | Load->replaceAllUsesWith(NewAnd); | |||
5497 | NewAnd->setOperand(0, Load); | |||
5498 | ||||
5499 | // Remove any and instructions that are now redundant. | |||
5500 | for (auto *And : AndsToMaybeRemove) | |||
5501 | // Check that the and mask is the same as the one we decided to put on the | |||
5502 | // new and. | |||
5503 | if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) { | |||
5504 | And->replaceAllUsesWith(NewAnd); | |||
5505 | if (&*CurInstIterator == And) | |||
5506 | CurInstIterator = std::next(And->getIterator()); | |||
5507 | And->eraseFromParent(); | |||
5508 | ++NumAndUses; | |||
5509 | } | |||
5510 | ||||
5511 | ++NumAndsAdded; | |||
5512 | return true; | |||
5513 | } | |||
5514 | ||||
5515 | /// Check if V (an operand of a select instruction) is an expensive instruction | |||
5516 | /// that is only used once. | |||
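/// For instance (illustrative only), a single-use fdiv feeding just one arm
/// of a select is typically safe to speculate yet expensive, which makes it
/// a good candidate to sink into the conditional block created for that arm.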
5517 | static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) { | |||
5518 | auto *I = dyn_cast<Instruction>(V); | |||
5519 | // If it's safe to speculatively execute, then it should not have side | |||
5520 | // effects; therefore, it's safe to sink and possibly *not* execute. | |||
5521 | return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) && | |||
5522 | TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive; | |||
5523 | } | |||
5524 | ||||
5525 | /// Returns true if a SelectInst should be turned into an explicit branch. | |||
5526 | static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI, | |||
5527 | const TargetLowering *TLI, | |||
5528 | SelectInst *SI) { | |||
5529 | // If even a predictable select is cheap, then a branch can't be cheaper. | |||
5530 | if (!TLI->isPredictableSelectExpensive()) | |||
5531 | return false; | |||
5532 | ||||
5533 | // FIXME: This should use the same heuristics as IfConversion to determine | |||
5534 | // whether a select is better represented as a branch. | |||
5535 | ||||
5536 | // If metadata tells us that the select condition is obviously predictable, | |||
5537 | // then we want to replace the select with a branch. | |||
5538 | uint64_t TrueWeight, FalseWeight; | |||
5539 | if (SI->extractProfMetadata(TrueWeight, FalseWeight)) { | |||
5540 | uint64_t Max = std::max(TrueWeight, FalseWeight); | |||
5541 | uint64_t Sum = TrueWeight + FalseWeight; | |||
5542 | if (Sum != 0) { | |||
5543 | auto Probability = BranchProbability::getBranchProbability(Max, Sum); | |||
5544 | if (Probability > TLI->getPredictableBranchThreshold()) | |||
5545 | return true; | |||
5546 | } | |||
5547 | } | |||
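// Rough worked example (hypothetical weights): with !prof weights of 1000
// and 1, Probability = 1000/1001, which exceeds a typical predictable-branch
// threshold, so the select is turned into an explicit branch.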
5548 | ||||
5549 | CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); | |||
5550 | ||||
5551 | // If a branch is predictable, an out-of-order CPU can avoid blocking on its | |||
5552 | // comparison condition. If the compare has more than one use, there's | |||
5553 | // probably another cmov or setcc around, so it's not worth emitting a branch. | |||
5554 | if (!Cmp || !Cmp->hasOneUse()) | |||
5555 | return false; | |||
5556 | ||||
5557 | // If either operand of the select is expensive and only needed on one side | |||
5558 | // of the select, we should form a branch. | |||
5559 | if (sinkSelectOperand(TTI, SI->getTrueValue()) || | |||
5560 | sinkSelectOperand(TTI, SI->getFalseValue())) | |||
5561 | return true; | |||
5562 | ||||
5563 | return false; | |||
5564 | } | |||
5565 | ||||
5566 | /// If \p isTrue is true, return the true value of \p SI, otherwise return | |||
5567 | /// the false value of \p SI. If the true/false value of \p SI is defined by any | |||
5568 | /// select instructions in \p Selects, look through the defining select | |||
5569 | /// instruction until the true/false value is not defined in \p Selects. | |||
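///
/// A small sketch (hypothetical IR): with Selects = {%s1, %s2} and
/// \code
///   %s1 = select i1 %c, i32 %a, i32 %b
///   %s2 = select i1 %c, i32 %s1, i32 %d
/// \endcode
/// getTrueOrFalseValue(%s2, /*isTrue=*/true, Selects) looks through %s1 and
/// returns %a, while isTrue == false returns %d directly.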
5570 | static Value *getTrueOrFalseValue( | |||
5571 | SelectInst *SI, bool isTrue, | |||
5572 | const SmallPtrSet<const Instruction *, 2> &Selects) { | |||
5573 | Value *V; | |||
5574 | ||||
5575 | for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI); | |||
5576 | DefSI = dyn_cast<SelectInst>(V)) { | |||
5577 | assert(DefSI->getCondition() == SI->getCondition() && | |||
5578 | "The condition of DefSI does not match with SI"); | |||
5579 | V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue()); | |||
5580 | } | |||
5581 | return V; | |||
5582 | } | |||
5583 | ||||
5584 | /// If we have a SelectInst that will likely profit from branch prediction, | |||
5585 | /// turn it into a branch. | |||
5586 | bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) { | |||
5587 | // Find all consecutive select instructions that share the same condition. | |||
5588 | SmallVector<SelectInst *, 2> ASI; | |||
5589 | ASI.push_back(SI); | |||
5590 | for (BasicBlock::iterator It = ++BasicBlock::iterator(SI); | |||
5591 | It != SI->getParent()->end(); ++It) { | |||
5592 | SelectInst *I = dyn_cast<SelectInst>(&*It); | |||
5593 | if (I && SI->getCondition() == I->getCondition()) { | |||
5594 | ASI.push_back(I); | |||
5595 | } else { | |||
5596 | break; | |||
5597 | } | |||
5598 | } | |||
5599 | ||||
5600 | SelectInst *LastSI = ASI.back(); | |||
5601 | // Advance the current iterator past the rest of the select instructions, | |||
5602 | // because the whole group will either be lowered to branches or left alone. | |||
5603 | CurInstIterator = std::next(LastSI->getIterator()); | |||
5604 | ||||
5605 | bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); | |||
5606 | ||||
5607 | // Can we convert this 'select' into control flow? | |||
5608 | if (DisableSelectToBranch || OptSize || !TLI || VectorCond || | |||
5609 | SI->getMetadata(LLVMContext::MD_unpredictable)) | |||
5610 | return false; | |||
5611 | ||||
5612 | TargetLowering::SelectSupportKind SelectKind; | |||
5613 | if (VectorCond) | |||
5614 | SelectKind = TargetLowering::VectorMaskSelect; | |||
5615 | else if (SI->getType()->isVectorTy()) | |||
5616 | SelectKind = TargetLowering::ScalarCondVectorVal; | |||
5617 | else | |||
5618 | SelectKind = TargetLowering::ScalarValSelect; | |||
5619 | ||||
5620 | if (TLI->isSelectSupported(SelectKind) && | |||
5621 | !isFormingBranchFromSelectProfitable(TTI, TLI, SI)) | |||
5622 | return false; | |||
5623 | ||||
5624 | ModifiedDT = true; | |||
5625 | ||||
5626 | // Transform a sequence like this: | |||
5627 | // start: | |||
5628 | // %cmp = cmp uge i32 %a, %b | |||
5629 | // %sel = select i1 %cmp, i32 %c, i32 %d | |||
5630 | // | |||
5631 | // Into: | |||
5632 | // start: | |||
5633 | // %cmp = cmp uge i32 %a, %b | |||
5634 | // br i1 %cmp, label %select.true, label %select.false | |||
5635 | // select.true: | |||
5636 | // br label %select.end | |||
5637 | // select.false: | |||
5638 | // br label %select.end | |||
5639 | // select.end: | |||
5640 | // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ] | |||
5641 | // | |||
5642 | // In addition, we may sink instructions that produce %c or %d from | |||
5643 | // the entry block into the destination(s) of the new branch. | |||
5644 | // If the true or false blocks do not contain a sunk instruction, that | |||
5645 | // block and its branch may be optimized away. In that case, one side of the | |||
5646 | // first branch will point directly to select.end, and the corresponding PHI | |||
5647 | // predecessor block will be the start block. | |||
5648 | ||||
5649 | // First, we split the block containing the select into 2 blocks. | |||
5650 | BasicBlock *StartBlock = SI->getParent(); | |||
5651 | BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI)); | |||
5652 | BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end"); | |||
5653 | ||||
5654 | // Delete the unconditional branch that was just created by the split. | |||
5655 | StartBlock->getTerminator()->eraseFromParent(); | |||
5656 | ||||
5657 | // These are the new basic blocks for the conditional branch. | |||
5658 | // At least one will become an actual new basic block. | |||
5659 | BasicBlock *TrueBlock = nullptr; | |||
5660 | BasicBlock *FalseBlock = nullptr; | |||
5661 | BranchInst *TrueBranch = nullptr; | |||
5662 | BranchInst *FalseBranch = nullptr; | |||
5663 | ||||
5664 | // Sink expensive instructions into the conditional blocks to avoid executing | |||
5665 | // them speculatively. | |||
5666 | for (SelectInst *SI : ASI) { | |||
5667 | if (sinkSelectOperand(TTI, SI->getTrueValue())) { | |||
5668 | if (TrueBlock == nullptr) { | |||
5669 | TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink", | |||
5670 | EndBlock->getParent(), EndBlock); | |||
5671 | TrueBranch = BranchInst::Create(EndBlock, TrueBlock); | |||
5672 | } | |||
5673 | auto *TrueInst = cast<Instruction>(SI->getTrueValue()); | |||
5674 | TrueInst->moveBefore(TrueBranch); | |||
5675 | } | |||
5676 | if (sinkSelectOperand(TTI, SI->getFalseValue())) { | |||
5677 | if (FalseBlock == nullptr) { | |||
5678 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink", | |||
5679 | EndBlock->getParent(), EndBlock); | |||
5680 | FalseBranch = BranchInst::Create(EndBlock, FalseBlock); | |||
5681 | } | |||
5682 | auto *FalseInst = cast<Instruction>(SI->getFalseValue()); | |||
5683 | FalseInst->moveBefore(FalseBranch); | |||
5684 | } | |||
5685 | } | |||
5686 | ||||
5687 | // If there was nothing to sink, then arbitrarily choose the 'false' side | |||
5688 | // for a new input value to the PHI. | |||
5689 | if (TrueBlock == FalseBlock) { | |||
5690 | assert(TrueBlock == nullptr && | |||
5691 | "Unexpected basic block transform while optimizing select"); | |||
5692 | ||||
5693 | FalseBlock = BasicBlock::Create(SI->getContext(), "select.false", | |||
5694 | EndBlock->getParent(), EndBlock); | |||
5695 | BranchInst::Create(EndBlock, FalseBlock); | |||
5696 | } | |||
5697 | ||||
5698 | // Insert the real conditional branch based on the original condition. | |||
5699 | // If we did not create a new block for one of the 'true' or 'false' paths | |||
5700 | // of the condition, it means that side of the branch goes to the end block | |||
5701 | // directly and the path originates from the start block from the point of | |||
5702 | // view of the new PHI. | |||
5703 | BasicBlock *TT, *FT; | |||
5704 | if (TrueBlock == nullptr) { | |||
5705 | TT = EndBlock; | |||
5706 | FT = FalseBlock; | |||
5707 | TrueBlock = StartBlock; | |||
5708 | } else if (FalseBlock == nullptr) { | |||
5709 | TT = TrueBlock; | |||
5710 | FT = EndBlock; | |||
5711 | FalseBlock = StartBlock; | |||
5712 | } else { | |||
5713 | TT = TrueBlock; | |||
5714 | FT = FalseBlock; | |||
5715 | } | |||
5716 | IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI); | |||
5717 | ||||
5718 | SmallPtrSet<const Instruction *, 2> INS; | |||
5719 | INS.insert(ASI.begin(), ASI.end()); | |||
5720 | // Use a reverse iterator because a later select may use the value of an | |||
5721 | // earlier select, and we need to propagate the value through the earlier | |||
5722 | // select to get the PHI operand. | |||
5723 | for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) { | |||
5724 | SelectInst *SI = *It; | |||
5725 | // The select itself is replaced with a PHI Node. | |||
5726 | PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front()); | |||
5727 | PN->takeName(SI); | |||
5728 | PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock); | |||
5729 | PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock); | |||
5730 | ||||
5731 | SI->replaceAllUsesWith(PN); | |||
5732 | SI->eraseFromParent(); | |||
5733 | INS.erase(SI); | |||
5734 | ++NumSelectsExpanded; | |||
5735 | } | |||
5736 | ||||
5737 | // Instruct OptimizeBlock to skip to the next block. | |||
5738 | CurInstIterator = StartBlock->end(); | |||
5739 | return true; | |||
5740 | } | |||
5741 | ||||
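     | // A broadcast (splat) shuffle reads a single source lane for every result | |||
     | // lane. Illustrative example (not from the original source): | |||
     | //   shufflevector <4 x i32> %v, <4 x i32> undef, | |||
     | //                 <4 x i32> <i32 0, i32 0, i32 0, i32 0> | |||
     | // Undef (-1) mask elements are tolerated and do not break the splat. | |||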
5742 | static bool isBroadcastShuffle(ShuffleVectorInst *SVI) { | |||
5743 | SmallVector<int, 16> Mask(SVI->getShuffleMask()); | |||
5744 | int SplatElem = -1; | |||
5745 | for (unsigned i = 0; i < Mask.size(); ++i) { | |||
5746 | if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem) | |||
5747 | return false; | |||
5748 | SplatElem = Mask[i]; | |||
5749 | } | |||
5750 | ||||
5751 | return true; | |||
5752 | } | |||
5753 | ||||
5754 | /// Some targets have expensive vector shifts if the lanes aren't all the same | |||
5755 | /// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases | |||
5756 | /// it's often worth sinking a shufflevector splat down to its use so that | |||
5757 | /// codegen can spot all lanes are identical. | |||
5758 | bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) { | |||
5759 | BasicBlock *DefBB = SVI->getParent(); | |||
5760 | ||||
5761 | // Only do this xform if variable vector shifts are particularly expensive. | |||
5762 | if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType())) | |||
5763 | return false; | |||
5764 | ||||
5765 | // We only expect better codegen by sinking a shuffle if we can recognise a | |||
5766 | // constant splat. | |||
5767 | if (!isBroadcastShuffle(SVI)) | |||
5768 | return false; | |||
5769 | ||||
5770 | // InsertedShuffles - Only insert a shuffle in each block once. | |||
5771 | DenseMap<BasicBlock*, Instruction*> InsertedShuffles; | |||
5772 | ||||
5773 | bool MadeChange = false; | |||
5774 | for (User *U : SVI->users()) { | |||
5775 | Instruction *UI = cast<Instruction>(U); | |||
5776 | ||||
5777 | // Figure out which BB this shuffle is used in. | |||
5778 | BasicBlock *UserBB = UI->getParent(); | |||
5779 | if (UserBB == DefBB) continue; | |||
5780 | ||||
5781 | // For now only apply this when the splat is used by a shift instruction. | |||
5782 | if (!UI->isShift()) continue; | |||
5783 | ||||
5784 | // Everything checks out, sink the shuffle if the user's block doesn't | |||
5785 | // already have a copy. | |||
5786 | Instruction *&InsertedShuffle = InsertedShuffles[UserBB]; | |||
5787 | ||||
5788 | if (!InsertedShuffle) { | |||
5789 | BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt(); | |||
5790 | assert(InsertPt != UserBB->end()); | |||
5791 | InsertedShuffle = | |||
5792 | new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), | |||
5793 | SVI->getOperand(2), "", &*InsertPt); | |||
5794 | } | |||
5795 | ||||
5796 | UI->replaceUsesOfWith(SVI, InsertedShuffle); | |||
5797 | MadeChange = true; | |||
5798 | } | |||
5799 | ||||
5800 | // If we removed all uses, nuke the shuffle. | |||
5801 | if (SVI->use_empty()) { | |||
5802 | SVI->eraseFromParent(); | |||
5803 | MadeChange = true; | |||
5804 | } | |||
5805 | ||||
5806 | return MadeChange; | |||
5807 | } | |||
5808 | ||||
5809 | bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) { | |||
5810 | if (!TLI || !DL) | |||
5811 | return false; | |||
5812 | ||||
5813 | Value *Cond = SI->getCondition(); | |||
5814 | Type *OldType = Cond->getType(); | |||
5815 | LLVMContext &Context = Cond->getContext(); | |||
5816 | MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType)); | |||
5817 | unsigned RegWidth = RegType.getSizeInBits(); | |||
5818 | ||||
5819 | if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth()) | |||
5820 | return false; | |||
5821 | ||||
5822 | // If the register width is greater than the type width, expand the condition | |||
5823 | // of the switch instruction and each case constant to the width of the | |||
5824 | // register. By widening the type of the switch condition, subsequent | |||
5825 | // comparisons (for case comparisons) will not need to be extended to the | |||
5826 | // preferred register width, so we will potentially eliminate N-1 extends, | |||
5827 | // where N is the number of cases in the switch. | |||
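     | // For example (illustrative): on a target with 32-bit registers, | |||
     | //   switch i8 %x, label %def [ i8 1, label %a ] | |||
     | // becomes | |||
     | //   %x.ext = zext i8 %x to i32 | |||
     | //   switch i32 %x.ext, label %def [ i32 1, label %a ] | |||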
5828 | auto *NewType = Type::getIntNTy(Context, RegWidth); | |||
5829 | ||||
5830 | // Zero-extend the switch condition and case constants unless the switch | |||
5831 | // condition is a function argument that is already being sign-extended. | |||
5832 | // In that case, we can avoid an unnecessary mask/extension by sign-extending | |||
5833 | // everything instead. | |||
5834 | Instruction::CastOps ExtType = Instruction::ZExt; | |||
5835 | if (auto *Arg = dyn_cast<Argument>(Cond)) | |||
5836 | if (Arg->hasSExtAttr()) | |||
5837 | ExtType = Instruction::SExt; | |||
5838 | ||||
5839 | auto *ExtInst = CastInst::Create(ExtType, Cond, NewType); | |||
5840 | ExtInst->insertBefore(SI); | |||
5841 | SI->setCondition(ExtInst); | |||
5842 | for (auto Case : SI->cases()) { | |||
5843 | APInt NarrowConst = Case.getCaseValue()->getValue(); | |||
5844 | APInt WideConst = (ExtType == Instruction::ZExt) ? | |||
5845 | NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth); | |||
5846 | Case.setValue(ConstantInt::get(Context, WideConst)); | |||
5847 | } | |||
5848 | ||||
5849 | return true; | |||
5850 | } | |||
5851 | ||||
5852 | ||||
5853 | namespace { | |||
5854 | ||||
5855 | /// \brief Helper class to promote a scalar operation to a vector one. | |||
5856 | /// This class is used to move an extractelement transition downward. | |||
5857 | /// E.g., | |||
5858 | /// a = vector_op <2 x i32> | |||
5859 | /// b = extractelement <2 x i32> a, i32 0 | |||
5860 | /// c = scalar_op b | |||
5861 | /// store c | |||
5862 | /// | |||
5863 | /// => | |||
5864 | /// a = vector_op <2 x i32> | |||
5865 | /// c = vector_op a (equivalent to scalar_op on the related lane) | |||
5866 | /// * d = extractelement <2 x i32> c, i32 0 | |||
5867 | /// * store d | |||
5868 | /// Assuming both the extractelement and the store can be combined, we get | |||
5869 | /// rid of the transition. | |||
5870 | class VectorPromoteHelper { | |||
5871 | /// DataLayout associated with the current module. | |||
5872 | const DataLayout &DL; | |||
5873 | ||||
5874 | /// Used to perform some checks on the legality of vector operations. | |||
5875 | const TargetLowering &TLI; | |||
5876 | ||||
5877 | /// Used to estimate the cost of the promoted chain. | |||
5878 | const TargetTransformInfo &TTI; | |||
5879 | ||||
5880 | /// The transition being moved downwards. | |||
5881 | Instruction *Transition; | |||
5882 | ||||
5883 | /// The sequence of instructions to be promoted. | |||
5884 | SmallVector<Instruction *, 4> InstsToBePromoted; | |||
5885 | ||||
5886 | /// Cost of combining a store and an extract. | |||
5887 | unsigned StoreExtractCombineCost; | |||
5888 | ||||
5889 | /// Instruction that will be combined with the transition. | |||
5890 | Instruction *CombineInst = nullptr; | |||
5891 | ||||
5892 | /// \brief The instruction that represents the current end of the transition. | |||
5893 | /// Since we are faking the promotion until we reach the end of the chain | |||
5894 | /// of computation, we need a way to get the current end of the transition. | |||
5895 | Instruction *getEndOfTransition() const { | |||
5896 | if (InstsToBePromoted.empty()) | |||
5897 | return Transition; | |||
5898 | return InstsToBePromoted.back(); | |||
5899 | } | |||
5900 | ||||
5901 | /// \brief Return the index of the original value in the transition. | |||
5902 | /// E.g., for "extractelement <2 x i32> c, i32 1" the original value, | |||
5903 | /// c, is at index 0. | |||
5904 | unsigned getTransitionOriginalValueIdx() const { | |||
5905 | assert(isa<ExtractElementInst>(Transition) && | |||
5906 | "Other kinds of transitions are not supported yet"); | |||
5907 | return 0; | |||
5908 | } | |||
5909 | ||||
5910 | /// \brief Return the index of the index in the transition. | |||
5911 | /// E.g., for "extractelement <2 x i32> c, i32 0" the index | |||
5912 | /// is at index 1. | |||
5913 | unsigned getTransitionIdx() const { | |||
5914 | assert(isa<ExtractElementInst>(Transition) && | |||
5915 | "Other kinds of transitions are not supported yet"); | |||
5916 | return 1; | |||
5917 | } | |||
5918 | ||||
5919 | /// \brief Get the type of the transition. | |||
5920 | /// This is the type of the original value. | |||
5921 | /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the | |||
5922 | /// transition is <2 x i32>. | |||
5923 | Type *getTransitionType() const { | |||
5924 | return Transition->getOperand(getTransitionOriginalValueIdx())->getType(); | |||
5925 | } | |||
5926 | ||||
5927 | /// \brief Promote \p ToBePromoted by moving \p Def downward through it. | |||
5928 | /// I.e., we have the following sequence: | |||
5929 | /// Def = Transition <ty1> a to <ty2> | |||
5930 | /// b = ToBePromoted <ty2> Def, ... | |||
5931 | /// => | |||
5932 | /// b = ToBePromoted <ty1> a, ... | |||
5933 | /// Def = Transition <ty1> ToBePromoted to <ty2> | |||
5934 | void promoteImpl(Instruction *ToBePromoted); | |||
5935 | ||||
5936 | /// \brief Check whether or not it is profitable to promote all the | |||
5937 | /// instructions enqueued to be promoted. | |||
5938 | bool isProfitableToPromote() { | |||
5939 | Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
5940 | unsigned Index = isa<ConstantInt>(ValIdx) | |||
5941 | ? cast<ConstantInt>(ValIdx)->getZExtValue() | |||
5942 | : -1; | |||
5943 | Type *PromotedType = getTransitionType(); | |||
5944 | ||||
5945 | StoreInst *ST = cast<StoreInst>(CombineInst); | |||
5946 | unsigned AS = ST->getPointerAddressSpace(); | |||
5947 | unsigned Align = ST->getAlignment(); | |||
5948 | // Check if this store is supported. | |||
5949 | if (!TLI.allowsMisalignedMemoryAccesses( | |||
5950 | TLI.getValueType(DL, ST->getValueOperand()->getType()), AS, | |||
5951 | Align)) { | |||
5952 | // If this is not supported, there is no way we can combine | |||
5953 | // the extract with the store. | |||
5954 | return false; | |||
5955 | } | |||
5956 | ||||
5957 | // The scalar chain of computation has to pay for the transition | |||
5958 | // scalar to vector. | |||
5959 | // The vector chain has to account for the combining cost. | |||
5960 | uint64_t ScalarCost = | |||
5961 | TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index); | |||
5962 | uint64_t VectorCost = StoreExtractCombineCost; | |||
5963 | for (const auto &Inst : InstsToBePromoted) { | |||
5964 | // Compute the cost. | |||
5965 | // By construction, all instructions being promoted are arithmetic ones. | |||
5966 | // Moreover, one argument is a constant that can be viewed as a splat | |||
5967 | // constant. | |||
5968 | Value *Arg0 = Inst->getOperand(0); | |||
5969 | bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) || | |||
5970 | isa<ConstantFP>(Arg0); | |||
5971 | TargetTransformInfo::OperandValueKind Arg0OVK = | |||
5972 | IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue | |||
5973 | : TargetTransformInfo::OK_AnyValue; | |||
5974 | TargetTransformInfo::OperandValueKind Arg1OVK = | |||
5975 | !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue | |||
5976 | : TargetTransformInfo::OK_AnyValue; | |||
5977 | ScalarCost += TTI.getArithmeticInstrCost( | |||
5978 | Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK); | |||
5979 | VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType, | |||
5980 | Arg0OVK, Arg1OVK); | |||
5981 | } | |||
5982 | DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Estimated cost of computation to be promoted:\nScalar: " << ScalarCost << "\nVector: " << VectorCost << '\n'; } } while (false) | |||
5983 | << ScalarCost << "\nVector: " << VectorCost << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Estimated cost of computation to be promoted:\nScalar: " << ScalarCost << "\nVector: " << VectorCost << '\n'; } } while (false); | |||
5984 | return ScalarCost > VectorCost; | |||
5985 | } | |||
5986 | ||||
5987 | /// \brief Generate a constant vector with \p Val with the same | |||
5988 | /// number of elements as the transition. | |||
5989 | /// \p UseSplat defines whether or not \p Val should be replicated | |||
5990 | /// across the whole vector. | |||
5991 | /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>, | |||
5992 | /// otherwise we generate a vector with as many undef as possible: | |||
5993 | /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only | |||
5994 | /// used at the index of the extract. | |||
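     | /// For example (illustrative): with Val = i32 7, a <4 x i32> transition | |||
     | /// extracting index 2, and UseSplat == false, this returns | |||
     | /// <i32 undef, i32 undef, i32 7, i32 undef>. | |||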
5995 | Value *getConstantVector(Constant *Val, bool UseSplat) const { | |||
5996 | unsigned ExtractIdx = std::numeric_limits<unsigned>::max(); | |||
5997 | if (!UseSplat) { | |||
5998 | // If we cannot determine where the constant must be, we have to | |||
5999 | // use a splat constant. | |||
6000 | Value *ValExtractIdx = Transition->getOperand(getTransitionIdx()); | |||
6001 | if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx)) | |||
6002 | ExtractIdx = CstVal->getSExtValue(); | |||
6003 | else | |||
6004 | UseSplat = true; | |||
6005 | } | |||
6006 | ||||
6007 | unsigned End = getTransitionType()->getVectorNumElements(); | |||
6008 | if (UseSplat) | |||
6009 | return ConstantVector::getSplat(End, Val); | |||
6010 | ||||
6011 | SmallVector<Constant *, 4> ConstVec; | |||
6012 | UndefValue *UndefVal = UndefValue::get(Val->getType()); | |||
6013 | for (unsigned Idx = 0; Idx != End; ++Idx) { | |||
6014 | if (Idx == ExtractIdx) | |||
6015 | ConstVec.push_back(Val); | |||
6016 | else | |||
6017 | ConstVec.push_back(UndefVal); | |||
6018 | } | |||
6019 | return ConstantVector::get(ConstVec); | |||
6020 | } | |||
6021 | ||||
6022 | /// \brief Check if promoting the operand at \p OperandIdx in \p Use to a | |||
6023 | /// vector type can trigger undefined behavior. | |||
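     | /// For example (illustrative): promoting "udiv i32 %a, %e", where %e comes | |||
     | /// from the transition, fills the divisor's other lanes with undef and may | |||
     | /// introduce a division by undef in those lanes. | |||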
6024 | static bool canCauseUndefinedBehavior(const Instruction *Use, | |||
6025 | unsigned OperandIdx) { | |||
6026 | // It is not safe to introduce undef when the operand is on | |||
6027 | // the right hand side of a division-like instruction. | |||
6028 | if (OperandIdx != 1) | |||
6029 | return false; | |||
6030 | switch (Use->getOpcode()) { | |||
6031 | default: | |||
6032 | return false; | |||
6033 | case Instruction::SDiv: | |||
6034 | case Instruction::UDiv: | |||
6035 | case Instruction::SRem: | |||
6036 | case Instruction::URem: | |||
6037 | return true; | |||
6038 | case Instruction::FDiv: | |||
6039 | case Instruction::FRem: | |||
6040 | return !Use->hasNoNaNs(); | |||
6041 | } | |||
6042 | llvm_unreachable(nullptr); | |||
6043 | } | |||
6044 | ||||
6045 | public: | |||
6046 | VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI, | |||
6047 | const TargetTransformInfo &TTI, Instruction *Transition, | |||
6048 | unsigned CombineCost) | |||
6049 | : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition), | |||
6050 | StoreExtractCombineCost(CombineCost) { | |||
6051 | assert(Transition && "Do not know how to promote null"); | |||
6052 | } | |||
6053 | ||||
6054 | /// \brief Check if we can promote \p ToBePromoted to \p Type. | |||
6055 | bool canPromote(const Instruction *ToBePromoted) const { | |||
6056 | // We could support CastInst too. | |||
6057 | return isa<BinaryOperator>(ToBePromoted); | |||
6058 | } | |||
6059 | ||||
6060 | /// \brief Check if it is profitable to promote \p ToBePromoted | |||
6061 | /// by moving the transition downward through it. | |||
6062 | bool shouldPromote(const Instruction *ToBePromoted) const { | |||
6063 | // Promote only if all the operands can be statically expanded. | |||
6064 | // Indeed, we do not want to introduce any new kind of transitions. | |||
6065 | for (const Use &U : ToBePromoted->operands()) { | |||
6066 | const Value *Val = U.get(); | |||
6067 | if (Val == getEndOfTransition()) { | |||
6068 | // If the use is a division and the transition is on the rhs, | |||
6069 | // we cannot promote the operation, otherwise we may create a | |||
6070 | // division by zero. | |||
6071 | if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())) | |||
6072 | return false; | |||
6073 | continue; | |||
6074 | } | |||
6075 | if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) && | |||
6076 | !isa<ConstantFP>(Val)) | |||
6077 | return false; | |||
6078 | } | |||
6079 | // Check that the resulting operation is legal. | |||
6080 | int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode()); | |||
6081 | if (!ISDOpcode) | |||
6082 | return false; | |||
6083 | return StressStoreExtract || | |||
6084 | TLI.isOperationLegalOrCustom( | |||
6085 | ISDOpcode, TLI.getValueType(DL, getTransitionType(), true)); | |||
6086 | } | |||
6087 | ||||
6088 | /// \brief Check whether or not \p Use can be combined | |||
6089 | /// with the transition. | |||
6090 | /// I.e., is it possible to do Use(Transition) => AnotherUse? | |||
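     | /// For example (illustrative): a store of the extracted scalar may lower to | |||
     | /// a single target store of the selected vector lane. | |||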
6091 | bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); } | |||
6092 | ||||
6093 | /// \brief Record \p ToBePromoted as part of the chain to be promoted. | |||
6094 | void enqueueForPromotion(Instruction *ToBePromoted) { | |||
6095 | InstsToBePromoted.push_back(ToBePromoted); | |||
6096 | } | |||
6097 | ||||
6098 | /// \brief Set the instruction that will be combined with the transition. | |||
6099 | void recordCombineInstruction(Instruction *ToBeCombined) { | |||
6100 | assert(canCombine(ToBeCombined) && "Unsupported instruction to combine"); | |||
6101 | CombineInst = ToBeCombined; | |||
6102 | } | |||
6103 | ||||
6104 | /// \brief Promote all the instructions enqueued for promotion if it is | |||
6105 | /// profitable. | |||
6106 | /// \return True if the promotion happened, false otherwise. | |||
6107 | bool promote() { | |||
6108 | // Check if there is something to promote. | |||
6109 | // Right now, if we do not have anything to combine with, | |||
6110 | // we assume the promotion is not profitable. | |||
6111 | if (InstsToBePromoted.empty() || !CombineInst) | |||
6112 | return false; | |||
6113 | ||||
6114 | // Check cost. | |||
6115 | if (!StressStoreExtract && !isProfitableToPromote()) | |||
6116 | return false; | |||
6117 | ||||
6118 | // Promote. | |||
6119 | for (auto &ToBePromoted : InstsToBePromoted) | |||
6120 | promoteImpl(ToBePromoted); | |||
6121 | InstsToBePromoted.clear(); | |||
6122 | return true; | |||
6123 | } | |||
6124 | }; | |||
6125 | ||||
6126 | } // end anonymous namespace | |||
6127 | ||||
6128 | void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) { | |||
6129 | // At this point, we know that all the operands of ToBePromoted but Def | |||
6130 | // can be statically promoted. | |||
6131 | // For Def, we need to use its parameter in ToBePromoted: | |||
6132 | // b = ToBePromoted ty1 a | |||
6133 | // Def = Transition ty1 b to ty2 | |||
6134 | // Move the transition down. | |||
6135 | // 1. Replace all uses of the promoted operation by the transition. | |||
6136 | // = ... b => = ... Def. | |||
6137 | assert(ToBePromoted->getType() == Transition->getType() && | |||
6138 | "The type of the result of the transition does not match " | |||
6139 | "the final type"); | |||
6140 | ToBePromoted->replaceAllUsesWith(Transition); | |||
6141 | // 2. Update the type of the uses. | |||
6142 | // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def. | |||
6143 | Type *TransitionTy = getTransitionType(); | |||
6144 | ToBePromoted->mutateType(TransitionTy); | |||
6145 | // 3. Update all the operands of the promoted operation with promoted | |||
6146 | // operands. | |||
6147 | // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a. | |||
6148 | for (Use &U : ToBePromoted->operands()) { | |||
6149 | Value *Val = U.get(); | |||
6150 | Value *NewVal = nullptr; | |||
6151 | if (Val == Transition) | |||
6152 | NewVal = Transition->getOperand(getTransitionOriginalValueIdx()); | |||
6153 | else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) || | |||
6154 | isa<ConstantFP>(Val)) { | |||
6155 | // Use a splat constant if it is not safe to use undef. | |||
6156 | NewVal = getConstantVector( | |||
6157 | cast<Constant>(Val), | |||
6158 | isa<UndefValue>(Val) || | |||
6159 | canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo())); | |||
6160 | } else | |||
6161 | llvm_unreachable("Did you modified shouldPromote and forgot to update "::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "/build/llvm-toolchain-snapshot-6.0~svn316259/lib/CodeGen/CodeGenPrepare.cpp" , 6162) | |||
6162 | "this?")::llvm::llvm_unreachable_internal("Did you modified shouldPromote and forgot to update " "this?", "/build/llvm-toolchain-snapshot-6.0~svn316259/lib/CodeGen/CodeGenPrepare.cpp" , 6162); | |||
6163 | ToBePromoted->setOperand(U.getOperandNo(), NewVal); | |||
6164 | } | |||
6165 | Transition->moveAfter(ToBePromoted); | |||
6166 | Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted); | |||
6167 | } | |||
6168 | ||||
6169 | /// Some targets can do store(extractelement) with one instruction. | |||
6170 | /// Try to push the extractelement towards the stores when the target | |||
6171 | /// has this feature and this is profitable. | |||
6172 | bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) { | |||
6173 | unsigned CombineCost = std::numeric_limits<unsigned>::max(); | |||
6174 | if (DisableStoreExtract || !TLI || | |||
6175 | (!StressStoreExtract && | |||
6176 | !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(), | |||
6177 | Inst->getOperand(1), CombineCost))) | |||
6178 | return false; | |||
6179 | ||||
6180 | // At this point we know that Inst is a vector to scalar transition. | |||
6181 | // Try to move it down the def-use chain, until: | |||
6182 | // - We can combine the transition with its single use | |||
6183 | // => we got rid of the transition. | |||
6184 | // - We escape the current basic block | |||
6185 | // => we would need to check that we are moving it to a cheaper place, and | |||
6186 | // we do not do that for now. | |||
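     | // Illustrative walk (not from the original source): | |||
     | //   %e = extractelement <2 x i32> %v, i32 0   ; the transition | |||
     | //   %a = add i32 %e, 7                        ; enqueued for promotion | |||
     | //   store i32 %a, i32* %p                     ; combined with the extract | |||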
6187 | BasicBlock *Parent = Inst->getParent(); | |||
6188 | DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Found an interesting transition: " << *Inst << '\n'; } } while (false); | |||
6189 | VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost); | |||
6190 | // If the transition has more than one use, assume this is not going to be | |||
6191 | // beneficial. | |||
6192 | while (Inst->hasOneUse()) { | |||
6193 | Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin()); | |||
6194 | DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Use: " << *ToBePromoted << '\n'; } } while (false); | |||
6195 | ||||
6196 | if (ToBePromoted->getParent() != Parent) { | |||
6197 | DEBUG(dbgs() << "Instruction to promote is in a different block ("do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Instruction to promote is in a different block (" << ToBePromoted->getParent()->getName() << ") than the transition (" << Parent->getName() << ").\n"; } } while (false) | |||
6198 | << ToBePromoted->getParent()->getName()do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Instruction to promote is in a different block (" << ToBePromoted->getParent()->getName() << ") than the transition (" << Parent->getName() << ").\n"; } } while (false) | |||
6199 | << ") than the transition (" << Parent->getName() << ").\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Instruction to promote is in a different block (" << ToBePromoted->getParent()->getName() << ") than the transition (" << Parent->getName() << ").\n"; } } while (false); | |||
6200 | return false; | |||
6201 | } | |||
6202 | ||||
6203 | if (VPH.canCombine(ToBePromoted)) { | |||
6204 | DEBUG(dbgs() << "Assume " << *Inst << '\n'do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Assume " << *Inst << '\n' << "will be combined with: " << *ToBePromoted << '\n'; } } while (false) | |||
6205 | << "will be combined with: " << *ToBePromoted << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Assume " << *Inst << '\n' << "will be combined with: " << *ToBePromoted << '\n'; } } while (false); | |||
6206 | VPH.recordCombineInstruction(ToBePromoted); | |||
6207 | bool Changed = VPH.promote(); | |||
6208 | NumStoreExtractExposed += Changed; | |||
6209 | return Changed; | |||
6210 | } | |||
6211 | ||||
6212 | DEBUG(dbgs() << "Try promoting.\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Try promoting.\n"; } } while (false); | |||
6213 | if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted)) | |||
6214 | return false; | |||
6215 | ||||
6216 | DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("codegenprepare")) { dbgs() << "Promoting is possible... Enqueue for promotion!\n" ; } } while (false); | |||
6217 | ||||
6218 | VPH.enqueueForPromotion(ToBePromoted); | |||
6219 | Inst = ToBePromoted; | |||
6220 | } | |||
6221 | return false; | |||
6222 | } | |||
6223 | ||||
6224 | /// For the instruction sequence of store below, F and I values | |||
6225 | /// are bundled together as an i64 value before being stored into memory. | |||
6226 | /// Sometimes it is more efficient to generate separate stores for F and I, | |||
6227 | /// which can remove the bitwise instructions or sink them to colder places. | |||
6228 | /// | |||
6229 | /// (store (or (zext (bitcast F to i32) to i64), | |||
6230 | /// (shl (zext I to i64), 32)), addr) --> | |||
6231 | /// (store F, addr) and (store I, addr+4) | |||
6232 | /// | |||
6233 | /// Similarly, splitting for other merged store can also be beneficial, like: | |||
6234 | /// For pair of {i32, i32}, i64 store --> two i32 stores. | |||
6235 | /// For pair of {i32, i16}, i64 store --> two i32 stores. | |||
6236 | /// For pair of {i16, i16}, i32 store --> two i16 stores. | |||
6237 | /// For pair of {i16, i8}, i32 store --> two i16 stores. | |||
6238 | /// For pair of {i8, i8}, i16 store --> two i8 stores. | |||
6239 | /// | |||
6240 | /// We allow each target to determine specifically which kind of splitting is | |||
6241 | /// supported. | |||
6242 | /// | |||
6243 | /// The store patterns are commonly seen from the simple code snippet below | |||
6244 | /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo. | |||
6245 | /// void goo(const std::pair<int, float> &); | |||
6246 | /// hoo() { | |||
6247 | /// ... | |||
6248 | /// goo(std::make_pair(tmp, ftmp)); | |||
6249 | /// ... | |||
6250 | /// } | |||
6251 | /// | |||
6252 | /// Although we already have similar splitting in DAG Combine, we duplicate | |||
6253 | /// it in CodeGenPrepare to catch the case in which the pattern spans | |||
6254 | /// multiple BBs. The logic in DAG Combine is kept to catch cases generated | |||
6255 | /// during code expansion. | |||
6256 | static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL, | |||
6257 | const TargetLowering &TLI) { | |||
6258 | // Handle simple but common cases only. | |||
6259 | Type *StoreType = SI.getValueOperand()->getType(); | |||
6260 | if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) || | |||
6261 | DL.getTypeSizeInBits(StoreType) == 0) | |||
6262 | return false; | |||
6263 | ||||
6264 | unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2; | |||
6265 | Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize); | |||
6266 | if (DL.getTypeStoreSizeInBits(SplitStoreType) != | |||
6267 | DL.getTypeSizeInBits(SplitStoreType)) | |||
6268 | return false; | |||
6269 | ||||
6270 | // Match the following patterns: | |||
6271 | // (store (or (zext LValue to i64), | |||
6272 | // (shl (zext HValue to i64), 32)), HalfValBitSize) | |||
6273 | // or | |||
6274 | // (store (or (shl (zext HValue to i64), 32), | |||
6275 | //            (zext LValue to i64)), HalfValBitSize) | |||
6276 | // Expect both operands of the OR and the first operand of the SHL to have | |||
6277 | // only one use. | |||
6278 | Value *LValue, *HValue; | |||
6279 | if (!match(SI.getValueOperand(), | |||
6280 | m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))), | |||
6281 | m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))), | |||
6282 | m_SpecificInt(HalfValBitSize)))))) | |||
6283 | return false; | |||
6284 | ||||
6285 | // Check that LValue and HValue are integers no wider than HalfValBitSize. | |||
6286 | if (!LValue->getType()->isIntegerTy() || | |||
6287 | DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize || | |||
6288 | !HValue->getType()->isIntegerTy() || | |||
6289 | DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize) | |||
6290 | return false; | |||
6291 | ||||
6292 | // If LValue/HValue is a bitcast instruction, use the EVT before bitcast | |||
6293 | // as the input of target query. | |||
6294 | auto *LBC = dyn_cast<BitCastInst>(LValue); | |||
6295 | auto *HBC = dyn_cast<BitCastInst>(HValue); | |||
6296 | EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType()) | |||
6297 | : EVT::getEVT(LValue->getType()); | |||
6298 | EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType()) | |||
6299 | : EVT::getEVT(HValue->getType()); | |||
6300 | if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) | |||
6301 | return false; | |||
6302 | ||||
6303 | // Start to split store. | |||
6304 | IRBuilder<> Builder(SI.getContext()); | |||
6305 | Builder.SetInsertPoint(&SI); | |||
6306 | ||||
6307 | // If LValue/HValue is a bitcast in another BB, create a new one in current | |||
6308 | // BB so it may be merged with the split stores by the DAG combiner. | |||
6309 | if (LBC && LBC->getParent() != SI.getParent()) | |||
6310 | LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType()); | |||
6311 | if (HBC && HBC->getParent() != SI.getParent()) | |||
6312 | HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType()); | |||
6313 | ||||
6314 | auto CreateSplitStore = [&](Value *V, bool Upper) { | |||
6315 | V = Builder.CreateZExtOrBitCast(V, SplitStoreType); | |||
6316 | Value *Addr = Builder.CreateBitCast( | |||
6317 | SI.getOperand(1), | |||
6318 | SplitStoreType->getPointerTo(SI.getPointerAddressSpace())); | |||
6319 | if (Upper) | |||
6320 | Addr = Builder.CreateGEP( | |||
6321 | SplitStoreType, Addr, | |||
6322 | ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1)); | |||
6323 | Builder.CreateAlignedStore( | |||
6324 | V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment()); | |||
6325 | }; | |||
6326 | ||||
6327 | CreateSplitStore(LValue, false); | |||
6328 | CreateSplitStore(HValue, true); | |||
6329 | ||||
6330 | // Delete the old store. | |||
6331 | SI.eraseFromParent(); | |||
6332 | return true; | |||
6333 | } | |||
6334 | ||||
6335 | // Return true if the GEP has two operands, its first indexed type is | |||
6336 | // sequential, and the second operand is a constant. | |||
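     | // For example (illustrative): "getelementptr i32, i32* %p, i64 4" qualifies, | |||
     | // while a struct GEP such as | |||
     | //   getelementptr %struct.S, %struct.S* %p, i64 0, i32 1 | |||
     | // has more than two operands and does not. | |||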
6337 | static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) { | |||
6338 | gep_type_iterator I = gep_type_begin(*GEP); | |||
6339 | return GEP->getNumOperands() == 2 && | |||
6340 | I.isSequential() && | |||
6341 | isa<ConstantInt>(GEP->getOperand(1)); | |||
6342 | } | |||
6343 | ||||
6344 | // Try unmerging GEPs to reduce liveness interference (register pressure) across | |||
6345 | // IndirectBr edges. Since IndirectBr edges tend to touch on many blocks, | |||
6346 | // reducing liveness interference across those edges benefits global register | |||
6347 | // allocation. Currently handles only certain cases. | |||
6348 | // | |||
6349 | // For example, unmerge %GEPI and %UGEPI as below. | |||
6350 | // | |||
6351 | // ---------- BEFORE ---------- | |||
6352 | // SrcBlock: | |||
6353 | // ... | |||
6354 | // %GEPIOp = ... | |||
6355 | // ... | |||
6356 | // %GEPI = gep %GEPIOp, Idx | |||
6357 | // ... | |||
6358 | // indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ] | |||
6359 | // (* %GEPI is alive on the indirectbr edges due to other uses ahead) | |||
6360 | // (* %GEPIOp is alive on the indirectbr edges only because it's used by | |||
6361 | // %UGEPI) | |||
6362 | // | |||
6363 | // DstB0: ... (there may be a gep similar to %UGEPI to be unmerged) | |||
6364 | // DstB1: ... (there may be a gep similar to %UGEPI to be unmerged) | |||
6365 | // ... | |||
6366 | // | |||
6367 | // DstBi: | |||
6368 | // ... | |||
6369 | // %UGEPI = gep %GEPIOp, UIdx | |||
6370 | // ... | |||
6371 | // --------------------------- | |||
6372 | // | |||
6373 | // ---------- AFTER ---------- | |||
6374 | // SrcBlock: | |||
6375 | // ... (same as above) | |||
6376 | // (* %GEPI is still alive on the indirectbr edges) | |||
6377 | // (* %GEPIOp is no longer alive on the indirectbr edges as a result of the | |||
6378 | // unmerging) | |||
6379 | // ... | |||
6380 | // | |||
6381 | // DstBi: | |||
6382 | // ... | |||
6383 | // %UGEPI = gep %GEPI, (UIdx-Idx) | |||
6384 | // ... | |||
6385 | // --------------------------- | |||
6386 | // | |||
6387 | // The register pressure on the IndirectBr edges is reduced because %GEPIOp is | |||
6388 | // no longer alive on them. | |||
6389 | // | |||
6390 | // We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging | |||
6391 | // of GEPs in the first place in InstCombiner::visitGetElementPtrInst() so as | |||
6392 | // not to disable further simplifications and optimizations as a result of GEP | |||
6393 | // merging. | |||
6394 | // | |||
6395 | // Note this unmerging may increase the length of the data flow critical path | |||
6396 | // (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff | |||
6397 | // between register pressure and the length of the data-flow critical | |||
6398 | // path. Restricting this to the uncommon IndirectBr case would minimize the | |||
6399 | // impact of a potentially longer critical path, if any, and the impact on compile | |||
6400 | // time. | |||
6401 | static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI, | |||
6402 | const TargetTransformInfo *TTI) { | |||
6403 | BasicBlock *SrcBlock = GEPI->getParent(); | |||
6404 | // Check that SrcBlock ends with an IndirectBr. If not, give up. The common | |||
6405 | // (non-IndirectBr) cases exit early here. | |||
6406 | if (!isa<IndirectBrInst>(SrcBlock->getTerminator())) | |||
6407 | return false; | |||
6408 | // Check that GEPI is a simple gep with a single constant index. | |||
6409 | if (!GEPSequentialConstIndexed(GEPI)) | |||
6410 | return false; | |||
6411 | ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1)); | |||
6412 | // Check that GEPI is a cheap one. | |||
6413 | if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType()) | |||
6414 | > TargetTransformInfo::TCC_Basic) | |||
6415 | return false; | |||
6416 | Value *GEPIOp = GEPI->getOperand(0); | |||
6417 | // Check that GEPIOp is an instruction that's also defined in SrcBlock. | |||
6418 | if (!isa<Instruction>(GEPIOp)) | |||
6419 | return false; | |||
6420 | auto *GEPIOpI = cast<Instruction>(GEPIOp); | |||
6421 | if (GEPIOpI->getParent() != SrcBlock) | |||
6422 | return false; | |||
6423 | // Check that GEPI is used outside the block, meaning it's alive on the | |||
6424 | // IndirectBr edge(s). | |||
6425 | if (find_if(GEPI->users(), [&](User *Usr) { | |||
6426 | if (auto *I = dyn_cast<Instruction>(Usr)) { | |||
6427 | if (I->getParent() != SrcBlock) { | |||
6428 | return true; | |||
6429 | } | |||
6430 | } | |||
6431 | return false; | |||
6432 | }) == GEPI->users().end()) | |||
6433 | return false; | |||
6434 | // The second elements of the GEP chains to be unmerged. | |||
6435 | std::vector<GetElementPtrInst *> UGEPIs; | |||
6436 | // Check each user of GEPIOp to check if unmerging would make GEPIOp not alive | |||
6437 | // on IndirectBr edges. | |||
6438 | for (User *Usr : GEPIOp->users()) { | |||
6439 | if (Usr == GEPI) continue; | |||
6440 | // Check if Usr is an Instruction. If not, give up. | |||
6441 | if (!isa<Instruction>(Usr)) | |||
6442 | return false; | |||
6443 | auto *UI = cast<Instruction>(Usr); | |||
6444 | // If Usr is in the same block as GEPIOp, that is fine; skip it. | |||
6445 | if (UI->getParent() == SrcBlock) | |||
6446 | continue; | |||
6447 | // Check if Usr is a GEP. If not, give up. | |||
6448 | if (!isa<GetElementPtrInst>(Usr)) | |||
6449 | return false; | |||
6450 | auto *UGEPI = cast<GetElementPtrInst>(Usr); | |||
6451 | // Check if UGEPI is a simple gep with a single constant index and GEPIOp is | |||
6452 | // the pointer operand to it. If so, record it in the vector. If not, give | |||
6453 | // up. | |||
6454 | if (!GEPSequentialConstIndexed(UGEPI)) | |||
6455 | return false; | |||
6456 | if (UGEPI->getOperand(0) != GEPIOp) | |||
6457 | return false; | |||
6458 | if (GEPIIdx->getType() != | |||
6459 | cast<ConstantInt>(UGEPI->getOperand(1))->getType()) | |||
6460 | return false; | |||
6461 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
6462 | if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType()) | |||
6463 | > TargetTransformInfo::TCC_Basic) | |||
6464 | return false; | |||
6465 | UGEPIs.push_back(UGEPI); | |||
6466 | } | |||
6467 | if (UGEPIs.size() == 0) | |||
6468 | return false; | |||
6469 | // Check the materializing cost of (Uidx-Idx). | |||
6470 | for (GetElementPtrInst *UGEPI : UGEPIs) { | |||
6471 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
6472 | APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue(); | |||
6473 | unsigned ImmCost = TTI->getIntImmCost(NewIdx, GEPIIdx->getType()); | |||
6474 | if (ImmCost > TargetTransformInfo::TCC_Basic) | |||
6475 | return false; | |||
6476 | } | |||
6477 | // Now unmerge between GEPI and UGEPIs. | |||
6478 | for (GetElementPtrInst *UGEPI : UGEPIs) { | |||
6479 | UGEPI->setOperand(0, GEPI); | |||
6480 | ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1)); | |||
6481 | Constant *NewUGEPIIdx = | |||
6482 | ConstantInt::get(GEPIIdx->getType(), | |||
6483 | UGEPIIdx->getValue() - GEPIIdx->getValue()); | |||
6484 | UGEPI->setOperand(1, NewUGEPIIdx); | |||
6485 | // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not | |||
6486 | // inbounds to avoid UB. | |||
6487 | if (!GEPI->isInBounds()) { | |||
6488 | UGEPI->setIsInBounds(false); | |||
6489 | } | |||
6490 | } | |||
6491 | // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not | |||
6492 | // alive on IndirectBr edges). | |||
6493 | assert(find_if(GEPIOp->users(), [&](User *Usr) { | |||
6494 | return cast<Instruction>(Usr)->getParent() != SrcBlock; | |||
6495 | }) == GEPIOp->users().end() && "GEPIOp is used outside SrcBlock"); | |||
6496 | return true; | |||
6497 | } | |||
6498 | ||||
6499 | bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) { | |||
6500 | // Bail out if we inserted the instruction to prevent optimizations from | |||
6501 | // stepping on each other's toes. | |||
6502 | if (InsertedInsts.count(I)) | |||
6503 | return false; | |||
6504 | ||||
6505 | if (PHINode *P = dyn_cast<PHINode>(I)) { | |||
6506 | // It is possible for very late stage optimizations (such as SimplifyCFG) | |||
6507 | // to introduce PHI nodes too late to be cleaned up. If we detect such a | |||
6508 | // trivial PHI, go ahead and zap it here. | |||
6509 | if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) { | |||
6510 | P->replaceAllUsesWith(V); | |||
6511 | P->eraseFromParent(); | |||
6512 | ++NumPHIsElim; | |||
6513 | return true; | |||
6514 | } | |||
6515 | return false; | |||
6516 | } | |||
6517 | ||||
6518 | if (CastInst *CI = dyn_cast<CastInst>(I)) { | |||
6519 | // If the source of the cast is a constant, then this should have | |||
6520 | // already been constant folded. The only reason NOT to constant fold | |||
6521 | // it is if something (e.g. LSR) was careful to place the constant | |||
6522 | // evaluation in a block other than the one that uses it (e.g. to hoist | |||
6523 | // the address of globals out of a loop). If this is the case, we don't | |||
6524 | // want to forward-subst the cast. | |||
6525 | if (isa<Constant>(CI->getOperand(0))) | |||
6526 | return false; | |||
6527 | ||||
6528 | if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL)) | |||
6529 | return true; | |||
6530 | ||||
6531 | if (isa<ZExtInst>(I) || isa<SExtInst>(I)) { | |||
6532 | /// Sink a zext or sext into its user blocks if the target type doesn't | |||
6533 | /// fit in one register | |||
6534 | if (TLI && | |||
6535 | TLI->getTypeAction(CI->getContext(), | |||
6536 | TLI->getValueType(*DL, CI->getType())) == | |||
6537 | TargetLowering::TypeExpandInteger) { | |||
6538 | return SinkCast(CI); | |||
6539 | } else { | |||
6540 | bool MadeChange = optimizeExt(I); | |||
6541 | return MadeChange | optimizeExtUses(I); | |||
6542 | } | |||
6543 | } | |||
6544 | return false; | |||
6545 | } | |||
6546 | ||||
6547 | if (CmpInst *CI = dyn_cast<CmpInst>(I)) | |||
6548 | if (!TLI || !TLI->hasMultipleConditionRegisters()) | |||
6549 | return OptimizeCmpExpression(CI, TLI); | |||
6550 | ||||
6551 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) { | |||
6552 | LI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | |||
6553 | if (TLI) { | |||
6554 | bool Modified = optimizeLoadExt(LI); | |||
6555 | unsigned AS = LI->getPointerAddressSpace(); | |||
6556 | Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS); | |||
6557 | return Modified; | |||
6558 | } | |||
6559 | return false; | |||
6560 | } | |||
6561 | ||||
6562 | if (StoreInst *SI = dyn_cast<StoreInst>(I)) { | |||
6563 | if (TLI && splitMergedValStore(*SI, *DL, *TLI)) | |||
6564 | return true; | |||
6565 | SI->setMetadata(LLVMContext::MD_invariant_group, nullptr); | |||
6566 | if (TLI) { | |||
6567 | unsigned AS = SI->getPointerAddressSpace(); | |||
6568 | return optimizeMemoryInst(I, SI->getOperand(1), | |||
6569 | SI->getOperand(0)->getType(), AS); | |||
6570 | } | |||
6571 | return false; | |||
6572 | } | |||
6573 | ||||
6574 | if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) { | |||
6575 | unsigned AS = RMW->getPointerAddressSpace(); | |||
6576 | return optimizeMemoryInst(I, RMW->getPointerOperand(), | |||
6577 | RMW->getType(), AS); | |||
6578 | } | |||
6579 | ||||
6580 | if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) { | |||
6581 | unsigned AS = CmpX->getPointerAddressSpace(); | |||
6582 | return optimizeMemoryInst(I, CmpX->getPointerOperand(), | |||
6583 | CmpX->getCompareOperand()->getType(), AS); | |||
6584 | } | |||
6585 | ||||
6586 | BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I); | |||
6587 | ||||
6588 | if (BinOp && (BinOp->getOpcode() == Instruction::And) && | |||
6589 | EnableAndCmpSinking && TLI) | |||
6590 | return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts); | |||
6591 | ||||
6592 | if (BinOp && (BinOp->getOpcode() == Instruction::AShr || | |||
6593 | BinOp->getOpcode() == Instruction::LShr)) { | |||
6594 | ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1)); | |||
6595 | if (TLI && CI && TLI->hasExtractBitsInsn()) | |||
6596 | return OptimizeExtractBits(BinOp, CI, *TLI, *DL); | |||
6597 | ||||
6598 | return false; | |||
6599 | } | |||
6600 | ||||
6601 | if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) { | |||
6602 | if (GEPI->hasAllZeroIndices()) { | |||
6603 | /// The GEP operand must be a pointer, so its result must be as well -> BitCast | |||
6604 | Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(), | |||
6605 | GEPI->getName(), GEPI); | |||
6606 | GEPI->replaceAllUsesWith(NC); | |||
6607 | GEPI->eraseFromParent(); | |||
6608 | ++NumGEPsElim; | |||
6609 | optimizeInst(NC, ModifiedDT); | |||
6610 | return true; | |||
6611 | } | |||
6612 | if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) { | |||
6613 | return true; | |||
6614 | } | |||
6615 | return false; | |||
6616 | } | |||

  if (CallInst *CI = dyn_cast<CallInst>(I))
    return optimizeCallInst(CI, ModifiedDT);

  if (SelectInst *SI = dyn_cast<SelectInst>(I))
    return optimizeSelectInst(SI);

  if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
    return optimizeShuffleVectorInst(SVI);

  if (auto *Switch = dyn_cast<SwitchInst>(I))
    return optimizeSwitchInst(Switch);

  if (isa<ExtractElementInst>(I))
    return optimizeExtractElementInst(I);

  return false;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
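///
/// For example (illustrative), the classic shift/mask reverse of an i8,
/// rooted at its final `or`, is the kind of idiom that
/// recognizeBSwapOrBitReverseIdiom matches:
/// \code
///   %a  = shl i8 %x, 4
///   %b  = lshr i8 %x, 4
///   %t0 = or i8 %a, %b          ; swap nibbles
///   %c  = and i8 %t0, 51        ; 0b00110011
///   %d  = and i8 %t0, -52       ; 0b11001100
///   %e  = shl i8 %c, 2
///   %f  = lshr i8 %d, 2
///   %t1 = or i8 %e, %f          ; swap bit pairs
///   %g  = and i8 %t1, 85        ; 0b01010101
///   %h  = and i8 %t1, -86       ; 0b10101010
///   %i  = shl i8 %g, 1
///   %j  = lshr i8 %h, 1
///   %r  = or i8 %i, %j          ; full bit reverse of %x
/// \endcode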
static bool makeBitReverse(Instruction &I, const DataLayout &DL,
                           const TargetLowering &TLI) {
  if (!I.getType()->isIntegerTy() ||
      !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
                                    TLI.getValueType(DL, I.getType(), true)))
    return false;

  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  I.replaceAllUsesWith(LastInst);
  RecursivelyDeleteTriviallyDeadInstructions(&I);
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

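  // CurInstIterator is a member rather than a local, presumably so that the
  // optimize* callees, which may erase or insert instructions, can
  // resynchronize the walk when they invalidate the current position.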
  CurInstIterator = BB.begin();
  while (CurInstIterator != BB.end()) {
    MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
    if (ModifiedDT)
      return true;
  }

  bool MadeBitReverse = true;
  while (TLI && MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I, *DL, *TLI)) {
        MadeBitReverse = MadeChange = true;
        ModifiedDT = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB);

  return MadeChange;
}

// If llvm.dbg.value is far away from the value, then ISel may not be able
// to handle it properly. ISel will drop llvm.dbg.value if it cannot find a
// node corresponding to the value.
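//
// For example (illustrative IR), given
//   %x = add i32 %a, %b
//   ... many unrelated instructions ...
//   call void @llvm.dbg.value(metadata i32 %x, ...)
// the dbg.value call is moved up to sit just after the add, so ISel emits
// its DBG_VALUE next to the defining node instead of dropping it.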
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    Instruction *PrevNonDbgInst = nullptr;
    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      Instruction *Insn = &*BI++;
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
      // Leave dbg.values that refer to an alloca alone. These intrinsics
      // describe the address of a variable (= the alloca) being taken. They
      // should not be moved next to the alloca (and to the beginning of the
      // scope), but rather stay close to where said address is used.
      if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
        PrevNonDbgInst = Insn;
        continue;
      }

      Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
      if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
        // If VI is a phi in a block with an EHPad terminator, we can't
        // insert after it.
        if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
          continue;
        DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
        DVI->removeFromParent();
        if (isa<PHINode>(VI))
          DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
        else
          DVI->insertAfter(VI);
        MadeChange = true;
        ++NumDbgValueMoved;
      }
    }
  }
  return MadeChange;
}

/// \brief Scale down both weights to fit into uint32_t.
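///
/// For example, NewTrue = 2^33 and NewFalse = 2^32 give
/// Scale = (2^33 / (2^32 - 1)) + 1 = 3, so the weights become
/// 2863311530 and 1431655765, both of which fit in uint32_t.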
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// \brief Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied to targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F) {
  if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or label %dest1, label %dest2
    BinaryOperator *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
                             m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
                                 m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
        !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())))
      continue;

    DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());

    // Update the original basic block to use the first condition directly in
    // its branch instruction, and remove the no-longer-needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch now comes
    // from the newly generated BB (TmpBB). In the other successor we need to
    // add one incoming edge to the PHI nodes, because both branch
    // instructions now target the same successor. Depending on the original
    // branch condition (and/or) we have to swap the successors (TrueDest,
    // FalseDest), so that we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    for (auto &I : *TBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      int i;
      while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
        PN->setIncomingBlock(i, TmpBB);
    }

    // Add another incoming edge from the new BB.
    for (auto &I : *FBB) {
      PHINode *PN = dyn_cast<PHINode>(&I);
      if (!PN)
        break;
      auto *Val = PN->getIncomingValueForBlock(&BB);
      PN->addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     == TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set
      // BB1's weights to A and A+2B, and set TmpBB's weights to A and 2B.
      // This choice assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals TrueProb for
      // TmpBB, but the math is more complicated.
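      //
      // Worked example (illustrative): with original weights A = 3 and
      // B = 1, BB1 gets weights 3:5 and TmpBB gets 3:2, so
      //   TrueProb(BB1) + FalseProb(BB1) * TrueProb(TmpBB)
      //     = 3/8 + (5/8) * (3/5) = 6/8 = 3/4 = A/(A+B),
      // matching the requirement above.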
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      // This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     == FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set
      // BB1's weights to 2A+B and B, and set TmpBB's weights to 2A and B.
      // This choice assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
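      //
      // Worked example (illustrative): with original weights A = 3 and
      // B = 1, BB1 gets weights 7:1 and TmpBB gets 6:1, so
      //   FalseProb(BB1) + TrueProb(BB1) * FalseProb(TmpBB)
      //     = 1/8 + (7/8) * (1/7) = 2/8 = 1/4 = B/(A+B),
      // matching the requirement above.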
      uint64_t TrueWeight, FalseWeight;
      if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    // Note: No point in getting fancy here, since the DT info is never
    // available to CodeGenPrepare.
    ModifiedDT = true;

    MadeChange = true;

    DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
          TmpBB->dump());
  }
  return MadeChange;
}