Bug Summary

File: lib/CodeGen/CodeGenPrepare.cpp
Warning: line 4659, column 3
Undefined or garbage value returned to caller
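
This check fires when a function can return a value that was never initialized on some execution path. The snippet below is a minimal, hypothetical illustration of the warning class only; it is not the code at line 4659.

    // Hypothetical example of "Undefined or garbage value returned to caller".
    int pickValue(bool Flag) {
      int Result;       // Result starts out uninitialized.
      if (Flag)
        Result = 42;    // Only this branch assigns Result.
      return Result;    // If Flag is false, a garbage value escapes to the caller.
    }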

Annotated Source Code

1//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This pass munges the code in the input function to better prepare it for
11// SelectionDAG-based code generation. This works around limitations in its
12// basic-block-at-a-time approach. It should eventually be removed.
13//
14//===----------------------------------------------------------------------===//
15
16#include "llvm/CodeGen/Passes.h"
17#include "llvm/CodeGen/TargetPassConfig.h"
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/SetVector.h"
20#include "llvm/ADT/SmallSet.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/Analysis/BlockFrequencyInfo.h"
23#include "llvm/Analysis/BranchProbabilityInfo.h"
24#include "llvm/Analysis/CFG.h"
25#include "llvm/Analysis/InstructionSimplify.h"
26#include "llvm/Analysis/LoopInfo.h"
27#include "llvm/Analysis/ProfileSummaryInfo.h"
28#include "llvm/Analysis/TargetLibraryInfo.h"
29#include "llvm/Analysis/TargetTransformInfo.h"
30#include "llvm/Analysis/ValueTracking.h"
31#include "llvm/Analysis/MemoryBuiltins.h"
32#include "llvm/CodeGen/Analysis.h"
33#include "llvm/IR/CallSite.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Dominators.h"
38#include "llvm/IR/Function.h"
39#include "llvm/IR/GetElementPtrTypeIterator.h"
40#include "llvm/IR/IRBuilder.h"
41#include "llvm/IR/InlineAsm.h"
42#include "llvm/IR/Instructions.h"
43#include "llvm/IR/IntrinsicInst.h"
44#include "llvm/IR/MDBuilder.h"
45#include "llvm/IR/PatternMatch.h"
46#include "llvm/IR/Statepoint.h"
47#include "llvm/IR/ValueHandle.h"
48#include "llvm/IR/ValueMap.h"
49#include "llvm/Pass.h"
50#include "llvm/Support/BranchProbability.h"
51#include "llvm/Support/CommandLine.h"
52#include "llvm/Support/Debug.h"
53#include "llvm/Support/raw_ostream.h"
54#include "llvm/Target/TargetLowering.h"
55#include "llvm/Target/TargetSubtargetInfo.h"
56#include "llvm/Transforms/Utils/BasicBlockUtils.h"
57#include "llvm/Transforms/Utils/BuildLibCalls.h"
58#include "llvm/Transforms/Utils/BypassSlowDivision.h"
59#include "llvm/Transforms/Utils/Cloning.h"
60#include "llvm/Transforms/Utils/Local.h"
61#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
62#include "llvm/Transforms/Utils/ValueMapper.h"
63using namespace llvm;
64using namespace llvm::PatternMatch;
65
66#define DEBUG_TYPE "codegenprepare"
67
68STATISTIC(NumBlocksElim, "Number of blocks eliminated");
69STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
70STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
71STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
72 "sunken Cmps");
73STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
74 "of sunken Casts");
75STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
76 "computations were sunk");
77STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
78STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
79STATISTIC(NumAndsAdded,
80 "Number of and mask instructions added to form ext loads");
81STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
82STATISTIC(NumRetsDup, "Number of return instructions duplicated");
83STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
84STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
85STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
86
87static cl::opt<bool> DisableBranchOpts(
88 "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
89 cl::desc("Disable branch optimizations in CodeGenPrepare"));
90
91static cl::opt<bool>
92 DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
93 cl::desc("Disable GC optimizations in CodeGenPrepare"));
94
95static cl::opt<bool> DisableSelectToBranch(
96 "disable-cgp-select2branch", cl::Hidden, cl::init(false),
97 cl::desc("Disable select to branch conversion."));
98
99static cl::opt<bool> AddrSinkUsingGEPs(
100 "addr-sink-using-gep", cl::Hidden, cl::init(true),
101 cl::desc("Address sinking in CGP using GEPs."));
102
103static cl::opt<bool> EnableAndCmpSinking(
104 "enable-andcmp-sinking", cl::Hidden, cl::init(true),
105 cl::desc("Enable sinkinig and/cmp into branches."));
106
107static cl::opt<bool> DisableStoreExtract(
108 "disable-cgp-store-extract", cl::Hidden, cl::init(false),
109 cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
110
111static cl::opt<bool> StressStoreExtract(
112 "stress-cgp-store-extract", cl::Hidden, cl::init(false),
113 cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
114
115static cl::opt<bool> DisableExtLdPromotion(
116 "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
117 cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
118 "CodeGenPrepare"));
119
120static cl::opt<bool> StressExtLdPromotion(
121 "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
122 cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
123 "optimization in CodeGenPrepare"));
124
125static cl::opt<bool> DisablePreheaderProtect(
126 "disable-preheader-prot", cl::Hidden, cl::init(false),
127 cl::desc("Disable protection against removing loop preheaders"));
128
129static cl::opt<bool> ProfileGuidedSectionPrefix(
130 "profile-guided-section-prefix", cl::Hidden, cl::init(true),
131 cl::desc("Use profile info to add section prefix for hot/cold functions"));
132
133static cl::opt<unsigned> FreqRatioToSkipMerge(
134 "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
135 cl::desc("Skip merging empty blocks if (frequency of empty block) / "
136 "(frequency of destination block) is greater than this ratio"));
137
138static cl::opt<bool> ForceSplitStore(
139 "force-split-store", cl::Hidden, cl::init(false),
140 cl::desc("Force store splitting no matter what the target query says."));
141
142static cl::opt<bool>
143EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
144 cl::desc("Enable merging of redundant sexts when one is dominating"
145 " the other."), cl::init(true));
146
147namespace {
148typedef SmallPtrSet<Instruction *, 16> SetOfInstrs;
149typedef PointerIntPair<Type *, 1, bool> TypeIsSExt;
150typedef DenseMap<Instruction *, TypeIsSExt> InstrToOrigTy;
151typedef SmallVector<Instruction *, 16> SExts;
152typedef DenseMap<Value *, SExts> ValueToSExts;
153class TypePromotionTransaction;
154
155 class CodeGenPrepare : public FunctionPass {
156 const TargetMachine *TM;
157 const TargetSubtargetInfo *SubtargetInfo;
158 const TargetLowering *TLI;
159 const TargetRegisterInfo *TRI;
160 const TargetTransformInfo *TTI;
161 const TargetLibraryInfo *TLInfo;
162 const LoopInfo *LI;
163 std::unique_ptr<BlockFrequencyInfo> BFI;
164 std::unique_ptr<BranchProbabilityInfo> BPI;
165
166 /// As we scan instructions optimizing them, this is the next instruction
167 /// to optimize. Transforms that can invalidate this should update it.
168 BasicBlock::iterator CurInstIterator;
169
170 /// Keeps track of non-local addresses that have been sunk into a block.
171 /// This allows us to avoid inserting duplicate code for blocks with
172 /// multiple load/stores of the same address.
173 ValueMap<Value*, Value*> SunkAddrs;
174
175 /// Keeps track of all instructions inserted for the current function.
176 SetOfInstrs InsertedInsts;
177 /// Keeps track of the types of the related instructions before their
178 /// promotion for the current function.
179 InstrToOrigTy PromotedInsts;
180
181 /// Keep track of instructions removed during promotion.
182 SetOfInstrs RemovedInsts;
183
184 /// Keep track of sext chains based on their initial value.
185 DenseMap<Value *, Instruction *> SeenChainsForSExt;
186
187 /// Keep track of SExt promoted.
188 ValueToSExts ValToSExtendedUses;
189
190 /// True if CFG is modified in any way.
191 bool ModifiedDT;
192
193 /// True if optimizing for size.
194 bool OptSize;
195
196 /// DataLayout for the Function being processed.
197 const DataLayout *DL;
198
199 public:
200 static char ID; // Pass identification, replacement for typeid
201 CodeGenPrepare()
202 : FunctionPass(ID), TM(nullptr), TLI(nullptr), TTI(nullptr),
203 DL(nullptr) {
204 initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
205 }
206 bool runOnFunction(Function &F) override;
207
208 StringRef getPassName() const override { return "CodeGen Prepare"; }
209
210 void getAnalysisUsage(AnalysisUsage &AU) const override {
211 // FIXME: When we can selectively preserve passes, preserve the domtree.
212 AU.addRequired<ProfileSummaryInfoWrapperPass>();
213 AU.addRequired<TargetLibraryInfoWrapperPass>();
214 AU.addRequired<TargetTransformInfoWrapperPass>();
215 AU.addRequired<LoopInfoWrapperPass>();
216 }
217
218 private:
219 bool eliminateFallThrough(Function &F);
220 bool eliminateMostlyEmptyBlocks(Function &F);
221 BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
222 bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
223 void eliminateMostlyEmptyBlock(BasicBlock *BB);
224 bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
225 bool isPreheader);
226 bool optimizeBlock(BasicBlock &BB, bool& ModifiedDT);
227 bool optimizeInst(Instruction *I, bool& ModifiedDT);
228 bool optimizeMemoryInst(Instruction *I, Value *Addr,
229 Type *AccessTy, unsigned AS);
230 bool optimizeInlineAsmInst(CallInst *CS);
231 bool optimizeCallInst(CallInst *CI, bool& ModifiedDT);
232 bool optimizeExt(Instruction *&I);
233 bool optimizeExtUses(Instruction *I);
234 bool optimizeLoadExt(LoadInst *I);
235 bool optimizeSelectInst(SelectInst *SI);
236 bool optimizeShuffleVectorInst(ShuffleVectorInst *SI);
237 bool optimizeSwitchInst(SwitchInst *CI);
238 bool optimizeExtractElementInst(Instruction *Inst);
239 bool dupRetToEnableTailCallOpts(BasicBlock *BB);
240 bool placeDbgValues(Function &F);
241 bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
242 LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
243 bool tryToPromoteExts(TypePromotionTransaction &TPT,
244 const SmallVectorImpl<Instruction *> &Exts,
245 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
246 unsigned CreatedInstsCost = 0);
247 bool mergeSExts(Function &F);
248 bool performAddressTypePromotion(
249 Instruction *&Inst,
250 bool AllowPromotionWithoutCommonHeader,
251 bool HasPromoted, TypePromotionTransaction &TPT,
252 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
253 bool splitBranchCondition(Function &F);
254 bool simplifyOffsetableRelocate(Instruction &I);
255 bool splitIndirectCriticalEdges(Function &F);
256 };
257}
258
259char CodeGenPrepare::ID = 0;
260INITIALIZE_PASS_BEGIN(CodeGenPrepare, "codegenprepare",
261 "Optimize for code generation", false, false)
262INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
263INITIALIZE_PASS_END(CodeGenPrepare, "codegenprepare",
264 "Optimize for code generation", false, false)
265
266FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
267
268bool CodeGenPrepare::runOnFunction(Function &F) {
269 if (skipFunction(F))
270 return false;
271
272 DL = &F.getParent()->getDataLayout();
273
274 bool EverMadeChange = false;
275 // Clear per function information.
276 InsertedInsts.clear();
277 PromotedInsts.clear();
278 BFI.reset();
279 BPI.reset();
280
281 ModifiedDT = false;
282 if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
283 TM = &TPC->getTM<TargetMachine>();
284 SubtargetInfo = TM->getSubtargetImpl(F);
285 TLI = SubtargetInfo->getTargetLowering();
286 TRI = SubtargetInfo->getRegisterInfo();
287 }
288 TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
289 TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
290 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
291 OptSize = F.optForSize();
292
293 if (ProfileGuidedSectionPrefix) {
294 ProfileSummaryInfo *PSI =
295 getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
296 if (PSI->isFunctionHotInCallGraph(&F))
297 F.setSectionPrefix(".hot");
298 else if (PSI->isFunctionColdInCallGraph(&F))
299 F.setSectionPrefix(".unlikely");
300 }
301
302 /// This optimization identifies DIV instructions that can be
303 /// profitably bypassed and carried out with a shorter, faster divide.
304 if (!OptSize && TLI && TLI->isSlowDivBypassed()) {
305 const DenseMap<unsigned int, unsigned int> &BypassWidths =
306 TLI->getBypassSlowDivWidths();
307 BasicBlock* BB = &*F.begin();
308 while (BB != nullptr) {
309 // bypassSlowDivision may create new BBs, but we don't want to reapply the
310 // optimization to those blocks.
311 BasicBlock* Next = BB->getNextNode();
312 EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
313 BB = Next;
314 }
315 }
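// Illustrative sketch, not part of the original file: bypassSlowDivision
// conceptually guards a wide divide with a runtime check and a narrower,
// faster divide when the target reports a bypass width (e.g. 64 -> 32 bits):
//
//   uint64_t Q = A / B;                          // always the slow divider
//
// becomes, roughly:
//
//   uint64_t Q = ((A | B) >> 32) == 0
//                    ? (uint32_t)A / (uint32_t)B // fast 32-bit divide
//                    : A / B;                    // slow 64-bit divide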
316
317 // Eliminate blocks that contain only PHI nodes and an
318 // unconditional branch.
319 EverMadeChange |= eliminateMostlyEmptyBlocks(F);
320
321 // If llvm.dbg.value is far away from the value, then iSel may not be able to
322 // handle it properly. iSel will drop llvm.dbg.value if it cannot
323 // find a node corresponding to the value.
324 EverMadeChange |= placeDbgValues(F);
325
326 if (!DisableBranchOpts)
327 EverMadeChange |= splitBranchCondition(F);
328
329 // Split some critical edges where one of the sources is an indirect branch,
330 // to help generate sane code for PHIs involving such edges.
331 EverMadeChange |= splitIndirectCriticalEdges(F);
332
333 bool MadeChange = true;
334 while (MadeChange) {
335 MadeChange = false;
336 SeenChainsForSExt.clear();
337 ValToSExtendedUses.clear();
338 RemovedInsts.clear();
339 for (Function::iterator I = F.begin(); I != F.end(); ) {
340 BasicBlock *BB = &*I++;
341 bool ModifiedDTOnIteration = false;
342 MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);
343
344 // Restart BB iteration if the dominator tree of the Function was changed
345 if (ModifiedDTOnIteration)
346 break;
347 }
348 if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
349 MadeChange |= mergeSExts(F);
350
351 // Really free removed instructions during promotion.
352 for (Instruction *I : RemovedInsts)
353 I->deleteValue();
354
355 EverMadeChange |= MadeChange;
356 }
357
358 SunkAddrs.clear();
359
360 if (!DisableBranchOpts) {
361 MadeChange = false;
362 SmallPtrSet<BasicBlock*, 8> WorkList;
363 for (BasicBlock &BB : F) {
364 SmallVector<BasicBlock *, 2> Successors(succ_begin(&BB), succ_end(&BB));
365 MadeChange |= ConstantFoldTerminator(&BB, true);
366 if (!MadeChange) continue;
367
368 for (SmallVectorImpl<BasicBlock*>::iterator
369 II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
370 if (pred_begin(*II) == pred_end(*II))
371 WorkList.insert(*II);
372 }
373
374 // Delete the dead blocks and any of their dead successors.
375 MadeChange |= !WorkList.empty();
376 while (!WorkList.empty()) {
377 BasicBlock *BB = *WorkList.begin();
378 WorkList.erase(BB);
379 SmallVector<BasicBlock*, 2> Successors(succ_begin(BB), succ_end(BB));
380
381 DeleteDeadBlock(BB);
382
383 for (SmallVectorImpl<BasicBlock*>::iterator
384 II = Successors.begin(), IE = Successors.end(); II != IE; ++II)
385 if (pred_begin(*II) == pred_end(*II))
386 WorkList.insert(*II);
387 }
388
389 // Merge pairs of basic blocks with unconditional branches, connected by
390 // a single edge.
391 if (EverMadeChange || MadeChange)
392 MadeChange |= eliminateFallThrough(F);
393
394 EverMadeChange |= MadeChange;
395 }
396
397 if (!DisableGCOpts) {
398 SmallVector<Instruction *, 2> Statepoints;
399 for (BasicBlock &BB : F)
400 for (Instruction &I : BB)
401 if (isStatepoint(I))
402 Statepoints.push_back(&I);
403 for (auto &I : Statepoints)
404 EverMadeChange |= simplifyOffsetableRelocate(*I);
405 }
406
407 return EverMadeChange;
408}
409
410/// Merge basic blocks which are connected by a single edge, where one of the
411/// basic blocks has a single successor pointing to the other basic block,
412/// which has a single predecessor.
413bool CodeGenPrepare::eliminateFallThrough(Function &F) {
414 bool Changed = false;
415 // Scan all of the blocks in the function, except for the entry block.
416 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
417 BasicBlock *BB = &*I++;
418 // If the destination block has a single pred, then this is a trivial
419 // edge, just collapse it.
420 BasicBlock *SinglePred = BB->getSinglePredecessor();
421
422 // Don't merge if BB's address is taken.
423 if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
424
425 BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
426 if (Term && !Term->isConditional()) {
427 Changed = true;
428 DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n");
429 // Remember if SinglePred was the entry block of the function.
430 // If so, we will need to move BB back to the entry position.
431 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
432 MergeBasicBlockIntoOnlyPred(BB, nullptr);
433
434 if (isEntry && BB != &BB->getParent()->getEntryBlock())
435 BB->moveBefore(&BB->getParent()->getEntryBlock());
436
437 // We have erased a block. Update the iterator.
438 I = BB->getIterator();
439 }
440 }
441 return Changed;
442}
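// Illustrative sketch, not part of the original file: the merge performed by
// eliminateFallThrough collapses a trivial fall-through edge such as
//
//   pred:                  ; ends in an unconditional branch
//     ...
//     br label %bb
//   bb:                    ; single predecessor, address not taken
//     ...
//
// into a single block holding pred's instructions followed by bb's.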
443
444/// Find a destination block from BB if BB is a mergeable empty block.
445BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
446 // If this block doesn't end with an uncond branch, ignore it.
447 BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
448 if (!BI || !BI->isUnconditional())
449 return nullptr;
450
451 // If the instruction before the branch (skipping debug info) isn't a phi
452 // node, then other stuff is happening here.
453 BasicBlock::iterator BBI = BI->getIterator();
454 if (BBI != BB->begin()) {
455 --BBI;
456 while (isa<DbgInfoIntrinsic>(BBI)) {
457 if (BBI == BB->begin())
458 break;
459 --BBI;
460 }
461 if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
462 return nullptr;
463 }
464
465 // Do not break infinite loops.
466 BasicBlock *DestBB = BI->getSuccessor(0);
467 if (DestBB == BB)
468 return nullptr;
469
470 if (!canMergeBlocks(BB, DestBB))
471 DestBB = nullptr;
472
473 return DestBB;
474}
475
476// Return the unique indirectbr predecessor of a block. This may return null
477// even if such a predecessor exists, if it's not useful for splitting.
478// If a predecessor is found, OtherPreds will contain all other (non-indirectbr)
479// predecessors of BB.
480static BasicBlock *
481findIBRPredecessor(BasicBlock *BB, SmallVectorImpl<BasicBlock *> &OtherPreds) {
482 // If the block doesn't have any PHIs, we don't care about it, since there's
483 // no point in splitting it.
484 PHINode *PN = dyn_cast<PHINode>(BB->begin());
485 if (!PN)
486 return nullptr;
487
488 // Verify we have exactly one IBR predecessor.
489 // Conservatively bail out if one of the other predecessors is not a "regular"
490 // terminator (that is, not a switch or a br).
491 BasicBlock *IBB = nullptr;
492 for (unsigned Pred = 0, E = PN->getNumIncomingValues(); Pred != E; ++Pred) {
493 BasicBlock *PredBB = PN->getIncomingBlock(Pred);
494 TerminatorInst *PredTerm = PredBB->getTerminator();
495 switch (PredTerm->getOpcode()) {
496 case Instruction::IndirectBr:
497 if (IBB)
498 return nullptr;
499 IBB = PredBB;
500 break;
501 case Instruction::Br:
502 case Instruction::Switch:
503 OtherPreds.push_back(PredBB);
504 continue;
505 default:
506 return nullptr;
507 }
508 }
509
510 return IBB;
511}
512
513// Split critical edges where the source of the edge is an indirectbr
514// instruction. This isn't always possible, but we can handle some easy cases.
515// This is useful because MI is unable to split such critical edges,
516// which means it will not be able to sink instructions along those edges.
517// This is especially painful for indirect branches with many successors, where
518// we end up having to prepare all outgoing values in the origin block.
519//
520// Our normal algorithm for splitting critical edges requires us to update
521// the outgoing edges of the edge origin block, but for an indirectbr this
522// is hard, since it would require finding and updating the block addresses
523// the indirect branch uses. But if a block only has a single indirectbr
524// predecessor, with the others being regular branches, we can do it in a
525// different way.
526// Say we have A -> D, B -> D, I -> D where only I -> D is an indirectbr.
527// We can split D into D0 and D1, where D0 contains only the PHIs from D,
528// and D1 is the D block body. We can then duplicate D0 as D0A and D0B, and
529// create the following structure:
530// A -> D0A, B -> D0A, I -> D0B, D0A -> D1, D0B -> D1
531bool CodeGenPrepare::splitIndirectCriticalEdges(Function &F) {
532 // Check whether the function has any indirectbrs, and collect which blocks
533 // they may jump to. Since most functions don't have indirect branches,
534 // this lowers the common case's overhead to O(Blocks) instead of O(Edges).
535 SmallSetVector<BasicBlock *, 16> Targets;
536 for (auto &BB : F) {
537 auto *IBI = dyn_cast<IndirectBrInst>(BB.getTerminator());
538 if (!IBI)
539 continue;
540
541 for (unsigned Succ = 0, E = IBI->getNumSuccessors(); Succ != E; ++Succ)
542 Targets.insert(IBI->getSuccessor(Succ));
543 }
544
545 if (Targets.empty())
546 return false;
547
548 bool Changed = false;
549 for (BasicBlock *Target : Targets) {
550 SmallVector<BasicBlock *, 16> OtherPreds;
551 BasicBlock *IBRPred = findIBRPredecessor(Target, OtherPreds);
552 // If we did not find an indirectbr, or the indirectbr is the only
553 // incoming edge, this isn't the kind of edge we're looking for.
554 if (!IBRPred || OtherPreds.empty())
555 continue;
556
557 // Don't even think about ehpads/landingpads.
558 Instruction *FirstNonPHI = Target->getFirstNonPHI();
559 if (FirstNonPHI->isEHPad() || Target->isLandingPad())
560 continue;
561
562 BasicBlock *BodyBlock = Target->splitBasicBlock(FirstNonPHI, ".split");
563 // It's possible Target was its own successor through an indirectbr.
564 // In this case, the indirectbr now comes from BodyBlock.
565 if (IBRPred == Target)
566 IBRPred = BodyBlock;
567
568 // At this point Target only has PHIs, and BodyBlock has the rest of the
569 // block's body. Create a copy of Target that will be used by the "direct"
570 // preds.
571 ValueToValueMapTy VMap;
572 BasicBlock *DirectSucc = CloneBasicBlock(Target, VMap, ".clone", &F);
573
574 for (BasicBlock *Pred : OtherPreds) {
575 // If the target is a loop to itself, then the terminator of the split
576 // block needs to be updated.
577 if (Pred == Target)
578 BodyBlock->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
579 else
580 Pred->getTerminator()->replaceUsesOfWith(Target, DirectSucc);
581 }
582
583 // Ok, now fix up the PHIs. We know the two blocks only have PHIs, and that
584 // they are clones, so the number of PHIs is the same.
585 // (a) Remove the edge coming from IBRPred from the "Direct" PHI
586 // (b) Leave that as the only edge in the "Indirect" PHI.
587 // (c) Merge the two in the body block.
588 BasicBlock::iterator Indirect = Target->begin(),
589 End = Target->getFirstNonPHI()->getIterator();
590 BasicBlock::iterator Direct = DirectSucc->begin();
591 BasicBlock::iterator MergeInsert = BodyBlock->getFirstInsertionPt();
592
593 assert(&*End == Target->getTerminator() &&
594 "Block was expected to only contain PHIs");
595
596 while (Indirect != End) {
597 PHINode *DirPHI = cast<PHINode>(Direct);
598 PHINode *IndPHI = cast<PHINode>(Indirect);
599
600 // Now, clean up - the direct block shouldn't get the indirect value,
601 // and vice versa.
602 DirPHI->removeIncomingValue(IBRPred);
603 Direct++;
604
605 // Advance the pointer here, to avoid invalidation issues when the old
606 // PHI is erased.
607 Indirect++;
608
609 PHINode *NewIndPHI = PHINode::Create(IndPHI->getType(), 1, "ind", IndPHI);
610 NewIndPHI->addIncoming(IndPHI->getIncomingValueForBlock(IBRPred),
611 IBRPred);
612
613 // Create a PHI in the body block, to merge the direct and indirect
614 // predecessors.
615 PHINode *MergePHI =
616 PHINode::Create(IndPHI->getType(), 2, "merge", &*MergeInsert);
617 MergePHI->addIncoming(NewIndPHI, Target);
618 MergePHI->addIncoming(DirPHI, DirectSucc);
619
620 IndPHI->replaceAllUsesWith(MergePHI);
621 IndPHI->eraseFromParent();
622 }
623
624 Changed = true;
625 }
626
627 return Changed;
628}
629
630/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
631/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
632/// edges in ways that are non-optimal for isel. Start by eliminating these
633/// blocks so we can split them the way we want them.
634bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
635 SmallPtrSet<BasicBlock *, 16> Preheaders;
636 SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
637 while (!LoopList.empty()) {
638 Loop *L = LoopList.pop_back_val();
639 LoopList.insert(LoopList.end(), L->begin(), L->end());
640 if (BasicBlock *Preheader = L->getLoopPreheader())
641 Preheaders.insert(Preheader);
642 }
643
644 bool MadeChange = false;
645 // Note that this intentionally skips the entry block.
646 for (Function::iterator I = std::next(F.begin()), E = F.end(); I != E;) {
647 BasicBlock *BB = &*I++;
648 BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
649 if (!DestBB ||
650 !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
651 continue;
652
653 eliminateMostlyEmptyBlock(BB);
654 MadeChange = true;
655 }
656 return MadeChange;
657}
658
659bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
660 BasicBlock *DestBB,
661 bool isPreheader) {
662 // Do not delete loop preheaders if doing so would create a critical edge.
663 // Loop preheaders can be good locations to spill registers. If the
664 // preheader is deleted and we create a critical edge, registers may be
665 // spilled in the loop body instead.
666 if (!DisablePreheaderProtect && isPreheader &&
667 !(BB->getSinglePredecessor() &&
668 BB->getSinglePredecessor()->getSingleSuccessor()))
669 return false;
670
671 // Try to skip merging if the unique predecessor of BB is terminated by a
672 // switch or indirect branch instruction, and BB is used as an incoming block
673 // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel to
674 // add COPY instructions in the predecessor of BB instead of BB (if it is not
675 // merged). Note that the critical edge created by merging such blocks won't be
676 // split in MachineSink because the jump table is not analyzable. By keeping
677 // such an empty block (BB), ISel will place COPY instructions in BB, not in the
678 // predecessor of BB.
679 BasicBlock *Pred = BB->getUniquePredecessor();
680 if (!Pred ||
681 !(isa<SwitchInst>(Pred->getTerminator()) ||
682 isa<IndirectBrInst>(Pred->getTerminator())))
683 return true;
684
685 if (BB->getTerminator() != BB->getFirstNonPHI())
686 return true;
687
688 // We use a simple cost heuristic which determines that skipping merging is
689 // profitable if the cost of skipping merging is less than the cost of
690 // merging: Cost(skipping merging) < Cost(merging BB), where the
691 // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
692 // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
693 // Assuming Cost(Copy) == Cost(Branch), we could simplify it to:
694 // Freq(Pred) / Freq(BB) > 2.
695 // Note that if there are multiple empty blocks sharing the same incoming
696 // value for the PHIs in the DestBB, we consider them together. In such a
697 // case, Cost(merging BB) will be the sum of their frequencies.
698
699 if (!isa<PHINode>(DestBB->begin()))
700 return true;
701
702 SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
703
704 // Find all other incoming blocks from which incoming values of all PHIs in
705 // DestBB are the same as the ones from BB.
706 for (pred_iterator PI = pred_begin(DestBB), E = pred_end(DestBB); PI != E;
707 ++PI) {
708 BasicBlock *DestBBPred = *PI;
709 if (DestBBPred == BB)
710 continue;
711
712 bool HasAllSameValue = true;
713 BasicBlock::const_iterator DestBBI = DestBB->begin();
714 while (const PHINode *DestPN = dyn_cast<PHINode>(DestBBI++)) {
715 if (DestPN->getIncomingValueForBlock(BB) !=
716 DestPN->getIncomingValueForBlock(DestBBPred)) {
717 HasAllSameValue = false;
718 break;
719 }
720 }
721 if (HasAllSameValue)
722 SameIncomingValueBBs.insert(DestBBPred);
723 }
724
725 // See if all of BB's incoming values are the same as the value from Pred. In
726 // this case, there is no reason to skip merging because COPYs are expected to
727 // be placed in Pred already.
728 if (SameIncomingValueBBs.count(Pred))
729 return true;
730
731 if (!BFI) {
732 Function &F = *BB->getParent();
733 LoopInfo LI{DominatorTree(F)};
734 BPI.reset(new BranchProbabilityInfo(F, LI));
735 BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
736 }
737
738 BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
739 BlockFrequency BBFreq = BFI->getBlockFreq(BB);
740
741 for (auto SameValueBB : SameIncomingValueBBs)
742 if (SameValueBB->getUniquePredecessor() == Pred &&
743 DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
744 BBFreq += BFI->getBlockFreq(SameValueBB);
745
746 return PredFreq.getFrequency() <=
747 BBFreq.getFrequency() * FreqRatioToSkipMerge;
748}
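// Worked example for the heuristic above (illustrative numbers only, assuming
// no other blocks share BB's incoming values): with the default
// FreqRatioToSkipMerge of 2, Freq(Pred) = 300 and Freq(BB) = 100 gives
// 300 <= 100 * 2 == false, so the function returns false and the empty block
// is kept. With Freq(Pred) = 150 instead, 150 <= 200 holds and merging is
// reported as profitable.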
749
750/// Return true if we can merge BB into DestBB if there is a single
751/// unconditional branch between them, and BB contains no other non-phi
752/// instructions.
753bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
754 const BasicBlock *DestBB) const {
755 // We only want to eliminate blocks whose phi nodes are used by phi nodes in
756 // the successor. If there are more complex conditions (e.g. preheaders),
757 // don't mess around with them.
758 BasicBlock::const_iterator BBI = BB->begin();
759 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
760 for (const User *U : PN->users()) {
761 const Instruction *UI = cast<Instruction>(U);
762 if (UI->getParent() != DestBB || !isa<PHINode>(UI))
763 return false;
764 // If User is inside DestBB block and it is a PHINode then check
765 // incoming value. If incoming value is not from BB then this is
766 // a complex condition (e.g. preheaders) we want to avoid here.
767 if (UI->getParent() == DestBB) {
768 if (const PHINode *UPN = dyn_cast<PHINode>(UI))
769 for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
770 Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
771 if (Insn && Insn->getParent() == BB &&
772 Insn->getParent() != UPN->getIncomingBlock(I))
773 return false;
774 }
775 }
776 }
777 }
778
779 // If BB and DestBB contain any common predecessors, then the phi nodes in BB
780 // and DestBB may have conflicting incoming values for the block. If so, we
781 // can't merge the block.
782 const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
783 if (!DestBBPN) return true; // no conflict.
784
785 // Collect the preds of BB.
786 SmallPtrSet<const BasicBlock*, 16> BBPreds;
787 if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
788 // It is faster to get preds from a PHI than with pred_iterator.
789 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
790 BBPreds.insert(BBPN->getIncomingBlock(i));
791 } else {
792 BBPreds.insert(pred_begin(BB), pred_end(BB));
793 }
794
795 // Walk the preds of DestBB.
796 for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
797 BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
798 if (BBPreds.count(Pred)) { // Common predecessor?
799 BBI = DestBB->begin();
800 while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
801 const Value *V1 = PN->getIncomingValueForBlock(Pred);
802 const Value *V2 = PN->getIncomingValueForBlock(BB);
803
804 // If V2 is a phi node in BB, look up what the mapped value will be.
805 if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
806 if (V2PN->getParent() == BB)
807 V2 = V2PN->getIncomingValueForBlock(Pred);
808
809 // If there is a conflict, bail out.
810 if (V1 != V2) return false;
811 }
812 }
813 }
814
815 return true;
816}
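// Illustrative sketch, not part of the original file, of the conflict checked
// above: if BB and DestBB share a predecessor %p, a PHI in DestBB may already
// carry a value for %p that differs from the one it would inherit through BB:
//
//   destbb:
//     %x = phi i32 [ 1, %p ], [ %y, %bb ]   ; %y is "phi i32 [ 2, %p ]" in %bb
//
// Merging would require %x to take both 1 and 2 on the edge from %p, so
// canMergeBlocks returns false for this shape.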
817
818
819/// Eliminate a basic block that has only phi's and an unconditional branch in
820/// it.
821void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
822 BranchInst *BI = cast<BranchInst>(BB->getTerminator());
823 BasicBlock *DestBB = BI->getSuccessor(0);
824
825 DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n" << *BB << *DestBB);
826
827 // If the destination block has a single pred, then this is a trivial edge,
828 // just collapse it.
829 if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
830 if (SinglePred != DestBB) {
831 // Remember if SinglePred was the entry block of the function. If so, we
832 // will need to move BB back to the entry position.
833 bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock();
834 MergeBasicBlockIntoOnlyPred(DestBB, nullptr);
835
836 if (isEntry && BB != &BB->getParent()->getEntryBlock())
837 BB->moveBefore(&BB->getParent()->getEntryBlock());
838
839 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
840 return;
841 }
842 }
843
844 // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
845 // to handle the new incoming edges it is about to have.
846 PHINode *PN;
847 for (BasicBlock::iterator BBI = DestBB->begin();
848 (PN = dyn_cast<PHINode>(BBI)); ++BBI) {
849 // Remove the incoming value for BB, and remember it.
850 Value *InVal = PN->removeIncomingValue(BB, false);
851
852 // Two options: either the InVal is a phi node defined in BB or it is some
853 // value that dominates BB.
854 PHINode *InValPhi = dyn_cast<PHINode>(InVal);
855 if (InValPhi && InValPhi->getParent() == BB) {
856 // Add all of the input values of the input PHI as inputs of this phi.
857 for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
858 PN->addIncoming(InValPhi->getIncomingValue(i),
859 InValPhi->getIncomingBlock(i));
860 } else {
861 // Otherwise, add one instance of the dominating value for each edge that
862 // we will be adding.
863 if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
864 for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
865 PN->addIncoming(InVal, BBPN->getIncomingBlock(i));
866 } else {
867 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
868 PN->addIncoming(InVal, *PI);
869 }
870 }
871 }
872
873 // The PHIs are now updated, change everything that refers to BB to use
874 // DestBB and remove BB.
875 BB->replaceAllUsesWith(DestBB);
876 BB->eraseFromParent();
877 ++NumBlocksElim;
878
879 DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
880}
881
882// Computes a map of base pointer relocation instructions to corresponding
883// derived pointer relocation instructions given a vector of all relocate calls
884static void computeBaseDerivedRelocateMap(
885 const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
886 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
887 &RelocateInstMap) {
888 // Collect information in two maps: one primarily for locating the base object
889 // while filling the second map; the second map is the final structure holding
890 // a mapping between Base and corresponding Derived relocate calls
891 DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
892 for (auto *ThisRelocate : AllRelocateCalls) {
893 auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
894 ThisRelocate->getDerivedPtrIndex());
895 RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
896 }
897 for (auto &Item : RelocateIdxMap) {
898 std::pair<unsigned, unsigned> Key = Item.first;
899 if (Key.first == Key.second)
900 // Base relocation: nothing to insert
901 continue;
902
903 GCRelocateInst *I = Item.second;
904 auto BaseKey = std::make_pair(Key.first, Key.first);
905
906 // We're iterating over RelocateIdxMap so we cannot modify it.
907 auto MaybeBase = RelocateIdxMap.find(BaseKey);
908 if (MaybeBase == RelocateIdxMap.end())
909 // TODO: We might want to insert a new base object relocate and gep off
910 // that, if there are enough derived object relocates.
911 continue;
912
913 RelocateInstMap[MaybeBase->second].push_back(I);
914 }
915}
916
917// Accepts a GEP and extracts the operands into a vector provided they're all
918// small integer constants
919static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
920 SmallVectorImpl<Value *> &OffsetV) {
921 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
922 // Only accept small constant integer operands
923 auto Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
924 if (!Op || Op->getZExtValue() > 20)
925 return false;
926 }
927
928 for (unsigned i = 1; i < GEP->getNumOperands(); i++)
929 OffsetV.push_back(GEP->getOperand(i));
930 return true;
931}
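// Illustrative sketch, not part of the original file: the helper above accepts
// a GEP whose indices are all small constants, e.g.
//   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 15
// and rejects
//   %derived = getelementptr i8, i8 addrspace(1)* %base, i64 %idx
// because %idx is not a constant integer; constants larger than 20 are
// rejected as well.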
932
933// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
934// replace, computes a replacement, and applies it.
935static bool
936simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
937 const SmallVectorImpl<GCRelocateInst *> &Targets) {
938 bool MadeChange = false;
939 for (GCRelocateInst *ToReplace : Targets) {
940 assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
941 "Not relocating a derived object of the original base object");
942 if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
943 // A duplicate relocate call. TODO: coalesce duplicates.
944 continue;
945 }
946
947 if (RelocatedBase->getParent() != ToReplace->getParent()) {
948 // Base and derived relocates are in different basic blocks.
949 // In this case transform is only valid when base dominates derived
950 // relocate. However it would be too expensive to check dominance
951 // for each such relocate, so we skip the whole transformation.
952 continue;
953 }
954
955 Value *Base = ToReplace->getBasePtr();
956 auto Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
957 if (!Derived || Derived->getPointerOperand() != Base)
958 continue;
959
960 SmallVector<Value *, 2> OffsetV;
961 if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
962 continue;
963
964 // Create a Builder and replace the target callsite with a gep
965 assert(RelocatedBase->getNextNode() &&
966 "Should always have one since it's not a terminator");
967
968 // Insert after RelocatedBase
969 IRBuilder<> Builder(RelocatedBase->getNextNode());
970 Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
971
972 // If gc_relocate does not match the actual type, cast it to the right type.
973 // In theory, there must be a bitcast after gc_relocate if the type does not
974 // match, and we should reuse it to get the derived pointer. But there could be
975 // cases like this:
976 // bb1:
977 // ...
978 // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
979 // br label %merge
980 //
981 // bb2:
982 // ...
983 // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
984 // br label %merge
985 //
986 // merge:
987 // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
988 // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
989 //
990 // In this case, we cannot find the bitcast anymore. So we insert a new bitcast
991 // whether there is already one or not. In this way, we can handle all cases, and
992 // the extra bitcast should be optimized away in later passes.
993 Value *ActualRelocatedBase = RelocatedBase;
994 if (RelocatedBase->getType() != Base->getType()) {
995 ActualRelocatedBase =
996 Builder.CreateBitCast(RelocatedBase, Base->getType());
997 }
998 Value *Replacement = Builder.CreateGEP(
999 Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1000 Replacement->takeName(ToReplace);
1001 // If the newly generated derived pointer's type does not match the original derived
1002 // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1003 Value *ActualReplacement = Replacement;
1004 if (Replacement->getType() != ToReplace->getType()) {
1005 ActualReplacement =
1006 Builder.CreateBitCast(Replacement, ToReplace->getType());
1007 }
1008 ToReplace->replaceAllUsesWith(ActualReplacement);
1009 ToReplace->eraseFromParent();
1010
1011 MadeChange = true;
1012 }
1013 return MadeChange;
1014}
1015
1016// Turns this:
1017//
1018// %base = ...
1019// %ptr = gep %base + 15
1020// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1021// %base' = relocate(%tok, i32 4, i32 4)
1022// %ptr' = relocate(%tok, i32 4, i32 5)
1023// %val = load %ptr'
1024//
1025// into this:
1026//
1027// %base = ...
1028// %ptr = gep %base + 15
1029// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1030// %base' = gc.relocate(%tok, i32 4, i32 4)
1031// %ptr' = gep %base' + 15
1032// %val = load %ptr'
1033bool CodeGenPrepare::simplifyOffsetableRelocate(Instruction &I) {
1034 bool MadeChange = false;
1035 SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1036
1037 for (auto *U : I.users())
1038 if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1039 // Collect all the relocate calls associated with a statepoint
1040 AllRelocateCalls.push_back(Relocate);
1041
1042 // We need at least one base pointer relocation + one derived pointer
1043 // relocation to mangle
1044 if (AllRelocateCalls.size() < 2)
1045 return false;
1046
1047 // RelocateInstMap is a mapping from the base relocate instruction to the
1048 // corresponding derived relocate instructions
1049 DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1050 computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1051 if (RelocateInstMap.empty())
1052 return false;
1053
1054 for (auto &Item : RelocateInstMap)
1055 // Item.first is the RelocatedBase to offset against
1056 // Item.second is the vector of Targets to replace
1057 MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1058 return MadeChange;
1059}
1060
1061/// SinkCast - Sink the specified cast instruction into its user blocks
1062static bool SinkCast(CastInst *CI) {
1063 BasicBlock *DefBB = CI->getParent();
1064
1065 /// InsertedCasts - Only insert a cast in each block once.
1066 DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1067
1068 bool MadeChange = false;
1069 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1070 UI != E; ) {
1071 Use &TheUse = UI.getUse();
1072 Instruction *User = cast<Instruction>(*UI);
1073
1074 // Figure out which BB this cast is used in. For PHI's this is the
1075 // appropriate predecessor block.
1076 BasicBlock *UserBB = User->getParent();
1077 if (PHINode *PN = dyn_cast<PHINode>(User)) {
1078 UserBB = PN->getIncomingBlock(TheUse);
1079 }
1080
1081 // Preincrement use iterator so we don't invalidate it.
1082 ++UI;
1083
1084 // The first insertion point of a block containing an EH pad is after the
1085 // pad. If the pad is the user, we cannot sink the cast past the pad.
1086 if (User->isEHPad())
1087 continue;
1088
1089 // If the block selected to receive the cast is an EH pad that does not
1090 // allow non-PHI instructions before the terminator, we can't sink the
1091 // cast.
1092 if (UserBB->getTerminator()->isEHPad())
1093 continue;
1094
1095 // If this user is in the same block as the cast, don't change the cast.
1096 if (UserBB == DefBB) continue;
1097
1098 // If we have already inserted a cast into this block, use it.
1099 CastInst *&InsertedCast = InsertedCasts[UserBB];
1100
1101 if (!InsertedCast) {
1102 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1103 assert(InsertPt != UserBB->end());
1104 InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1105 CI->getType(), "", &*InsertPt);
1106 }
1107
1108 // Replace a use of the cast with a use of the new cast.
1109 TheUse = InsertedCast;
1110 MadeChange = true;
1111 ++NumCastUses;
1112 }
1113
1114 // If we removed all uses, nuke the cast.
1115 if (CI->use_empty()) {
1116 CI->eraseFromParent();
1117 MadeChange = true;
1118 }
1119
1120 return MadeChange;
1121}
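// Illustrative sketch, not part of the original file: assuming %c is used in
// both %use1 and %use2, SinkCast duplicates the cast into each user block:
//
//   def:
//     %c = trunc i64 %v to i32
//     br i1 %cond, label %use1, label %use2
//
// After sinking, %use1 and %use2 each get their own trunc at their first
// insertion point, and the original %c is erased once it has no uses left.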
1122
1123/// If the specified cast instruction is a noop copy (e.g. it's casting from
1124/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1125/// reduce the number of virtual registers that must be created and coalesced.
1126///
1127/// Return true if any changes are made.
1128///
1129static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1130 const DataLayout &DL) {
1131 // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1132 // than sinking only nop casts, but is helpful on some platforms.
1133 if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1134 if (!TLI.isCheapAddrSpaceCast(ASC->getSrcAddressSpace(),
1135 ASC->getDestAddressSpace()))
1136 return false;
1137 }
1138
1139 // If this is a noop copy,
1140 EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1141 EVT DstVT = TLI.getValueType(DL, CI->getType());
1142
1143 // This is an fp<->int conversion?
1144 if (SrcVT.isInteger() != DstVT.isInteger())
1145 return false;
1146
1147 // If this is an extension, it will be a zero or sign extension, which
1148 // isn't a noop.
1149 if (SrcVT.bitsLT(DstVT)) return false;
1150
1151 // If these values will be promoted, find out what they will be promoted
1152 // to. This helps us consider truncates on PPC as noop copies when they
1153 // are.
1154 if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1155 TargetLowering::TypePromoteInteger)
1156 SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1157 if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1158 TargetLowering::TypePromoteInteger)
1159 DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1160
1161 // If, after promotion, these are the same types, this is a noop copy.
1162 if (SrcVT != DstVT)
1163 return false;
1164
1165 return SinkCast(CI);
1166}
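// Illustrative sketch, not part of the original file, assuming a target on
// which both i8 and i16 are promoted to i32: for "trunc i16 %x to i8", SrcVT
// and DstVT promote to the same type (i32), so the trunc is treated as a noop
// copy and handed to SinkCast above.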
1167
1168/// Try to combine CI into a call to the llvm.uadd.with.overflow intrinsic if
1169/// possible.
1170///
1171/// Return true if any changes were made.
1172static bool CombineUAddWithOverflow(CmpInst *CI) {
1173 Value *A, *B;
1174 Instruction *AddI;
1175 if (!match(CI,
1176 m_UAddWithOverflow(m_Value(A), m_Value(B), m_Instruction(AddI))))
1177 return false;
1178
1179 Type *Ty = AddI->getType();
1180 if (!isa<IntegerType>(Ty))
1181 return false;
1182
1183 // We don't want to move around uses of condition values this late, so we
1184 // check if it is legal to create the call to the intrinsic in the basic
1185 // block containing the icmp:
1186
1187 if (AddI->getParent() != CI->getParent() && !AddI->hasOneUse())
1188 return false;
1189
1190#ifndef NDEBUG
1191 // Someday m_UAddWithOverflow may get smarter, but this is a safe assumption
1192 // for now:
1193 if (AddI->hasOneUse())
1194 assert(*AddI->user_begin() == CI && "expected!");
1195#endif
1196
1197 Module *M = CI->getModule();
1198 Value *F = Intrinsic::getDeclaration(M, Intrinsic::uadd_with_overflow, Ty);
1199
1200 auto *InsertPt = AddI->hasOneUse() ? CI : AddI;
1201
1202 auto *UAddWithOverflow =
1203 CallInst::Create(F, {A, B}, "uadd.overflow", InsertPt);
1204 auto *UAdd = ExtractValueInst::Create(UAddWithOverflow, 0, "uadd", InsertPt);
1205 auto *Overflow =
1206 ExtractValueInst::Create(UAddWithOverflow, 1, "overflow", InsertPt);
1207
1208 CI->replaceAllUsesWith(Overflow);
1209 AddI->replaceAllUsesWith(UAdd);
1210 CI->eraseFromParent();
1211 AddI->eraseFromParent();
1212 return true;
1213}
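// Illustrative sketch, not part of the original file: the rewrite above turns
// an explicit unsigned overflow check such as
//
//   %add = add i32 %a, %b
//   %ovf = icmp ult i32 %add, %a
//
// into roughly
//
//   %res  = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
//   %uadd = extractvalue { i32, i1 } %res, 0
//   %ovf  = extractvalue { i32, i1 } %res, 1
//
// with uses of the original add and icmp redirected to %uadd and %ovf.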
1214
1215/// Sink the given CmpInst into user blocks to reduce the number of virtual
1216/// registers that must be created and coalesced. This is a clear win except on
1217/// targets with multiple condition code registers (PowerPC), where it might
1218/// lose; some adjustment may be wanted there.
1219///
1220/// Return true if any changes are made.
1221static bool SinkCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
1222 BasicBlock *DefBB = CI->getParent();
1223
1224 // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1225 if (TLI && TLI->useSoftFloat() && isa<FCmpInst>(CI))
1226 return false;
1227
1228 // Only insert a cmp in each block once.
1229 DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1230
1231 bool MadeChange = false;
1232 for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1233 UI != E; ) {
1234 Use &TheUse = UI.getUse();
1235 Instruction *User = cast<Instruction>(*UI);
1236
1237 // Preincrement use iterator so we don't invalidate it.
1238 ++UI;
1239
1240 // Don't bother for PHI nodes.
1241 if (isa<PHINode>(User))
1242 continue;
1243
1244 // Figure out which BB this cmp is used in.
1245 BasicBlock *UserBB = User->getParent();
1246
1247 // If this user is in the same block as the cmp, don't change the cmp.
1248 if (UserBB == DefBB) continue;
1249
1250 // If we have already inserted a cmp into this block, use it.
1251 CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1252
1253 if (!InsertedCmp) {
1254 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1255 assert(InsertPt != UserBB->end());
1256 InsertedCmp =
1257 CmpInst::Create(CI->getOpcode(), CI->getPredicate(),
1258 CI->getOperand(0), CI->getOperand(1), "", &*InsertPt);
1259 // Propagate the debug info.
1260 InsertedCmp->setDebugLoc(CI->getDebugLoc());
1261 }
1262
1263 // Replace a use of the cmp with a use of the new cmp.
1264 TheUse = InsertedCmp;
1265 MadeChange = true;
1266 ++NumCmpUses;
1267 }
1268
1269 // If we removed all uses, nuke the cmp.
1270 if (CI->use_empty()) {
1271 CI->eraseFromParent();
1272 MadeChange = true;
1273 }
1274
1275 return MadeChange;
1276}
1277
1278static bool OptimizeCmpExpression(CmpInst *CI, const TargetLowering *TLI) {
1279 if (SinkCmpExpression(CI, TLI))
1280 return true;
1281
1282 if (CombineUAddWithOverflow(CI))
1283 return true;
1284
1285 return false;
1286}
1287
1288/// Duplicate and sink the given 'and' instruction into user blocks where it is
1289/// used in a compare to allow isel to generate better code for targets where
1290/// this operation can be combined.
1291///
1292/// Return true if any changes are made.
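///
/// A minimal sketch (illustrative IR; value names are hypothetical): an 'and'
/// whose only users are (icmp eq ..., 0) in other blocks, e.g.
///   bb0:
///     %m = and i64 %x, 255
///   bb1:
///     %c = icmp eq i64 %m, 0
/// is duplicated next to each such compare and the original 'and' is erased:
///   bb1:
///     %m.dup = and i64 %x, 255
///     %c = icmp eq i64 %m.dup, 0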
1293static bool sinkAndCmp0Expression(Instruction *AndI,
1294 const TargetLowering &TLI,
1295 SetOfInstrs &InsertedInsts) {
1296 // Double-check that we're not trying to optimize an instruction that was
1297 // already optimized by some other part of this pass.
1298 assert(!InsertedInsts.count(AndI) &&
1299 "Attempting to optimize already optimized and instruction");
1300 (void) InsertedInsts;
1301
1302 // Nothing to do for single use in same basic block.
1303 if (AndI->hasOneUse() &&
1304 AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1305 return false;
1306
1307 // Try to avoid cases where sinking/duplicating is likely to increase register
1308 // pressure.
1309 if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1310 !isa<ConstantInt>(AndI->getOperand(1)) &&
1311 AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1312 return false;
1313
1314 for (auto *U : AndI->users()) {
1315 Instruction *User = cast<Instruction>(U);
1316
1317 // Only sink for and mask feeding icmp with 0.
1318 if (!isa<ICmpInst>(User))
1319 return false;
1320
1321 auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1322 if (!CmpC || !CmpC->isZero())
1323 return false;
1324 }
1325
1326 if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1327 return false;
1328
1329 DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1330 DEBUG(AndI->getParent()->dump());
1331
1332 // Push the 'and' into the same block as the icmp 0. There should only be
1333 // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1334 // others, so we don't need to keep track of which BBs we insert into.
1335 for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1336 UI != E; ) {
1337 Use &TheUse = UI.getUse();
1338 Instruction *User = cast<Instruction>(*UI);
1339
1340 // Preincrement use iterator so we don't invalidate it.
1341 ++UI;
1342
1343 DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1344
1345 // Keep the 'and' in the same place if the use is already in the same block.
1346 Instruction *InsertPt =
1347 User->getParent() == AndI->getParent() ? AndI : User;
1348 Instruction *InsertedAnd =
1349 BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1350 AndI->getOperand(1), "", InsertPt);
1351 // Propagate the debug info.
1352 InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1353
1354 // Replace a use of the 'and' with a use of the new 'and'.
1355 TheUse = InsertedAnd;
1356 ++NumAndUses;
1357 DEBUG(User->getParent()->dump());
1358 }
1359
1360 // We removed all uses, nuke the and.
1361 AndI->eraseFromParent();
1362 return true;
1363}
1364
1365/// Check if the candidates could be combined with a shift instruction, which
1366/// includes:
1367/// 1. Truncate instruction
1368/// 2. An 'and' instruction where the immediate is a mask of the low bits:
1369/// imm & (imm+1) == 0
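/// For example (illustrative): imm == 0x00ff is accepted, since
/// 0x00ff & 0x0100 == 0; imm == 0x00f0 is rejected, since
/// 0x00f0 & 0x00f1 == 0x00f0 != 0.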
1370static bool isExtractBitsCandidateUse(Instruction *User) {
1371 if (!isa<TruncInst>(User)) {
1372 if (User->getOpcode() != Instruction::And ||
1373 !isa<ConstantInt>(User->getOperand(1)))
1374 return false;
1375
1376 const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1377
1378 if ((Cimm & (Cimm + 1)).getBoolValue())
1379 return false;
1380 }
1381 return true;
1382}
1383
1384/// Sink both the shift and the truncate instructions into the blocks of the truncate's users.
1385static bool
1386SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1387 DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1388 const TargetLowering &TLI, const DataLayout &DL) {
1389 BasicBlock *UserBB = User->getParent();
1390 DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1391 TruncInst *TruncI = dyn_cast<TruncInst>(User);
1392 bool MadeChange = false;
1393
1394 for (Value::user_iterator TruncUI = TruncI->user_begin(),
1395 TruncE = TruncI->user_end();
1396 TruncUI != TruncE;) {
1397
1398 Use &TruncTheUse = TruncUI.getUse();
1399 Instruction *TruncUser = cast<Instruction>(*TruncUI);
1400 // Preincrement use iterator so we don't invalidate it.
1401
1402 ++TruncUI;
1403
1404 int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1405 if (!ISDOpcode)
1406 continue;
1407
1408 // If the use is actually a legal node, there will not be an
1409 // implicit truncate.
1410 // FIXME: always querying the result type is just an
1411 // approximation; some nodes' legality is determined by the
1412 // operand or other means. There's no good way to find out though.
1413 if (TLI.isOperationLegalOrCustom(
1414 ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1415 continue;
1416
1417 // Don't bother for PHI nodes.
1418 if (isa<PHINode>(TruncUser))
1419 continue;
1420
1421 BasicBlock *TruncUserBB = TruncUser->getParent();
1422
1423 if (UserBB == TruncUserBB)
1424 continue;
1425
1426 BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1427 CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1428
1429 if (!InsertedShift && !InsertedTrunc) {
1430 BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1431 assert(InsertPt != TruncUserBB->end());
1432 // Sink the shift
1433 if (ShiftI->getOpcode() == Instruction::AShr)
1434 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1435 "", &*InsertPt);
1436 else
1437 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1438 "", &*InsertPt);
1439
1440 // Sink the trunc
1441 BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1442 TruncInsertPt++;
1443 assert(TruncInsertPt != TruncUserBB->end());
1444
1445 InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1446 TruncI->getType(), "", &*TruncInsertPt);
1447
1448 MadeChange = true;
1449
1450 TruncTheUse = InsertedTrunc;
1451 }
1452 }
1453 return MadeChange;
1454}
1455
1456/// Sink the shift *right* instruction into user blocks if the uses could
1457/// potentially be combined with this shift instruction and generate BitExtract
1458/// instruction. It will only be applied if the architecture supports BitExtract
1459/// instruction. Here is an example:
1460/// BB1:
1461/// %x.extract.shift = lshr i64 %arg1, 32
1462/// BB2:
1463/// %x.extract.trunc = trunc i64 %x.extract.shift to i16
1464/// ==>
1465///
1466/// BB2:
1467/// %x.extract.shift.1 = lshr i64 %arg1, 32
1468/// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1469///
1470/// CodeGen will recognize the pattern in BB2 and generate BitExtract
1471/// instruction.
1472/// Return true if any changes are made.
1473static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1474 const TargetLowering &TLI,
1475 const DataLayout &DL) {
1476 BasicBlock *DefBB = ShiftI->getParent();
1477
1478 /// Only insert instructions in each block once.
1479 DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1480
1481 bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1482
1483 bool MadeChange = false;
1484 for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1485 UI != E;) {
1486 Use &TheUse = UI.getUse();
1487 Instruction *User = cast<Instruction>(*UI);
1488 // Preincrement use iterator so we don't invalidate it.
1489 ++UI;
1490
1491 // Don't bother for PHI nodes.
1492 if (isa<PHINode>(User))
1493 continue;
1494
1495 if (!isExtractBitsCandidateUse(User))
1496 continue;
1497
1498 BasicBlock *UserBB = User->getParent();
1499
1500 if (UserBB == DefBB) {
1501 // The shift and truncate instructions are in the same BB. The use of
1502 // the truncate (TruncUse) may still introduce another truncate if it is
1503 // not legal. In this case, we would like to sink both the shift and the
1504 // truncate instruction into the BB of TruncUse.
1505 // for example:
1506 // BB1:
1507 // i64 shift.result = lshr i64 opnd, imm
1508 // trunc.result = trunc shift.result to i16
1509 //
1510 // BB2:
1511 // ----> We will have an implicit truncate here if the architecture does
1512 // not have i16 compare.
1513 // cmp i16 trunc.result, opnd2
1514 //
1515 if (isa<TruncInst>(User) && shiftIsLegal
1516 // If the type of the truncate is legal, no truncate will be
1517 // introduced in other basic blocks.
1518 &&
1519 (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
1520 MadeChange =
1521 SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1522
1523 continue;
1524 }
1525 // If we have already inserted a shift into this block, use it.
1526 BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1527
1528 if (!InsertedShift) {
1529 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1530 assert(InsertPt != UserBB->end());
1531
1532 if (ShiftI->getOpcode() == Instruction::AShr)
1533 InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1534 "", &*InsertPt);
1535 else
1536 InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1537 "", &*InsertPt);
1538
1539 MadeChange = true;
1540 }
1541
1542 // Replace a use of the shift with a use of the new shift.
1543 TheUse = InsertedShift;
1544 }
1545
1546 // If we removed all uses, nuke the shift.
1547 if (ShiftI->use_empty())
1548 ShiftI->eraseFromParent();
1549
1550 return MadeChange;
1551}
1552
1553/// If counting leading or trailing zeros is an expensive operation and a zero
1554/// input is defined, add a check for zero to avoid calling the intrinsic.
1555///
1556/// We want to transform:
1557/// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
1558///
1559/// into:
1560/// entry:
1561/// %cmpz = icmp eq i64 %A, 0
1562/// br i1 %cmpz, label %cond.end, label %cond.false
1563/// cond.false:
1564/// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
1565/// br label %cond.end
1566/// cond.end:
1567/// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
1568///
1569/// If the transform is performed, return true and set ModifiedDT to true.
1570static bool despeculateCountZeros(IntrinsicInst *CountZeros,
1571 const TargetLowering *TLI,
1572 const DataLayout *DL,
1573 bool &ModifiedDT) {
1574 if (!TLI || !DL)
1575 return false;
1576
1577 // If a zero input is undefined, it doesn't make sense to despeculate that.
1578 if (match(CountZeros->getOperand(1), m_One()))
1579 return false;
1580
1581 // If it's cheap to speculate, there's nothing to do.
1582 auto IntrinsicID = CountZeros->getIntrinsicID();
1583 if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
1584 (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
1585 return false;
1586
1587 // Only handle legal scalar cases. Anything else requires too much work.
1588 Type *Ty = CountZeros->getType();
1589 unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
1590 if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
1591 return false;
1592
1593 // The intrinsic will be sunk behind a compare against zero and branch.
1594 BasicBlock *StartBlock = CountZeros->getParent();
1595 BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
1596
1597 // Create another block after the count zero intrinsic. A PHI will be added
1598 // in this block to select the result of the intrinsic or the bit-width
1599 // constant if the input to the intrinsic is zero.
1600 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
1601 BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
1602
1603 // Set up a builder to create a compare, conditional branch, and PHI.
1604 IRBuilder<> Builder(CountZeros->getContext());
1605 Builder.SetInsertPoint(StartBlock->getTerminator());
1606 Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
1607
1608 // Replace the unconditional branch that was created by the first split with
1609 // a compare against zero and a conditional branch.
1610 Value *Zero = Constant::getNullValue(Ty);
1611 Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
1612 Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
1613 StartBlock->getTerminator()->eraseFromParent();
1614
1615 // Create a PHI in the end block to select either the output of the intrinsic
1616 // or the bit width of the operand.
1617 Builder.SetInsertPoint(&EndBlock->front());
1618 PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
1619 CountZeros->replaceAllUsesWith(PN);
1620 Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
1621 PN->addIncoming(BitWidth, StartBlock);
1622 PN->addIncoming(CountZeros, CallBlock);
1623
1624 // We are explicitly handling the zero case, so we can set the intrinsic's
1625 // undefined zero argument to 'true'. This will also prevent reprocessing the
1626 // intrinsic; we only despeculate when a zero input is defined.
1627 CountZeros->setArgOperand(1, Builder.getTrue());
1628 ModifiedDT = true;
1629 return true;
1630}
1631
1632bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
1633 BasicBlock *BB = CI->getParent();
1634
1635 // Lower inline assembly if we can.
1636 // If we found an inline asm expression, and if the target knows how to
1637 // lower it to normal LLVM code, do so now.
1638 if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
1639 if (TLI->ExpandInlineAsm(CI)) {
1640 // Avoid invalidating the iterator.
1641 CurInstIterator = BB->begin();
1642 // Avoid processing instructions out of order, which could cause
1643 // reuse before a value is defined.
1644 SunkAddrs.clear();
1645 return true;
1646 }
1647 // Sink address computing for memory operands into the block.
1648 if (optimizeInlineAsmInst(CI))
1649 return true;
1650 }
1651
1652 // Align the pointer arguments to this call if the target thinks it's a good
1653 // idea
1654 unsigned MinSize, PrefAlign;
1655 if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
1656 for (auto &Arg : CI->arg_operands()) {
1657 // We want to align both objects whose address is used directly and
1658 // objects whose address is used in casts and GEPs, though it only makes
1659 // sense for GEPs if the offset is a multiple of the desired alignment and
1660 // if size - offset meets the size threshold.
1661 if (!Arg->getType()->isPointerTy())
1662 continue;
1663 APInt Offset(DL->getPointerSizeInBits(
1664 cast<PointerType>(Arg->getType())->getAddressSpace()),
1665 0);
1666 Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
1667 uint64_t Offset2 = Offset.getLimitedValue();
1668 if ((Offset2 & (PrefAlign-1)) != 0)
1669 continue;
1670 AllocaInst *AI;
1671 if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
1672 DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
1673 AI->setAlignment(PrefAlign);
1674 // Global variables can only be aligned if they are defined in this
1675 // object (i.e. they are uniquely initialized in this object), and
1676 // over-aligning global variables that have an explicit section is
1677 // forbidden.
1678 GlobalVariable *GV;
1679 if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
1680 GV->getPointerAlignment(*DL) < PrefAlign &&
1681 DL->getTypeAllocSize(GV->getValueType()) >=
1682 MinSize + Offset2)
1683 GV->setAlignment(PrefAlign);
1684 }
1685 // If this is a memcpy (or similar) then we may be able to improve the
1686 // alignment
1687 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
1688 unsigned Align = getKnownAlignment(MI->getDest(), *DL);
1689 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
1690 Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
1691 if (Align > MI->getAlignment())
1692 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
1693 }
1694 }
1695
1696 // If we have a cold call site, try to sink addressing computation into the
1697 // cold block. This interacts with our handling for loads and stores to
1698 // ensure that we can fold all uses of a potential addressing computation
1699 // into their uses. TODO: generalize this to work over profiling data
1700 if (!OptSize && CI->hasFnAttr(Attribute::Cold))
1701 for (auto &Arg : CI->arg_operands()) {
1702 if (!Arg->getType()->isPointerTy())
1703 continue;
1704 unsigned AS = Arg->getType()->getPointerAddressSpace();
1705 return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
1706 }
1707
1708 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1709 if (II) {
1710 switch (II->getIntrinsicID()) {
1711 default: break;
1712 case Intrinsic::objectsize: {
1713 // Lower all uses of llvm.objectsize.*
1714 ConstantInt *RetVal =
1715 lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true);
1716 // Substituting this can cause recursive simplifications, which can
1717 // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case
1718 // this
1719 // happens.
1720 Value *CurValue = &*CurInstIterator;
1721 WeakTrackingVH IterHandle(CurValue);
1722
1723 replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
1724
1725 // If the iterator instruction was recursively deleted, start over at the
1726 // start of the block.
1727 if (IterHandle != CurValue) {
1728 CurInstIterator = BB->begin();
1729 SunkAddrs.clear();
1730 }
1731 return true;
1732 }
1733 case Intrinsic::aarch64_stlxr:
1734 case Intrinsic::aarch64_stxr: {
1735 ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
1736 if (!ExtVal || !ExtVal->hasOneUse() ||
1737 ExtVal->getParent() == CI->getParent())
1738 return false;
1739 // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
1740 ExtVal->moveBefore(CI);
1741 // Mark this instruction as "inserted by CGP", so that other
1742 // optimizations don't touch it.
1743 InsertedInsts.insert(ExtVal);
1744 return true;
1745 }
1746 case Intrinsic::invariant_group_barrier:
1747 II->replaceAllUsesWith(II->getArgOperand(0));
1748 II->eraseFromParent();
1749 return true;
1750
1751 case Intrinsic::cttz:
1752 case Intrinsic::ctlz:
1753 // If counting zeros is expensive, try to avoid it.
1754 return despeculateCountZeros(II, TLI, DL, ModifiedDT);
1755 }
1756
1757 if (TLI) {
1758 SmallVector<Value*, 2> PtrOps;
1759 Type *AccessTy;
1760 if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
1761 while (!PtrOps.empty()) {
1762 Value *PtrVal = PtrOps.pop_back_val();
1763 unsigned AS = PtrVal->getType()->getPointerAddressSpace();
1764 if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
1765 return true;
1766 }
1767 }
1768 }
1769
1770 // From here on out we're working with named functions.
1771 if (!CI->getCalledFunction()) return false;
1772
1773 // Lower all default uses of _chk calls. This is very similar
1774 // to what InstCombineCalls does, but here we are only lowering calls
1775 // to fortified library functions (e.g. __memcpy_chk) that have the default
1776 // "don't know" as the objectsize. Anything else should be left alone.
1777 FortifiedLibCallSimplifier Simplifier(TLInfo, true);
1778 if (Value *V = Simplifier.optimizeCall(CI)) {
1779 CI->replaceAllUsesWith(V);
1780 CI->eraseFromParent();
1781 return true;
1782 }
1783 return false;
1784}
1785
1786/// Look for opportunities to duplicate return instructions to the predecessor
1787/// to enable tail call optimizations. The case it is currently looking for is:
1788/// @code
1789/// bb0:
1790/// %tmp0 = tail call i32 @f0()
1791/// br label %return
1792/// bb1:
1793/// %tmp1 = tail call i32 @f1()
1794/// br label %return
1795/// bb2:
1796/// %tmp2 = tail call i32 @f2()
1797/// br label %return
1798/// return:
1799/// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
1800/// ret i32 %retval
1801/// @endcode
1802///
1803/// =>
1804///
1805/// @code
1806/// bb0:
1807/// %tmp0 = tail call i32 @f0()
1808/// ret i32 %tmp0
1809/// bb1:
1810/// %tmp1 = tail call i32 @f1()
1811/// ret i32 %tmp1
1812/// bb2:
1813/// %tmp2 = tail call i32 @f2()
1814/// ret i32 %tmp2
1815/// @endcode
1816bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB) {
1817 if (!TLI)
1818 return false;
1819
1820 ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
1821 if (!RetI)
1822 return false;
1823
1824 PHINode *PN = nullptr;
1825 BitCastInst *BCI = nullptr;
1826 Value *V = RetI->getReturnValue();
1827 if (V) {
1828 BCI = dyn_cast<BitCastInst>(V);
1829 if (BCI)
1830 V = BCI->getOperand(0);
1831
1832 PN = dyn_cast<PHINode>(V);
1833 if (!PN)
1834 return false;
1835 }
1836
1837 if (PN && PN->getParent() != BB)
1838 return false;
1839
1840 // Make sure there are no instructions between the PHI and return, or that the
1841 // return is the first instruction in the block.
1842 if (PN) {
1843 BasicBlock::iterator BI = BB->begin();
1844 do { ++BI; } while (isa<DbgInfoIntrinsic>(BI));
1845 if (&*BI == BCI)
1846 // Also skip over the bitcast.
1847 ++BI;
1848 if (&*BI != RetI)
1849 return false;
1850 } else {
1851 BasicBlock::iterator BI = BB->begin();
1852 while (isa<DbgInfoIntrinsic>(BI)) ++BI;
1853 if (&*BI != RetI)
1854 return false;
1855 }
1856
1857 /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
1858 /// call.
1859 const Function *F = BB->getParent();
1860 SmallVector<CallInst*, 4> TailCalls;
1861 if (PN) {
1862 for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
1863 CallInst *CI = dyn_cast<CallInst>(PN->getIncomingValue(I));
1864 // Make sure the phi value is indeed produced by the tail call.
1865 if (CI && CI->hasOneUse() && CI->getParent() == PN->getIncomingBlock(I) &&
1866 TLI->mayBeEmittedAsTailCall(CI) &&
1867 attributesPermitTailCall(F, CI, RetI, *TLI))
1868 TailCalls.push_back(CI);
1869 }
1870 } else {
1871 SmallPtrSet<BasicBlock*, 4> VisitedBBs;
1872 for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
1873 if (!VisitedBBs.insert(*PI).second)
1874 continue;
1875
1876 BasicBlock::InstListType &InstList = (*PI)->getInstList();
1877 BasicBlock::InstListType::reverse_iterator RI = InstList.rbegin();
1878 BasicBlock::InstListType::reverse_iterator RE = InstList.rend();
1879 do { ++RI; } while (RI != RE && isa<DbgInfoIntrinsic>(&*RI));
1880 if (RI == RE)
1881 continue;
1882
1883 CallInst *CI = dyn_cast<CallInst>(&*RI);
1884 if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
1885 attributesPermitTailCall(F, CI, RetI, *TLI))
1886 TailCalls.push_back(CI);
1887 }
1888 }
1889
1890 bool Changed = false;
1891 for (unsigned i = 0, e = TailCalls.size(); i != e; ++i) {
1892 CallInst *CI = TailCalls[i];
1893 CallSite CS(CI);
1894
1895 // Conservatively require the attributes of the call to match those of the
1896 // return. Ignore noalias because it doesn't affect the call sequence.
1897 AttributeList CalleeAttrs = CS.getAttributes();
1898 if (AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex)
1899 .removeAttribute(Attribute::NoAlias) !=
1900 AttrBuilder(CalleeAttrs, AttributeList::ReturnIndex)
1901 .removeAttribute(Attribute::NoAlias))
1902 continue;
1903
1904 // Make sure the call instruction is followed by an unconditional branch to
1905 // the return block.
1906 BasicBlock *CallBB = CI->getParent();
1907 BranchInst *BI = dyn_cast<BranchInst>(CallBB->getTerminator());
1908 if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
1909 continue;
1910
1911 // Duplicate the return into CallBB.
1912 (void)FoldReturnIntoUncondBranch(RetI, BB, CallBB);
1913 ModifiedDT = Changed = true;
1914 ++NumRetsDup;
1915 }
1916
1917 // If we eliminated all predecessors of the block, delete the block now.
1918 if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
1919 BB->eraseFromParent();
1920
1921 return Changed;
1922}
1923
1924//===----------------------------------------------------------------------===//
1925// Memory Optimization
1926//===----------------------------------------------------------------------===//
1927
1928namespace {
1929
1930/// This is an extended version of TargetLowering::AddrMode
1931/// which holds actual Value*'s for register values.
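/// The matched address has the form BaseGV + BaseOffs + BaseReg +
/// Scale*ScaledReg; print() below renders it, e.g. (illustrative values), as
/// "[GV:@g + 16 + Base:%p + 4*%i]".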
1932struct ExtAddrMode : public TargetLowering::AddrMode {
1933 Value *BaseReg;
1934 Value *ScaledReg;
1935 ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
1936 void print(raw_ostream &OS) const;
1937 void dump() const;
1938
1939 bool operator==(const ExtAddrMode& O) const {
1940 return (BaseReg == O.BaseReg) && (ScaledReg == O.ScaledReg) &&
1941 (BaseGV == O.BaseGV) && (BaseOffs == O.BaseOffs) &&
1942 (HasBaseReg == O.HasBaseReg) && (Scale == O.Scale);
1943 }
1944};
1945
1946#ifndef NDEBUG
1947static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
1948 AM.print(OS);
1949 return OS;
1950}
1951#endif
1952
1953void ExtAddrMode::print(raw_ostream &OS) const {
1954 bool NeedPlus = false;
1955 OS << "[";
1956 if (BaseGV) {
1957 OS << (NeedPlus ? " + " : "")
1958 << "GV:";
1959 BaseGV->printAsOperand(OS, /*PrintType=*/false);
1960 NeedPlus = true;
1961 }
1962
1963 if (BaseOffs) {
1964 OS << (NeedPlus ? " + " : "")
1965 << BaseOffs;
1966 NeedPlus = true;
1967 }
1968
1969 if (BaseReg) {
1970 OS << (NeedPlus ? " + " : "")
1971 << "Base:";
1972 BaseReg->printAsOperand(OS, /*PrintType=*/false);
1973 NeedPlus = true;
1974 }
1975 if (Scale) {
1976 OS << (NeedPlus ? " + " : "")
1977 << Scale << "*";
1978 ScaledReg->printAsOperand(OS, /*PrintType=*/false);
1979 }
1980
1981 OS << ']';
1982}
1983
1984#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1985LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
1986 print(dbgs());
1987 dbgs() << '\n';
1988}
1989#endif
1990
1991/// \brief This class provides transaction-based operations on the IR.
1992/// Every change made through this class is recorded in the internal state and
1993/// can be undone (rollback) until commit is called.
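///
/// A minimal usage sketch (everything except the transaction API below is
/// hypothetical):
///   TypePromotionTransaction::ConstRestorationPt Pt = TPT.getRestorationPoint();
///   TPT.mutateType(Inst, NewTy);   // recorded action
///   if (!StillProfitable)          // hypothetical profitability check
///     TPT.rollback(Pt);            // undo every action made after Pt
///   else
///     TPT.commit();                // keep all recorded changes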
1994class TypePromotionTransaction {
1995
1996 /// \brief This represents the common interface of the individual transaction.
1997 /// Each class implements the logic for doing one specific modification on
1998 /// the IR via the TypePromotionTransaction.
1999 class TypePromotionAction {
2000 protected:
2001 /// The Instruction modified.
2002 Instruction *Inst;
2003
2004 public:
2005 /// \brief Constructor of the action.
2006 /// The constructor performs the related action on the IR.
2007 TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2008
2009 virtual ~TypePromotionAction() {}
2010
2011 /// \brief Undo the modification done by this action.
2012 /// When this method is called, the IR must be in the same state as it was
2013 /// before this action was applied.
2014 /// \pre Undoing the action works if and only if the IR is in the exact same
2015 /// state as it was directly after this action was applied.
2016 virtual void undo() = 0;
2017
2018 /// \brief Advocate every change made by this action.
2019 /// When the results on the IR of the action are to be kept, it is important
2020 /// to call this function, otherwise hidden information may be kept forever.
2021 virtual void commit() {
2022 // Nothing to be done, this action is not doing anything.
2023 }
2024 };
2025
2026 /// \brief Utility to remember the position of an instruction.
2027 class InsertionHandler {
2028 /// Position of an instruction.
2029 /// Either an instruction:
2030 /// - Is the first in a basic block: BB is used.
2031 /// - Has a previous instruction: PrevInst is used.
2032 union {
2033 Instruction *PrevInst;
2034 BasicBlock *BB;
2035 } Point;
2036 /// Remember whether or not the instruction had a previous instruction.
2037 bool HasPrevInstruction;
2038
2039 public:
2040 /// \brief Record the position of \p Inst.
2041 InsertionHandler(Instruction *Inst) {
2042 BasicBlock::iterator It = Inst->getIterator();
2043 HasPrevInstruction = (It != (Inst->getParent()->begin()));
2044 if (HasPrevInstruction)
2045 Point.PrevInst = &*--It;
2046 else
2047 Point.BB = Inst->getParent();
2048 }
2049
2050 /// \brief Insert \p Inst at the recorded position.
2051 void insert(Instruction *Inst) {
2052 if (HasPrevInstruction) {
2053 if (Inst->getParent())
2054 Inst->removeFromParent();
2055 Inst->insertAfter(Point.PrevInst);
2056 } else {
2057 Instruction *Position = &*Point.BB->getFirstInsertionPt();
2058 if (Inst->getParent())
2059 Inst->moveBefore(Position);
2060 else
2061 Inst->insertBefore(Position);
2062 }
2063 }
2064 };
2065
2066 /// \brief Move an instruction before another.
2067 class InstructionMoveBefore : public TypePromotionAction {
2068 /// Original position of the instruction.
2069 InsertionHandler Position;
2070
2071 public:
2072 /// \brief Move \p Inst before \p Before.
2073 InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2074 : TypePromotionAction(Inst), Position(Inst) {
2075 DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before << "\n");
2076 Inst->moveBefore(Before);
2077 }
2078
2079 /// \brief Move the instruction back to its original position.
2080 void undo() override {
2081 DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2082 Position.insert(Inst);
2083 }
2084 };
2085
2086 /// \brief Set the operand of an instruction with a new value.
2087 class OperandSetter : public TypePromotionAction {
2088 /// Original operand of the instruction.
2089 Value *Origin;
2090 /// Index of the modified instruction.
2091 unsigned Idx;
2092
2093 public:
2094 /// \brief Set \p Idx operand of \p Inst with \p NewVal.
2095 OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2096 : TypePromotionAction(Inst), Idx(Idx) {
2097 DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2098 << "for:" << *Inst << "\n"
2099 << "with:" << *NewVal << "\n");
2100 Origin = Inst->getOperand(Idx);
2101 Inst->setOperand(Idx, NewVal);
2102 }
2103
2104 /// \brief Restore the original value of the instruction.
2105 void undo() override {
2106 DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2107 << "for: " << *Inst << "\n"
2108 << "with: " << *Origin << "\n");
2109 Inst->setOperand(Idx, Origin);
2110 }
2111 };
2112
2113 /// \brief Hide the operands of an instruction.
2114 /// Behave as if this instruction was not using any of its operands.
2115 class OperandsHider : public TypePromotionAction {
2116 /// The list of original operands.
2117 SmallVector<Value *, 4> OriginalValues;
2118
2119 public:
2120 /// \brief Remove \p Inst from the uses of the operands of \p Inst.
2121 OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2122 DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2123 unsigned NumOpnds = Inst->getNumOperands();
2124 OriginalValues.reserve(NumOpnds);
2125 for (unsigned It = 0; It < NumOpnds; ++It) {
2126 // Save the current operand.
2127 Value *Val = Inst->getOperand(It);
2128 OriginalValues.push_back(Val);
2129 // Set a dummy one.
2130 // We could use OperandSetter here, but that would imply an overhead
2131 // that we are not willing to pay.
2132 Inst->setOperand(It, UndefValue::get(Val->getType()));
2133 }
2134 }
2135
2136 /// \brief Restore the original list of uses.
2137 void undo() override {
2138 DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2139 for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2140 Inst->setOperand(It, OriginalValues[It]);
2141 }
2142 };
2143
2144 /// \brief Build a truncate instruction.
2145 class TruncBuilder : public TypePromotionAction {
2146 Value *Val;
2147 public:
2148 /// \brief Build a truncate instruction of \p Opnd producing a \p Ty
2149 /// result.
2150 /// trunc Opnd to Ty.
2151 TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2152 IRBuilder<> Builder(Opnd);
2153 Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2154 DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2155 }
2156
2157 /// \brief Get the built value.
2158 Value *getBuiltValue() { return Val; }
2159
2160 /// \brief Remove the built instruction.
2161 void undo() override {
2162 DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2163 if (Instruction *IVal = dyn_cast<Instruction>(Val))
2164 IVal->eraseFromParent();
2165 }
2166 };
2167
2168 /// \brief Build a sign extension instruction.
2169 class SExtBuilder : public TypePromotionAction {
2170 Value *Val;
2171 public:
2172 /// \brief Build a sign extension instruction of \p Opnd producing a \p Ty
2173 /// result.
2174 /// sext Opnd to Ty.
2175 SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2176 : TypePromotionAction(InsertPt) {
2177 IRBuilder<> Builder(InsertPt);
2178 Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2179 DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2180 }
2181
2182 /// \brief Get the built value.
2183 Value *getBuiltValue() { return Val; }
2184
2185 /// \brief Remove the built instruction.
2186 void undo() override {
2187 DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2188 if (Instruction *IVal = dyn_cast<Instruction>(Val))
2189 IVal->eraseFromParent();
2190 }
2191 };
2192
2193 /// \brief Build a zero extension instruction.
2194 class ZExtBuilder : public TypePromotionAction {
2195 Value *Val;
2196 public:
2197 /// \brief Build a zero extension instruction of \p Opnd producing a \p Ty
2198 /// result.
2199 /// zext Opnd to Ty.
2200 ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2201 : TypePromotionAction(InsertPt) {
2202 IRBuilder<> Builder(InsertPt);
2203 Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2204 DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2205 }
2206
2207 /// \brief Get the built value.
2208 Value *getBuiltValue() { return Val; }
2209
2210 /// \brief Remove the built instruction.
2211 void undo() override {
2212 DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2213 if (Instruction *IVal = dyn_cast<Instruction>(Val))
2214 IVal->eraseFromParent();
2215 }
2216 };
2217
2218 /// \brief Mutate an instruction to another type.
2219 class TypeMutator : public TypePromotionAction {
2220 /// Record the original type.
2221 Type *OrigTy;
2222
2223 public:
2224 /// \brief Mutate the type of \p Inst into \p NewTy.
2225 TypeMutator(Instruction *Inst, Type *NewTy)
2226 : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2227 DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2228 << "\n");
2229 Inst->mutateType(NewTy);
2230 }
2231
2232 /// \brief Mutate the instruction back to its original type.
2233 void undo() override {
2234 DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2235 << "\n");
2236 Inst->mutateType(OrigTy);
2237 }
2238 };
2239
2240 /// \brief Replace the uses of an instruction by another instruction.
2241 class UsesReplacer : public TypePromotionAction {
2242 /// Helper structure to keep track of the replaced uses.
2243 struct InstructionAndIdx {
2244 /// The instruction using the instruction.
2245 Instruction *Inst;
2246 /// The index where this instruction is used for Inst.
2247 unsigned Idx;
2248 InstructionAndIdx(Instruction *Inst, unsigned Idx)
2249 : Inst(Inst), Idx(Idx) {}
2250 };
2251
2252 /// Keep track of the original uses (pair Instruction, Index).
2253 SmallVector<InstructionAndIdx, 4> OriginalUses;
2254 typedef SmallVectorImpl<InstructionAndIdx>::iterator use_iterator;
2255
2256 public:
2257 /// \brief Replace all the use of \p Inst by \p New.
2258 UsesReplacer(Instruction *Inst, Value *New) : TypePromotionAction(Inst) {
2259 DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2260 << "\n");
2261 // Record the original uses.
2262 for (Use &U : Inst->uses()) {
2263 Instruction *UserI = cast<Instruction>(U.getUser());
2264 OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
2265 }
2266 // Now, we can replace the uses.
2267 Inst->replaceAllUsesWith(New);
2268 }
2269
2270 /// \brief Reassign the original uses of Inst to Inst.
2271 void undo() override {
2272 DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2273 for (use_iterator UseIt = OriginalUses.begin(),
2274 EndIt = OriginalUses.end();
2275 UseIt != EndIt; ++UseIt) {
2276 UseIt->Inst->setOperand(UseIt->Idx, Inst);
2277 }
2278 }
2279 };
2280
2281 /// \brief Remove an instruction from the IR.
2282 class InstructionRemover : public TypePromotionAction {
2283 /// Original position of the instruction.
2284 InsertionHandler Inserter;
2285 /// Helper structure to hide all the links to the instruction. In other
2286 /// words, this helps to behave as if the instruction was removed.
2287 OperandsHider Hider;
2288 /// Keep track of the uses replaced, if any.
2289 UsesReplacer *Replacer;
2290 /// Keep track of instructions removed.
2291 SetOfInstrs &RemovedInsts;
2292
2293 public:
2294 /// \brief Remove all references to \p Inst and optionally replace all its
2295 /// uses with New.
2296 /// \p RemovedInsts Keep track of the instructions removed by this Action.
2297 /// \pre If !Inst->use_empty(), then New != nullptr
2298 InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
2299 Value *New = nullptr)
2300 : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
2301 Replacer(nullptr), RemovedInsts(RemovedInsts) {
2302 if (New)
2303 Replacer = new UsesReplacer(Inst, New);
2304 DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2305 RemovedInsts.insert(Inst);
2306 /// The instructions removed here will be freed after completing
2307 /// optimizeBlock() for all blocks as we need to keep track of the
2308 /// removed instructions during promotion.
2309 Inst->removeFromParent();
2310 }
2311
2312 ~InstructionRemover() override { delete Replacer; }
2313
2314 /// \brief Resurrect the instruction and reassign it to the proper uses if a
2315 /// new value was provided when building this action.
2316 void undo() override {
2317 DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2318 Inserter.insert(Inst);
2319 if (Replacer)
2320 Replacer->undo();
2321 Hider.undo();
2322 RemovedInsts.erase(Inst);
2323 }
2324 };
2325
2326public:
2327 /// Restoration point.
2328 /// The restoration point is a pointer to an action instead of an iterator
2329 /// because the iterator may be invalidated but not the pointer.
2330 typedef const TypePromotionAction *ConstRestorationPt;
2331
2332 TypePromotionTransaction(SetOfInstrs &RemovedInsts)
2333 : RemovedInsts(RemovedInsts) {}
2334
2335 /// Advocate every change made in that transaction.
2336 void commit();
2337 /// Undo all the changes made after the given point.
2338 void rollback(ConstRestorationPt Point);
2339 /// Get the current restoration point.
2340 ConstRestorationPt getRestorationPoint() const;
2341
2342 /// \name API for IR modification with state keeping to support rollback.
2343 /// @{
2344 /// Same as Instruction::setOperand.
2345 void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
2346 /// Same as Instruction::eraseFromParent.
2347 void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
2348 /// Same as Value::replaceAllUsesWith.
2349 void replaceAllUsesWith(Instruction *Inst, Value *New);
2350 /// Same as Value::mutateType.
2351 void mutateType(Instruction *Inst, Type *NewTy);
2352 /// Same as IRBuilder::createTrunc.
2353 Value *createTrunc(Instruction *Opnd, Type *Ty);
2354 /// Same as IRBuilder::createSExt.
2355 Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
2356 /// Same as IRBuilder::createZExt.
2357 Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
2358 /// Same as Instruction::moveBefore.
2359 void moveBefore(Instruction *Inst, Instruction *Before);
2360 /// @}
2361
2362private:
2363 /// The ordered list of actions made so far.
2364 SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
2365 typedef SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator CommitPt;
2366 SetOfInstrs &RemovedInsts;
2367};
2368
2369void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
2370 Value *NewVal) {
2371 Actions.push_back(
2372 make_unique<TypePromotionTransaction::OperandSetter>(Inst, Idx, NewVal));
2373}
2374
2375void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
2376 Value *NewVal) {
2377 Actions.push_back(
2378 make_unique<TypePromotionTransaction::InstructionRemover>(Inst,
2379 RemovedInsts, NewVal));
2380}
2381
2382void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
2383 Value *New) {
2384 Actions.push_back(make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
2385}
2386
2387void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
2388 Actions.push_back(make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
2389}
2390
2391Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
2392 Type *Ty) {
2393 std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
2394 Value *Val = Ptr->getBuiltValue();
2395 Actions.push_back(std::move(Ptr));
2396 return Val;
2397}
2398
2399Value *TypePromotionTransaction::createSExt(Instruction *Inst,
2400 Value *Opnd, Type *Ty) {
2401 std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
2402 Value *Val = Ptr->getBuiltValue();
2403 Actions.push_back(std::move(Ptr));
2404 return Val;
2405}
2406
2407Value *TypePromotionTransaction::createZExt(Instruction *Inst,
2408 Value *Opnd, Type *Ty) {
2409 std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
2410 Value *Val = Ptr->getBuiltValue();
2411 Actions.push_back(std::move(Ptr));
2412 return Val;
2413}
2414
2415void TypePromotionTransaction::moveBefore(Instruction *Inst,
2416 Instruction *Before) {
2417 Actions.push_back(
2418 make_unique<TypePromotionTransaction::InstructionMoveBefore>(Inst, Before));
2419}
2420
2421TypePromotionTransaction::ConstRestorationPt
2422TypePromotionTransaction::getRestorationPoint() const {
2423 return !Actions.empty() ? Actions.back().get() : nullptr;
2424}
2425
2426void TypePromotionTransaction::commit() {
2427 for (CommitPt It = Actions.begin(), EndIt = Actions.end(); It != EndIt;
2428 ++It)
2429 (*It)->commit();
2430 Actions.clear();
2431}
2432
2433void TypePromotionTransaction::rollback(
2434 TypePromotionTransaction::ConstRestorationPt Point) {
2435 while (!Actions.empty() && Point != Actions.back().get()) {
2436 std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
2437 Curr->undo();
2438 }
2439}
2440
2441/// \brief A helper class for matching addressing modes.
2442///
2443/// This encapsulates the logic for matching the target-legal addressing modes.
2444class AddressingModeMatcher {
2445 SmallVectorImpl<Instruction*> &AddrModeInsts;
2446 const TargetLowering &TLI;
2447 const TargetRegisterInfo &TRI;
2448 const DataLayout &DL;
2449
2450 /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
2451 /// the memory instruction that we're computing this address for.
2452 Type *AccessTy;
2453 unsigned AddrSpace;
2454 Instruction *MemoryInst;
2455
2456 /// This is the addressing mode that we're building up. This is
2457 /// part of the return value of this addressing mode matching stuff.
2458 ExtAddrMode &AddrMode;
2459
2460 /// The instructions inserted by other CodeGenPrepare optimizations.
2461 const SetOfInstrs &InsertedInsts;
2462 /// A map from the instructions to their type before promotion.
2463 InstrToOrigTy &PromotedInsts;
2464 /// The ongoing transaction where every action should be registered.
2465 TypePromotionTransaction &TPT;
2466
2467 /// This is set to true when we should not do profitability checks.
2468 /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
2469 bool IgnoreProfitability;
2470
2471 AddressingModeMatcher(SmallVectorImpl<Instruction *> &AMI,
2472 const TargetLowering &TLI,
2473 const TargetRegisterInfo &TRI,
2474 Type *AT, unsigned AS,
2475 Instruction *MI, ExtAddrMode &AM,
2476 const SetOfInstrs &InsertedInsts,
2477 InstrToOrigTy &PromotedInsts,
2478 TypePromotionTransaction &TPT)
2479 : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
2480 DL(MI->getModule()->getDataLayout()), AccessTy(AT), AddrSpace(AS),
2481 MemoryInst(MI), AddrMode(AM), InsertedInsts(InsertedInsts),
2482 PromotedInsts(PromotedInsts), TPT(TPT) {
2483 IgnoreProfitability = false;
2484 }
2485public:
2486
2487 /// Find the maximal addressing mode that a load/store of V can fold,
2488 /// given an access type of AccessTy. This returns a list of involved
2489 /// instructions in AddrModeInsts.
2490 /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
2491 /// optimizations.
2492 /// \p PromotedInsts maps the instructions to their type before promotion.
2493 /// \p TPT The ongoing transaction where every action should be registered.
2494 static ExtAddrMode Match(Value *V, Type *AccessTy, unsigned AS,
2495 Instruction *MemoryInst,
2496 SmallVectorImpl<Instruction*> &AddrModeInsts,
2497 const TargetLowering &TLI,
2498 const TargetRegisterInfo &TRI,
2499 const SetOfInstrs &InsertedInsts,
2500 InstrToOrigTy &PromotedInsts,
2501 TypePromotionTransaction &TPT) {
2502 ExtAddrMode Result;
2503
2504 bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI,
2505 AccessTy, AS,
2506 MemoryInst, Result, InsertedInsts,
2507 PromotedInsts, TPT).matchAddr(V, 0);
2508 (void)Success; assert(Success && "Couldn't select *anything*?");
2509 return Result;
2510 }
2511private:
2512 bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
2513 bool matchAddr(Value *V, unsigned Depth);
2514 bool matchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
2515 bool *MovedAway = nullptr);
2516 bool isProfitableToFoldIntoAddressingMode(Instruction *I,
2517 ExtAddrMode &AMBefore,
2518 ExtAddrMode &AMAfter);
2519 bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
2520 bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
2521 Value *PromotedOperand) const;
2522};
2523
2524/// Try adding ScaleReg*Scale to the current addressing mode.
2525/// Return true and update AddrMode if this addr mode is legal for the target,
2526/// false if not.
2527bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
2528 unsigned Depth) {
2529 // If Scale is 1, then this is the same as adding ScaleReg to the addressing
2530 // mode. Just process that directly.
2531 if (Scale == 1)
2532 return matchAddr(ScaleReg, Depth);
2533
2534 // If the scale is 0, it takes nothing to add this.
2535 if (Scale == 0)
2536 return true;
2537
2538 // If we already have a scale of this value, we can add to it, otherwise, we
2539 // need an available scale field.
2540 if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
2541 return false;
2542
2543 ExtAddrMode TestAddrMode = AddrMode;
2544
2545 // Add scale to turn X*4+X*3 -> X*7. This could also do things like
2546 // [A+B + A*7] -> [B+A*8].
2547 TestAddrMode.Scale += Scale;
2548 TestAddrMode.ScaledReg = ScaleReg;
2549
2550 // If the new address isn't legal, bail out.
2551 if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
2552 return false;
2553
2554 // It was legal, so commit it.
2555 AddrMode = TestAddrMode;
2556
2557 // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
2558 // to see if ScaleReg is actually X+C. If so, we can turn this into adding
2559 // X*Scale + C*Scale to addr mode.
2560 ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
2561 if (isa<Instruction>(ScaleReg) && // not a constant expr.
2562 match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
2563 TestAddrMode.ScaledReg = AddLHS;
2564 TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
2565
2566 // If this addressing mode is legal, commit it and remember that we folded
2567 // this instruction.
2568 if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
2569 AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
2570 AddrMode = TestAddrMode;
2571 return true;
2572 }
2573 }
2574
2575 // Otherwise, not (x+c)*scale, just return what we have.
2576 return true;
2577}
2578
2579/// This is a little filter, which returns true if an addressing computation
2580/// involving I might be folded into a load/store accessing it.
2581/// This doesn't need to be perfect, but needs to accept at least
2582/// the set of instructions that MatchOperationAddr can.
2583static bool MightBeFoldableInst(Instruction *I) {
2584 switch (I->getOpcode()) {
2585 case Instruction::BitCast:
2586 case Instruction::AddrSpaceCast:
2587 // Don't touch identity bitcasts.
2588 if (I->getType() == I->getOperand(0)->getType())
2589 return false;
2590 return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
2591 case Instruction::PtrToInt:
2592 // PtrToInt is always a noop, as we know that the int type is pointer sized.
2593 return true;
2594 case Instruction::IntToPtr:
2595 // We know the input is intptr_t, so this is foldable.
2596 return true;
2597 case Instruction::Add:
2598 return true;
2599 case Instruction::Mul:
2600 case Instruction::Shl:
2601 // Can only handle X*C and X << C.
2602 return isa<ConstantInt>(I->getOperand(1));
2603 case Instruction::GetElementPtr:
2604 return true;
2605 default:
2606 return false;
2607 }
2608}
2609
2610/// \brief Check whether or not \p Val is a legal instruction for \p TLI.
2611/// \note \p Val is assumed to be the product of some type promotion.
2612/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
2613/// to be legal, as the non-promoted value would have had the same state.
2614static bool isPromotedInstructionLegal(const TargetLowering &TLI,
2615 const DataLayout &DL, Value *Val) {
2616 Instruction *PromotedInst = dyn_cast<Instruction>(Val);
2617 if (!PromotedInst)
2618 return false;
2619 int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
2620 // If the ISDOpcode is undefined, it was undefined before the promotion.
2621 if (!ISDOpcode)
2622 return true;
2623 // Otherwise, check if the promoted instruction is legal or not.
2624 return TLI.isOperationLegalOrCustom(
2625 ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
2626}
2627
2628/// \brief Helper class to perform type promotion.
2629class TypePromotionHelper {
2630 /// \brief Utility function to check whether or not a sign or zero extension
2631 /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
2632 /// either using the operands of \p Inst or promoting \p Inst.
2633 /// The type of the extension is defined by \p IsSExt.
2634 /// In other words, check if:
2635 /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
2636 /// #1 Promotion applies:
2637 /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
2638 /// #2 Operand reuses:
2639 /// ext opnd1 to ConsideredExtType.
2640 /// \p PromotedInsts maps the instructions to their type before promotion.
2641 static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
2642 const InstrToOrigTy &PromotedInsts, bool IsSExt);
2643
2644 /// \brief Utility function to determine if \p OpIdx should be promoted when
2645 /// promoting \p Inst.
2646 static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
2647 return !(isa<SelectInst>(Inst) && OpIdx == 0);
2648 }
2649
2650 /// \brief Utility function to promote the operand of \p Ext when this
2651 /// operand is a promotable trunc or sext or zext.
2652 /// \p PromotedInsts maps the instructions to their type before promotion.
2653 /// \p CreatedInstsCost[out] contains the cost of all instructions
2654 /// created to promote the operand of Ext.
2655 /// Newly added extensions are inserted in \p Exts.
2656 /// Newly added truncates are inserted in \p Truncs.
2657 /// Should never be called directly.
2658 /// \return The promoted value which is used instead of Ext.
2659 static Value *promoteOperandForTruncAndAnyExt(
2660 Instruction *Ext, TypePromotionTransaction &TPT,
2661 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
2662 SmallVectorImpl<Instruction *> *Exts,
2663 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
2664
2665 /// \brief Utility function to promote the operand of \p Ext when this
2666 /// operand is promotable and is not a supported trunc or sext.
2667 /// \p PromotedInsts maps the instructions to their type before promotion.
2668 /// \p CreatedInstsCost[out] contains the cost of all the instructions
2669 /// created to promote the operand of Ext.
2670 /// Newly added extensions are inserted in \p Exts.
2671 /// Newly added truncates are inserted in \p Truncs.
2672 /// Should never be called directly.
2673 /// \return The promoted value which is used instead of Ext.
2674 static Value *promoteOperandForOther(Instruction *Ext,
2675 TypePromotionTransaction &TPT,
2676 InstrToOrigTy &PromotedInsts,
2677 unsigned &CreatedInstsCost,
2678 SmallVectorImpl<Instruction *> *Exts,
2679 SmallVectorImpl<Instruction *> *Truncs,
2680 const TargetLowering &TLI, bool IsSExt);
2681
2682 /// \see promoteOperandForOther.
2683 static Value *signExtendOperandForOther(
2684 Instruction *Ext, TypePromotionTransaction &TPT,
2685 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
2686 SmallVectorImpl<Instruction *> *Exts,
2687 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
2688 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
2689 Exts, Truncs, TLI, true);
2690 }
2691
2692 /// \see promoteOperandForOther.
2693 static Value *zeroExtendOperandForOther(
2694 Instruction *Ext, TypePromotionTransaction &TPT,
2695 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
2696 SmallVectorImpl<Instruction *> *Exts,
2697 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
2698 return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
2699 Exts, Truncs, TLI, false);
2700 }
2701
2702public:
2703 /// Type for the utility function that promotes the operand of Ext.
2704 typedef Value *(*Action)(Instruction *Ext, TypePromotionTransaction &TPT,
2705 InstrToOrigTy &PromotedInsts,
2706 unsigned &CreatedInstsCost,
2707 SmallVectorImpl<Instruction *> *Exts,
2708 SmallVectorImpl<Instruction *> *Truncs,
2709 const TargetLowering &TLI);
2710 /// \brief Given a sign/zero extend instruction \p Ext, return the appropriate
2711 /// action to promote the operand of \p Ext instead of using Ext.
2712 /// \return NULL if no promotable action is possible with the current
2713 /// sign extension.
2714 /// \p InsertedInsts keeps track of all the instructions inserted by the
2715 /// other CodeGenPrepare optimizations. This information is important
2716 /// because we do not want to promote these instructions as CodeGenPrepare
2717 /// will reinsert them later, thus creating an infinite loop: create/remove.
2718 /// \p PromotedInsts maps the instructions to their type before promotion.
2719 static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
2720 const TargetLowering &TLI,
2721 const InstrToOrigTy &PromotedInsts);
2722};
2723
2724bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
2725 Type *ConsideredExtType,
2726 const InstrToOrigTy &PromotedInsts,
2727 bool IsSExt) {
2728 // The promotion helper does not know how to deal with vector types yet.
2729 // To be able to fix that, we would need to fix the places where we
2730 // statically extend, e.g., constants and such.
2731 if (Inst->getType()->isVectorTy())
2732 return false;
2733
2734 // We can always get through zext.
2735 if (isa<ZExtInst>(Inst))
2736 return true;
2737
2738 // sext(sext) is ok too.
2739 if (IsSExt && isa<SExtInst>(Inst))
2740 return true;
2741
2742 // We can get through a binary operator if it is legal. In other words, the
2743 // binary operator must have a nuw or nsw flag.
2744 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
2745 if (BinOp && isa<OverflowingBinaryOperator>(BinOp) &&
2746 ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
2747 (IsSExt && BinOp->hasNoSignedWrap())))
2748 return true;
2749
2750 // Check if we can do the following simplification.
2751 // ext(trunc(opnd)) --> ext(opnd)
2752 if (!isa<TruncInst>(Inst))
2753 return false;
2754
2755 Value *OpndVal = Inst->getOperand(0);
2756 // Check if we can use this operand in the extension.
2757 // If the type is larger than the result type of the extension, we cannot.
2758 if (!OpndVal->getType()->isIntegerTy() ||
2759 OpndVal->getType()->getIntegerBitWidth() >
2760 ConsideredExtType->getIntegerBitWidth())
2761 return false;
2762
2763 // If the operand of the truncate is not an instruction, we will not have
2764 // any information on the dropped bits.
2765 // (Actually we could for constants, but it is not worth the extra logic.)
2766 Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
2767 if (!Opnd)
2768 return false;
2769
2770 // Check if the source type is narrow enough.
2771 // I.e., check that the trunc just drops extended bits of the same kind as
2772 // the extension.
2773 // #1 get the type of the operand and check the kind of the extended bits.
2774 const Type *OpndType;
2775 InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
2776 if (It != PromotedInsts.end() && It->second.getInt() == IsSExt)
2777 OpndType = It->second.getPointer();
2778 else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
2779 OpndType = Opnd->getOperand(0)->getType();
2780 else
2781 return false;
2782
2783 // #2 check that the truncate just drops extended bits.
2784 return Inst->getType()->getIntegerBitWidth() >=
2785 OpndType->getIntegerBitWidth();
2786}
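// For illustration, a sketch of a case canGetThrough accepts, with
// hypothetical values:
//   %w = zext i16 %v to i64
//   %t = trunc i64 %w to i32
//   %e = zext i32 %t to i64
// Here the trunc only drops bits that were zero-extended in the first place
// (i16 <= i32), so zext(trunc(%w)) may later be rewritten in terms of %w
// directly. By contrast, an add without a nuw/nsw flag is rejected above.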
2787
2788TypePromotionHelper::Action TypePromotionHelper::getAction(
2789 Instruction *Ext, const SetOfInstrs &InsertedInsts,
2790 const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
2791 assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
2792 "Unexpected instruction type");
2793 Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
2794 Type *ExtTy = Ext->getType();
2795 bool IsSExt = isa<SExtInst>(Ext);
2796 // If the operand of the extension is not an instruction, we cannot
2797 // get through.
2798 // If it is, check whether we can get through.
2799 if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
2800 return nullptr;
2801
2802 // Do not promote if the operand has been added by codegenprepare.
2803 // Otherwise, it means we are undoing an optimization that is likely to be
2804 // redone, thus causing potential infinite loop.
2805 if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
2806 return nullptr;
2807
2808 // SExt, ZExt or Trunc instructions.
2809 // Return the related handler.
2810 if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
2811 isa<ZExtInst>(ExtOpnd))
2812 return promoteOperandForTruncAndAnyExt;
2813
2814 // Regular instruction.
2815 // Abort early if we will have to insert non-free instructions.
2816 if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
2817 return nullptr;
2818 return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
2819}
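// A rough usage sketch of getAction, mirroring how matchOperationAddr below
// drives it (CreatedInstsCost and Promoted are local names used here only):
//   TypePromotionHelper::Action TPH =
//       TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
//   if (TPH) {
//     unsigned CreatedInstsCost = 0;
//     Value *Promoted =
//         TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
//     // Promoted stands in for Ext; CreatedInstsCost counts non-free exts added.
//   }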
2820
2821Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
2822 llvm::Instruction *SExt, TypePromotionTransaction &TPT,
2823 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
2824 SmallVectorImpl<Instruction *> *Exts,
2825 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
2826 // By construction, the operand of SExt is an instruction. Otherwise we cannot
2827 // get through it and this method should not be called.
2828 Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
2829 Value *ExtVal = SExt;
2830 bool HasMergedNonFreeExt = false;
2831 if (isa<ZExtInst>(SExtOpnd)) {
2832 // Replace s|zext(zext(opnd))
2833 // => zext(opnd).
2834 HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
2835 Value *ZExt =
2836 TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
2837 TPT.replaceAllUsesWith(SExt, ZExt);
2838 TPT.eraseInstruction(SExt);
2839 ExtVal = ZExt;
2840 } else {
2841 // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
2842 // => z|sext(opnd).
2843 TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
2844 }
2845 CreatedInstsCost = 0;
2846
2847 // Remove dead code.
2848 if (SExtOpnd->use_empty())
2849 TPT.eraseInstruction(SExtOpnd);
2850
2851 // Check if the extension is still needed.
2852 Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
2853 if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
2854 if (ExtInst) {
2855 if (Exts)
2856 Exts->push_back(ExtInst);
2857 CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
2858 }
2859 return ExtVal;
2860 }
2861
2862 // At this point we have: ext ty opnd to ty.
2863 // Reassign the uses of ExtInst to the opnd and remove ExtInst.
2864 Value *NextVal = ExtInst->getOperand(0);
2865 TPT.eraseInstruction(ExtInst, NextVal);
2866 return NextVal;
2867}
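// For illustration (hypothetical values): given
//   %z = zext i16 %v to i32
//   %e = sext i32 %z to i64
// the routine above merges the pair into a single
//   %e2 = zext i16 %v to i64
// and, since an extension is still needed (i16 != i64), returns %e2 with
// CreatedInstsCost reflecting whether the surviving zext is free for the
// target.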
2868
2869Value *TypePromotionHelper::promoteOperandForOther(
2870 Instruction *Ext, TypePromotionTransaction &TPT,
2871 InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
2872 SmallVectorImpl<Instruction *> *Exts,
2873 SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
2874 bool IsSExt) {
2875 // By construction, the operand of Ext is an instruction. Otherwise we cannot
2876 // get through it and this method should not be called.
2877 Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
2878 CreatedInstsCost = 0;
2879 if (!ExtOpnd->hasOneUse()) {
2880 // ExtOpnd will be promoted.
2881 // All its uses, except Ext, will need to use a truncated value of the
2882 // promoted version.
2883 // Create the truncate now.
2884 Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
2885 if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
2886 ITrunc->removeFromParent();
2887 // Insert it just after the definition.
2888 ITrunc->insertAfter(ExtOpnd);
2889 if (Truncs)
2890 Truncs->push_back(ITrunc);
2891 }
2892
2893 TPT.replaceAllUsesWith(ExtOpnd, Trunc);
2894 // Restore the operand of Ext (which has been replaced by the previous call
2895 // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
2896 TPT.setOperand(Ext, 0, ExtOpnd);
2897 }
2898
2899 // Get through the Instruction:
2900 // 1. Update its type.
2901 // 2. Replace the uses of Ext by Inst.
2902 // 3. Extend each operand that needs to be extended.
2903
2904 // Remember the original type of the instruction before promotion.
2905 // This is useful to know that the high bits are sign extended bits.
2906 PromotedInsts.insert(std::pair<Instruction *, TypeIsSExt>(
2907 ExtOpnd, TypeIsSExt(ExtOpnd->getType(), IsSExt)));
2908 // Step #1.
2909 TPT.mutateType(ExtOpnd, Ext->getType());
2910 // Step #2.
2911 TPT.replaceAllUsesWith(Ext, ExtOpnd);
2912 // Step #3.
2913 Instruction *ExtForOpnd = Ext;
2914
2915 DEBUG(dbgs() << "Propagate Ext to operands\n");
2916 for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
2917 ++OpIdx) {
2918 DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
2919 if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
2920 !shouldExtOperand(ExtOpnd, OpIdx)) {
2921 DEBUG(dbgs() << "No need to propagate\n");
2922 continue;
2923 }
2924 // Check if we can statically extend the operand.
2925 Value *Opnd = ExtOpnd->getOperand(OpIdx);
2926 if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
2927 DEBUG(dbgs() << "Statically extend\n");
2928 unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
2929 APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
2930 : Cst->getValue().zext(BitWidth);
2931 TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
2932 continue;
2933 }
2934 // UndefValues are typed, so we have to statically sign extend them.
2935 if (isa<UndefValue>(Opnd)) {
2936 DEBUG(dbgs() << "Statically extend\n");
2937 TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
2938 continue;
2939 }
2940
2941 // Otherwise we have to explicitly sign extend the operand.
2942 // Check if Ext was reused to extend an operand.
2943 if (!ExtForOpnd) {
2944 // If yes, create a new one.
2945 DEBUG(dbgs() << "More operands to ext\n");
2946 Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
2947 : TPT.createZExt(Ext, Opnd, Ext->getType());
2948 if (!isa<Instruction>(ValForExtOpnd)) {
2949 TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
2950 continue;
2951 }
2952 ExtForOpnd = cast<Instruction>(ValForExtOpnd);
2953 }
2954 if (Exts)
2955 Exts->push_back(ExtForOpnd);
2956 TPT.setOperand(ExtForOpnd, 0, Opnd);
2957
2958 // Move the sign extension before the insertion point.
2959 TPT.moveBefore(ExtForOpnd, ExtOpnd);
2960 TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
2961 CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
2962 // If more sext are required, new instructions will have to be created.
2963 ExtForOpnd = nullptr;
2964 }
2965 if (ExtForOpnd == Ext) {
2966 DEBUG(dbgs() << "Extension is useless now\n");
2967 TPT.eraseInstruction(Ext);
2968 }
2969 return ExtOpnd;
2970}
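// For illustration (hypothetical values), promoting through a one-use add:
//   %a = add nsw i32 %x, 1
//   %e = sext i32 %a to i64
// becomes, after the three steps above,
//   %e = sext i32 %x to i64      ; Ext reused to extend the remaining operand
//   %a = add nsw i64 %e, 1       ; type mutated; the constant is statically
//                                ; sign-extended to i64
// and every former use of the original %e now uses the promoted %a instead.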
2971
2972/// Check whether or not promoting an instruction to a wider type is profitable.
2973/// \p NewCost gives the cost of extension instructions created by the
2974/// promotion.
2975/// \p OldCost gives the cost of extension instructions before the promotion
2976/// plus the number of instructions that have been
2977/// matched in the addressing mode thanks to the promotion.
2978/// \p PromotedOperand is the value that has been promoted.
2979/// \return True if the promotion is profitable, false otherwise.
2980bool AddressingModeMatcher::isPromotionProfitable(
2981 unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
2982 DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost << '\n');
2983 // The cost of the new extensions is greater than the cost of the
2984 // old extension plus what we folded.
2985 // This is not profitable.
2986 if (NewCost > OldCost)
2987 return false;
2988 if (NewCost < OldCost)
2989 return true;
2990 // The promotion is neutral but it may help folding the sign extension in
2991 // loads for instance.
2992 // Check that we did not create an illegal instruction.
2993 return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
2994}
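// For illustration of the cost comparison: if the promotion created one
// non-free extension (NewCost == 1) while the original extension cost 1 and
// one extra instruction could be folded into the addressing mode
// (OldCost == 1 + 1 == 2), the promotion pays off. When NewCost == OldCost,
// the tie is broken by whether the promoted instruction is still legal.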
2995
2996/// Given an instruction or constant expr, see if we can fold the operation
2997/// into the addressing mode. If so, update the addressing mode and return
2998/// true, otherwise return false without modifying AddrMode.
2999/// If \p MovedAway is not NULL, it indicates whether or not AddrInst has to
3000/// be folded into the addressing mode on success.
3001/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
3002/// mode because it has been moved away.
3003/// Thus AddrInst must not be added in the matched instructions.
3004/// This state can happen when AddrInst is a sext, since it may be moved away.
3005/// Therefore, AddrInst may not be valid when MovedAway is true and it must
3006/// not be referenced anymore.
3007bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
3008 unsigned Depth,
3009 bool *MovedAway) {
3010 // Avoid exponential behavior on extremely deep expression trees.
3011 if (Depth >= 5) return false;
3012
3013 // By default, all matched instructions stay in place.
3014 if (MovedAway)
3015 *MovedAway = false;
3016
3017 switch (Opcode) {
3018 case Instruction::PtrToInt:
3019 // PtrToInt is always a noop, as we know that the int type is pointer sized.
3020 return matchAddr(AddrInst->getOperand(0), Depth);
3021 case Instruction::IntToPtr: {
3022 auto AS = AddrInst->getType()->getPointerAddressSpace();
3023 auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
3024 // This inttoptr is a no-op if the integer type is pointer sized.
3025 if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
3026 return matchAddr(AddrInst->getOperand(0), Depth);
3027 return false;
3028 }
3029 case Instruction::BitCast:
3030 // BitCast is always a noop, and we can handle it as long as it is
3031 // int->int or pointer->pointer (we don't want int<->fp or something).
3032 if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
3033 AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
3034 // Don't touch identity bitcasts. These were probably put here by LSR,
3035 // and we don't want to mess around with them. Assume it knows what it
3036 // is doing.
3037 AddrInst->getOperand(0)->getType() != AddrInst->getType())
3038 return matchAddr(AddrInst->getOperand(0), Depth);
3039 return false;
3040 case Instruction::AddrSpaceCast: {
3041 unsigned SrcAS
3042 = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
3043 unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
3044 if (TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3045 return matchAddr(AddrInst->getOperand(0), Depth);
3046 return false;
3047 }
3048 case Instruction::Add: {
3049 // Check to see if we can merge in the RHS then the LHS. If so, we win.
3050 ExtAddrMode BackupAddrMode = AddrMode;
3051 unsigned OldSize = AddrModeInsts.size();
3052 // Start a transaction at this point.
3053 // The LHS may match but not the RHS.
3054 // Therefore, we need a higher level restoration point to undo partially
3055 // matched operation.
3056 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3057 TPT.getRestorationPoint();
3058
3059 if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
3060 matchAddr(AddrInst->getOperand(0), Depth+1))
3061 return true;
3062
3063 // Restore the old addr mode info.
3064 AddrMode = BackupAddrMode;
3065 AddrModeInsts.resize(OldSize);
3066 TPT.rollback(LastKnownGood);
3067
3068 // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
3069 if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
3070 matchAddr(AddrInst->getOperand(1), Depth+1))
3071 return true;
3072
3073 // Otherwise we definitely can't merge the ADD in.
3074 AddrMode = BackupAddrMode;
3075 AddrModeInsts.resize(OldSize);
3076 TPT.rollback(LastKnownGood);
3077 break;
3078 }
3079 //case Instruction::Or:
3080 // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
3081 //break;
3082 case Instruction::Mul:
3083 case Instruction::Shl: {
3084 // Can only handle X*C and X << C.
3085 ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
3086 if (!RHS)
3087 return false;
3088 int64_t Scale = RHS->getSExtValue();
3089 if (Opcode == Instruction::Shl)
3090 Scale = 1LL << Scale;
3091
3092 return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
3093 }
3094 case Instruction::GetElementPtr: {
3095 // Scan the GEP. We check whether it contains constant offsets and at most
3096 // one variable offset.
3097 int VariableOperand = -1;
3098 unsigned VariableScale = 0;
3099
3100 int64_t ConstantOffset = 0;
3101 gep_type_iterator GTI = gep_type_begin(AddrInst);
3102 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
3103 if (StructType *STy = GTI.getStructTypeOrNull()) {
3104 const StructLayout *SL = DL.getStructLayout(STy);
3105 unsigned Idx =
3106 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
3107 ConstantOffset += SL->getElementOffset(Idx);
3108 } else {
3109 uint64_t TypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
3110 if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
3111 ConstantOffset += CI->getSExtValue()*TypeSize;
3112 } else if (TypeSize) { // Scales of zero don't do anything.
3113 // We only allow one variable index at the moment.
3114 if (VariableOperand != -1)
3115 return false;
3116
3117 // Remember the variable index.
3118 VariableOperand = i;
3119 VariableScale = TypeSize;
3120 }
3121 }
3122 }
3123
3124 // A common case is for the GEP to only do a constant offset. In this case,
3125 // just add it to the disp field and check validity.
3126 if (VariableOperand == -1) {
3127 AddrMode.BaseOffs += ConstantOffset;
3128 if (ConstantOffset == 0 ||
3129 TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
3130 // Check to see if we can fold the base pointer in too.
3131 if (matchAddr(AddrInst->getOperand(0), Depth+1))
3132 return true;
3133 }
3134 AddrMode.BaseOffs -= ConstantOffset;
3135 return false;
3136 }
3137
3138 // Save the valid addressing mode in case we can't match.
3139 ExtAddrMode BackupAddrMode = AddrMode;
3140 unsigned OldSize = AddrModeInsts.size();
3141
3142 // See if the scale and offset amount is valid for this target.
3143 AddrMode.BaseOffs += ConstantOffset;
3144
3145 // Match the base operand of the GEP.
3146 if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
3147 // If it couldn't be matched, just stuff the value in a register.
3148 if (AddrMode.HasBaseReg) {
3149 AddrMode = BackupAddrMode;
3150 AddrModeInsts.resize(OldSize);
3151 return false;
3152 }
3153 AddrMode.HasBaseReg = true;
3154 AddrMode.BaseReg = AddrInst->getOperand(0);
3155 }
3156
3157 // Match the remaining variable portion of the GEP.
3158 if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
3159 Depth)) {
3160 // If it couldn't be matched, try stuffing the base into a register
3161 // instead of matching it, and retrying the match of the scale.
3162 AddrMode = BackupAddrMode;
3163 AddrModeInsts.resize(OldSize);
3164 if (AddrMode.HasBaseReg)
3165 return false;
3166 AddrMode.HasBaseReg = true;
3167 AddrMode.BaseReg = AddrInst->getOperand(0);
3168 AddrMode.BaseOffs += ConstantOffset;
3169 if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
3170 VariableScale, Depth)) {
3171 // If even that didn't work, bail.
3172 AddrMode = BackupAddrMode;
3173 AddrModeInsts.resize(OldSize);
3174 return false;
3175 }
3176 }
3177
3178 return true;
3179 }
3180 case Instruction::SExt:
3181 case Instruction::ZExt: {
3182 Instruction *Ext = dyn_cast<Instruction>(AddrInst);
3183 if (!Ext)
3184 return false;
3185
3186 // Try to move this ext out of the way of the addressing mode.
3187 // Ask for a method for doing so.
3188 TypePromotionHelper::Action TPH =
3189 TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
3190 if (!TPH)
3191 return false;
3192
3193 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3194 TPT.getRestorationPoint();
3195 unsigned CreatedInstsCost = 0;
3196 unsigned ExtCost = !TLI.isExtFree(Ext);
3197 Value *PromotedOperand =
3198 TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
3199 // SExt has been moved away.
3200 // Thus either it will be rematched later in the recursive calls or it is
3201 // gone. Anyway, we must not fold it into the addressing mode at this point.
3202 // E.g.,
3203 // op = add opnd, 1
3204 // idx = ext op
3205 // addr = gep base, idx
3206 // is now:
3207 // promotedOpnd = ext opnd <- no match here
3208 // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
3209 // addr = gep base, op <- match
3210 if (MovedAway)
3211 *MovedAway = true;
3212
3213 assert(PromotedOperand &&
3214 "TypePromotionHelper should have filtered out those cases");
3215
3216 ExtAddrMode BackupAddrMode = AddrMode;
3217 unsigned OldSize = AddrModeInsts.size();
3218
3219 if (!matchAddr(PromotedOperand, Depth) ||
3220 // The total of the new cost is equal to the cost of the created
3221 // instructions.
3222 // The total of the old cost is equal to the cost of the extension plus
3223 // what we have saved in the addressing mode.
3224 !isPromotionProfitable(CreatedInstsCost,
3225 ExtCost + (AddrModeInsts.size() - OldSize),
3226 PromotedOperand)) {
3227 AddrMode = BackupAddrMode;
3228 AddrModeInsts.resize(OldSize);
3229 DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
3230 TPT.rollback(LastKnownGood);
3231 return false;
3232 }
3233 return true;
3234 }
3235 }
3236 return false;
3237}
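// For illustration, how the GetElementPtr case above decomposes a GEP
// (hypothetical IR):
//   %p = getelementptr inbounds [16 x i32], [16 x i32]* %base, i64 0, i64 %i
// yields ConstantOffset == 0 plus one variable index %i with
// VariableScale == 4 (the i32 allocation size), so on success the addressing
// mode is roughly [BaseReg = %base, ScaledReg = %i, Scale = 4], provided the
// target reports that form as legal.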
3238
3239/// If we can, try to add the value of 'Addr' into the current addressing mode.
3240/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
3241/// unmodified. This assumes that Addr is either a pointer type or intptr_t
3242/// for the target.
3243///
3244bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
3245 // Start a transaction at this point that we will rollback if the matching
3246 // fails.
3247 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3248 TPT.getRestorationPoint();
3249 if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
3250 // Fold in immediates if legal for the target.
3251 AddrMode.BaseOffs += CI->getSExtValue();
3252 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3253 return true;
3254 AddrMode.BaseOffs -= CI->getSExtValue();
3255 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
3256 // If this is a global variable, try to fold it into the addressing mode.
3257 if (!AddrMode.BaseGV) {
3258 AddrMode.BaseGV = GV;
3259 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3260 return true;
3261 AddrMode.BaseGV = nullptr;
3262 }
3263 } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
3264 ExtAddrMode BackupAddrMode = AddrMode;
3265 unsigned OldSize = AddrModeInsts.size();
3266
3267 // Check to see if it is possible to fold this operation.
3268 bool MovedAway = false;
3269 if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
3270 // This instruction may have been moved away. If so, there is nothing
3271 // to check here.
3272 if (MovedAway)
3273 return true;
3274 // Okay, it's possible to fold this. Check to see if it is actually
3275 // *profitable* to do so. We use a simple cost model to avoid increasing
3276 // register pressure too much.
3277 if (I->hasOneUse() ||
3278 isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
3279 AddrModeInsts.push_back(I);
3280 return true;
3281 }
3282
3283 // It isn't profitable to do this, roll back.
3284 //cerr << "NOT FOLDING: " << *I;
3285 AddrMode = BackupAddrMode;
3286 AddrModeInsts.resize(OldSize);
3287 TPT.rollback(LastKnownGood);
3288 }
3289 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
3290 if (matchOperationAddr(CE, CE->getOpcode(), Depth))
3291 return true;
3292 TPT.rollback(LastKnownGood);
3293 } else if (isa<ConstantPointerNull>(Addr)) {
3294 // Null pointer gets folded without affecting the addressing mode.
3295 return true;
3296 }
3297
3298 // Worst case, the target should support [reg] addressing modes. :)
3299 if (!AddrMode.HasBaseReg) {
3300 AddrMode.HasBaseReg = true;
3301 AddrMode.BaseReg = Addr;
3302 // Still check for legality in case the target supports [imm] but not [i+r].
3303 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3304 return true;
3305 AddrMode.HasBaseReg = false;
3306 AddrMode.BaseReg = nullptr;
3307 }
3308
3309 // If the base register is already taken, see if we can do [r+r].
3310 if (AddrMode.Scale == 0) {
3311 AddrMode.Scale = 1;
3312 AddrMode.ScaledReg = Addr;
3313 if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
3314 return true;
3315 AddrMode.Scale = 0;
3316 AddrMode.ScaledReg = nullptr;
3317 }
3318 // Couldn't match.
3319 TPT.rollback(LastKnownGood);
3320 return false;
3321}
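// For illustration (hypothetical values): when "%sum = add i64 %a, %b" is
// matched as an address and neither operand folds further, one operand
// typically ends up as the base register and the fallback above places the
// other as the scaled register with Scale == 1, giving the classic [r + r]
// mode if the target accepts it.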
3322
3323/// Check to see if all uses of OpVal by the specified inline asm call are due
3324/// to memory operands. If so, return true, otherwise return false.
3325static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
3326 const TargetLowering &TLI,
3327 const TargetRegisterInfo &TRI) {
3328 const Function *F = CI->getParent()->getParent();
3329 TargetLowering::AsmOperandInfoVector TargetConstraints =
3330 TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI,
3331 ImmutableCallSite(CI));
3332
3333 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
3334 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
3335
3336 // Compute the constraint code and ConstraintType to use.
3337 TLI.ComputeConstraintToUse(OpInfo, SDValue());
3338
3339 // If this asm operand is our Value*, and if it isn't an indirect memory
3340 // operand, we can't fold it!
3341 if (OpInfo.CallOperandVal == OpVal &&
3342 (OpInfo.ConstraintType != TargetLowering::C_Memory ||
3343 !OpInfo.isIndirect))
3344 return false;
3345 }
3346
3347 return true;
3348}
3349
3350/// Recursively walk all the uses of I until we find a memory use.
3351/// If we find an obviously non-foldable instruction, return true.
3352/// Add the ultimately found memory instructions to MemoryUses.
3353static bool FindAllMemoryUses(
3354 Instruction *I,
3355 SmallVectorImpl<std::pair<Instruction *, unsigned>> &MemoryUses,
3356 SmallPtrSetImpl<Instruction *> &ConsideredInsts,
3357 const TargetLowering &TLI, const TargetRegisterInfo &TRI) {
3358 // If we already considered this instruction, we're done.
3359 if (!ConsideredInsts.insert(I).second)
3360 return false;
3361
3362 // If this is an obviously unfoldable instruction, bail out.
3363 if (!MightBeFoldableInst(I))
3364 return true;
3365
3366 const bool OptSize = I->getFunction()->optForSize();
3367
3368 // Loop over all the uses, recursively processing them.
3369 for (Use &U : I->uses()) {
3370 Instruction *UserI = cast<Instruction>(U.getUser());
3371
3372 if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
3373 MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
3374 continue;
3375 }
3376
3377 if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
3378 unsigned opNo = U.getOperandNo();
3379 if (opNo != StoreInst::getPointerOperandIndex())
3380 return true; // Storing addr, not into addr.
3381 MemoryUses.push_back(std::make_pair(SI, opNo));
3382 continue;
3383 }
3384
3385 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
3386 unsigned opNo = U.getOperandNo();
3387 if (opNo != AtomicRMWInst::getPointerOperandIndex())
3388 return true; // Storing addr, not into addr.
3389 MemoryUses.push_back(std::make_pair(RMW, opNo));
3390 continue;
3391 }
3392
3393 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
3394 unsigned opNo = U.getOperandNo();
3395 if (opNo != AtomicCmpXchgInst::getPointerOperandIndex())
3396 return true; // Storing addr, not into addr.
3397 MemoryUses.push_back(std::make_pair(CmpX, opNo));
3398 continue;
3399 }
3400
3401 if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
3402 // If this is a cold call, we can sink the addressing calculation into
3403 // the cold path. See optimizeCallInst
3404 if (!OptSize && CI->hasFnAttr(Attribute::Cold))
3405 continue;
3406
3407 InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
3408 if (!IA) return true;
3409
3410 // If this is a memory operand, we're cool, otherwise bail out.
3411 if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
3412 return true;
3413 continue;
3414 }
3415
3416 if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI))
3417 return true;
3418 }
3419
3420 return false;
3421}
3422
3423/// Return true if Val is already known to be live at the use site that we're
3424/// folding it into. If so, there is no cost to include it in the addressing
3425/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
3426/// instruction already.
3427bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
3428 Value *KnownLive2) {
3429 // If Val is either of the known-live values, we know it is live!
3430 if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
3431 return true;
3432
3433 // All values other than instructions and arguments (e.g. constants) are live.
3434 if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
3435
3436 // If Val is a constant sized alloca in the entry block, it is live, this is
3437 // true because it is just a reference to the stack/frame pointer, which is
3438 // live for the whole function.
3439 if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
3440 if (AI->isStaticAlloca())
3441 return true;
3442
3443 // Check to see if this value is already used in the memory instruction's
3444 // block. If so, it's already live into the block at the very least, so we
3445 // can reasonably fold it.
3446 return Val->isUsedInBasicBlock(MemoryInst->getParent());
3447}
3448
3449/// It is possible for the addressing mode of the machine to fold the specified
3450/// instruction into a load or store that ultimately uses it.
3451/// However, the specified instruction has multiple uses.
3452/// Given this, it may actually increase register pressure to fold it
3453/// into the load. For example, consider this code:
3454///
3455/// X = ...
3456/// Y = X+1
3457/// use(Y) -> nonload/store
3458/// Z = Y+1
3459/// load Z
3460///
3461/// In this case, Y has multiple uses, and can be folded into the load of Z
3462/// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
3463/// be live at the use(Y) line. If we don't fold Y into load Z, we use one
3464/// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
3465/// number of computations either.
3466///
3467/// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
3468/// X was live across 'load Z' for other reasons, we actually *would* want to
3469/// fold the addressing mode in the Z case. This would make Y die earlier.
3470bool AddressingModeMatcher::
3471isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
3472 ExtAddrMode &AMAfter) {
3473 if (IgnoreProfitability) return true;
3474
3475 // AMBefore is the addressing mode before this instruction was folded into it,
3476 // and AMAfter is the addressing mode after the instruction was folded. Get
3477 // the set of registers referenced by AMAfter and subtract out those
3478 // referenced by AMBefore: this is the set of values which folding in this
3479 // address extends the lifetime of.
3480 //
3481 // Note that there are only two potential values being referenced here,
3482 // BaseReg and ScaleReg (global addresses are always available, as are any
3483 // folded immediates).
3484 Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
3485
3486 // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
3487 // lifetime wasn't extended by adding this instruction.
3488 if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
3489 BaseReg = nullptr;
3490 if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
3491 ScaledReg = nullptr;
3492
3493 // If folding this instruction (and its subexprs) didn't extend any live
3494 // ranges, we're ok with it.
3495 if (!BaseReg && !ScaledReg)
3496 return true;
3497
3498 // If all uses of this instruction can have the address mode sunk into them,
3499 // we can remove the addressing mode and effectively trade one live register
3500 // for another (at worst.) In this context, folding an addressing mode into
3501 // the use is just a particularly nice way of sinking it.
3502 SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
3503 SmallPtrSet<Instruction*, 16> ConsideredInsts;
3504 if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI))
3505 return false; // Has a non-memory, non-foldable use!
3506
3507 // Now that we know that all uses of this instruction are part of a chain of
3508 // computation involving only operations that could theoretically be folded
3509 // into a memory use, loop over each of these memory operation uses and see
3510 // if they could *actually* fold the instruction. The assumption is that
3511 // addressing modes are cheap and that duplicating the computation involved
3512 // many times is worthwhile, even on a fastpath. For sinking candidates
3513 // (i.e. cold call sites), this serves as a way to prevent excessive code
3514 // growth since most architectures have some reasonably small and fast way to
3515 // compute an effective address (e.g., LEA on x86).
3516 SmallVector<Instruction*, 32> MatchedAddrModeInsts;
3517 for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
3518 Instruction *User = MemoryUses[i].first;
3519 unsigned OpNo = MemoryUses[i].second;
3520
3521 // Get the access type of this use. If the use isn't a pointer, we don't
3522 // know what it accesses.
3523 Value *Address = User->getOperand(OpNo);
3524 PointerType *AddrTy = dyn_cast<PointerType>(Address->getType());
3525 if (!AddrTy)
3526 return false;
3527 Type *AddressAccessTy = AddrTy->getElementType();
3528 unsigned AS = AddrTy->getAddressSpace();
3529
3530 // Do a match against the root of this address, ignoring profitability. This
3531 // will tell us if the addressing mode for the memory operation will
3532 // *actually* cover the shared instruction.
3533 ExtAddrMode Result;
3534 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3535 TPT.getRestorationPoint();
3536 AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI,
3537 AddressAccessTy, AS,
3538 MemoryInst, Result, InsertedInsts,
3539 PromotedInsts, TPT);
3540 Matcher.IgnoreProfitability = true;
3541 bool Success = Matcher.matchAddr(Address, 0);
3542 (void)Success; assert(Success && "Couldn't select *anything*?");
3543
3544 // The match was to check the profitability, the changes made are not
3545 // part of the original matcher. Therefore, they should be dropped
3546 // otherwise the original matcher will not present the right state.
3547 TPT.rollback(LastKnownGood);
3548
3549 // If the match didn't cover I, then it won't be shared by it.
3550 if (!is_contained(MatchedAddrModeInsts, I))
3551 return false;
3552
3553 MatchedAddrModeInsts.clear();
3554 }
3555
3556 return true;
3557}
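// For illustration, in the X/Y/Z example from the comment above: folding Y
// into "load Z" keeps both X and Y live at the use(Y) line, costing a
// register, and if use(Y) is itself a non-foldable instruction then
// FindAllMemoryUses reports a non-memory use and the fold is rejected
// outright.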
3558
3559} // end anonymous namespace
3560
3561/// Return true if the specified values are defined in a
3562/// different basic block than BB.
3563static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
3564 if (Instruction *I = dyn_cast<Instruction>(V))
3565 return I->getParent() != BB;
3566 return false;
3567}
3568
3569/// Sink addressing mode computation immediately before MemoryInst if doing so
3570/// can be done without increasing register pressure. The need for the
3571/// register pressure constraint means this can end up being an all or nothing
3572/// decision for all uses of the same addressing computation.
3573///
3574/// Load and Store Instructions often have addressing modes that can do
3575/// significant amounts of computation. As such, instruction selection will try
3576/// to get the load or store to do as much computation as possible for the
3577/// program. The problem is that isel can only see within a single block. As
3578/// such, we sink as much legal addressing mode work into the block as possible.
3579///
3580/// This method is used to optimize both load/store and inline asms with memory
3581/// operands. It's also used to sink addressing computations feeding into cold
3582/// call sites into their (cold) basic block.
3583///
3584/// The motivation for handling sinking into cold blocks is that doing so can
3585/// both enable other address mode sinking (by satisfying the register pressure
3586/// constraint above), and reduce register pressure globally (by removing the
3587/// addressing mode computation from the fast path entirely.).
3588bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
3589 Type *AccessTy, unsigned AddrSpace) {
3590 Value *Repl = Addr;
3591
3592 // Try to collapse single-value PHI nodes. This is necessary to undo
3593 // unprofitable PRE transformations.
3594 SmallVector<Value*, 8> worklist;
3595 SmallPtrSet<Value*, 16> Visited;
3596 worklist.push_back(Addr);
3597
3598 // Use a worklist to iteratively look through PHI nodes, and ensure that
3599 // the addressing mode obtained from the non-PHI roots of the graph
3600 // are equivalent.
3601 Value *Consensus = nullptr;
3602 unsigned NumUsesConsensus = 0;
3603 bool IsNumUsesConsensusValid = false;
3604 SmallVector<Instruction*, 16> AddrModeInsts;
3605 ExtAddrMode AddrMode;
3606 TypePromotionTransaction TPT(RemovedInsts);
3607 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
3608 TPT.getRestorationPoint();
3609 while (!worklist.empty()) {
3610 Value *V = worklist.back();
3611 worklist.pop_back();
3612
3613 // Break use-def graph loops.
3614 if (!Visited.insert(V).second) {
3615 Consensus = nullptr;
3616 break;
3617 }
3618
3619 // For a PHI node, push all of its incoming values.
3620 if (PHINode *P = dyn_cast<PHINode>(V)) {
3621 for (Value *IncValue : P->incoming_values())
3622 worklist.push_back(IncValue);
3623 continue;
3624 }
3625
3626 // For non-PHIs, determine the addressing mode being computed. Note that
3627 // the result may differ depending on what other uses our candidate
3628 // addressing instructions might have.
3629 SmallVector<Instruction*, 16> NewAddrModeInsts;
3630 ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
3631 V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TLI, *TRI,
3632 InsertedInsts, PromotedInsts, TPT);
3633
3634 // This check is broken into two cases with very similar code to avoid using
3635 // getNumUses() as much as possible. Some values have a lot of uses, so
3636 // calling getNumUses() unconditionally caused a significant compile-time
3637 // regression.
3638 if (!Consensus) {
3639 Consensus = V;
3640 AddrMode = NewAddrMode;
3641 AddrModeInsts = NewAddrModeInsts;
3642 continue;
3643 } else if (NewAddrMode == AddrMode) {
3644 if (!IsNumUsesConsensusValid) {
3645 NumUsesConsensus = Consensus->getNumUses();
3646 IsNumUsesConsensusValid = true;
3647 }
3648
3649 // Ensure that the obtained addressing mode is equivalent to that obtained
3650 // for all other roots of the PHI traversal. Also, when choosing one
3651 // such root as representative, select the one with the most uses in order
3652 // to keep the cost modeling heuristics in AddressingModeMatcher
3653 // applicable.
3654 unsigned NumUses = V->getNumUses();
3655 if (NumUses > NumUsesConsensus) {
3656 Consensus = V;
3657 NumUsesConsensus = NumUses;
3658 AddrModeInsts = NewAddrModeInsts;
3659 }
3660 continue;
3661 }
3662
3663 Consensus = nullptr;
3664 break;
3665 }
3666
3667 // If the addressing mode couldn't be determined, or if multiple different
3668 // ones were determined, bail out now.
3669 if (!Consensus) {
3670 TPT.rollback(LastKnownGood);
3671 return false;
3672 }
3673 TPT.commit();
3674
3675 // If all the instructions matched are already in this BB, don't do anything.
3676 if (none_of(AddrModeInsts, [&](Value *V) {
3677 return IsNonLocalValue(V, MemoryInst->getParent());
3678 })) {
3679 DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode << "\n");
3680 return false;
3681 }
3682
3683 // Insert this computation right after this user. Since our caller is
3684 // scanning from the top of the BB to the bottom, reuses of the expr are
3685 // guaranteed to happen later.
3686 IRBuilder<> Builder(MemoryInst);
3687
3688 // Now that we have determined the addressing expression we want to use and
3689 // know that we have to sink it into this block, check to see if we have already
3690 // done this for some other load/store instr in this block. If so, reuse the
3691 // computation.
3692 Value *&SunkAddr = SunkAddrs[Addr];
3693 if (SunkAddr) {
3694 DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
3695 << *MemoryInst << "\n");
3696 if (SunkAddr->getType() != Addr->getType())
3697 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
3698 } else if (AddrSinkUsingGEPs ||
3699 (!AddrSinkUsingGEPs.getNumOccurrences() && TM &&
3700 SubtargetInfo->useAA())) {
3701 // By default, we use the GEP-based method when AA is used later. This
3702 // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
3703 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
3704 << *MemoryInst << "\n");
3705 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
3706 Value *ResultPtr = nullptr, *ResultIndex = nullptr;
3707
3708 // First, find the pointer.
3709 if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
3710 ResultPtr = AddrMode.BaseReg;
3711 AddrMode.BaseReg = nullptr;
3712 }
3713
3714 if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
3715 // We can't add more than one pointer together, nor can we scale a
3716 // pointer (both of which seem meaningless).
3717 if (ResultPtr || AddrMode.Scale != 1)
3718 return false;
3719
3720 ResultPtr = AddrMode.ScaledReg;
3721 AddrMode.Scale = 0;
3722 }
3723
3724 if (AddrMode.BaseGV) {
3725 if (ResultPtr)
3726 return false;
3727
3728 ResultPtr = AddrMode.BaseGV;
3729 }
3730
3731 // If the real base value actually came from an inttoptr, then the matcher
3732 // will look through it and provide only the integer value. In that case,
3733 // use it here.
3734 if (!ResultPtr && AddrMode.BaseReg) {
3735 ResultPtr =
3736 Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
3737 AddrMode.BaseReg = nullptr;
3738 } else if (!ResultPtr && AddrMode.Scale == 1) {
3739 ResultPtr =
3740 Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
3741 AddrMode.Scale = 0;
3742 }
3743
3744 if (!ResultPtr &&
3745 !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
3746 SunkAddr = Constant::getNullValue(Addr->getType());
3747 } else if (!ResultPtr) {
3748 return false;
3749 } else {
3750 Type *I8PtrTy =
3751 Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
3752 Type *I8Ty = Builder.getInt8Ty();
3753
3754 // Start with the base register. Do this first so that subsequent address
3755 // matching finds it last, which will prevent it from trying to match it
3756 // as the scaled value in case it happens to be a mul. That would be
3757 // problematic if we've sunk a different mul for the scale, because then
3758 // we'd end up sinking both muls.
3759 if (AddrMode.BaseReg) {
3760 Value *V = AddrMode.BaseReg;
3761 if (V->getType() != IntPtrTy)
3762 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
3763
3764 ResultIndex = V;
3765 }
3766
3767 // Add the scale value.
3768 if (AddrMode.Scale) {
3769 Value *V = AddrMode.ScaledReg;
3770 if (V->getType() == IntPtrTy) {
3771 // done.
3772 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
3773 cast<IntegerType>(V->getType())->getBitWidth()) {
3774 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
3775 } else {
3776 // It is only safe to sign extend the BaseReg if we know that the math
3777 // required to create it did not overflow before we extend it. Since
3778 // the original IR value was tossed in favor of a constant back when
3779 // the AddrMode was created we need to bail out gracefully if widths
3780 // do not match instead of extending it.
3781 Instruction *I = dyn_cast_or_null<Instruction>(ResultIndex);
3782 if (I && (ResultIndex != AddrMode.BaseReg))
3783 I->eraseFromParent();
3784 return false;
3785 }
3786
3787 if (AddrMode.Scale != 1)
3788 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
3789 "sunkaddr");
3790 if (ResultIndex)
3791 ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
3792 else
3793 ResultIndex = V;
3794 }
3795
3796 // Add in the Base Offset if present.
3797 if (AddrMode.BaseOffs) {
3798 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
3799 if (ResultIndex) {
3800 // We need to add this separately from the scale above to help with
3801 // SDAG consecutive load/store merging.
3802 if (ResultPtr->getType() != I8PtrTy)
3803 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
3804 ResultPtr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
3805 }
3806
3807 ResultIndex = V;
3808 }
3809
3810 if (!ResultIndex) {
3811 SunkAddr = ResultPtr;
3812 } else {
3813 if (ResultPtr->getType() != I8PtrTy)
3814 ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
3815 SunkAddr = Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
3816 }
3817
3818 if (SunkAddr->getType() != Addr->getType())
3819 SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
3820 }
3821 } else {
3822 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
3823 << *MemoryInst << "\n");
3824 Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
3825 Value *Result = nullptr;
3826
3827 // Start with the base register. Do this first so that subsequent address
3828 // matching finds it last, which will prevent it from trying to match it
3829 // as the scaled value in case it happens to be a mul. That would be
3830 // problematic if we've sunk a different mul for the scale, because then
3831 // we'd end up sinking both muls.
3832 if (AddrMode.BaseReg) {
3833 Value *V = AddrMode.BaseReg;
3834 if (V->getType()->isPointerTy())
3835 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
3836 if (V->getType() != IntPtrTy)
3837 V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
3838 Result = V;
3839 }
3840
3841 // Add the scale value.
3842 if (AddrMode.Scale) {
3843 Value *V = AddrMode.ScaledReg;
3844 if (V->getType() == IntPtrTy) {
3845 // done.
3846 } else if (V->getType()->isPointerTy()) {
3847 V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
3848 } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
3849 cast<IntegerType>(V->getType())->getBitWidth()) {
3850 V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
3851 } else {
3852 // It is only safe to sign extend the BaseReg if we know that the math
3853 // required to create it did not overflow before we extend it. Since
3854 // the original IR value was tossed in favor of a constant back when
3855 // the AddrMode was created we need to bail out gracefully if widths
3856 // do not match instead of extending it.
3857 Instruction *I = dyn_cast_or_null<Instruction>(Result);
3858 if (I && (Result != AddrMode.BaseReg))
3859 I->eraseFromParent();
3860 return false;
3861 }
3862 if (AddrMode.Scale != 1)
3863 V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
3864 "sunkaddr");
3865 if (Result)
3866 Result = Builder.CreateAdd(Result, V, "sunkaddr");
3867 else
3868 Result = V;
3869 }
3870
3871 // Add in the BaseGV if present.
3872 if (AddrMode.BaseGV) {
3873 Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
3874 if (Result)
3875 Result = Builder.CreateAdd(Result, V, "sunkaddr");
3876 else
3877 Result = V;
3878 }
3879
3880 // Add in the Base Offset if present.
3881 if (AddrMode.BaseOffs) {
3882 Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
3883 if (Result)
3884 Result = Builder.CreateAdd(Result, V, "sunkaddr");
3885 else
3886 Result = V;
3887 }
3888
3889 if (!Result)
3890 SunkAddr = Constant::getNullValue(Addr->getType());
3891 else
3892 SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
3893 }
3894
3895 MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
3896
3897 // If we have no uses, recursively delete the value and all dead instructions
3898 // using it.
3899 if (Repl->use_empty()) {
3900 // This can cause recursive deletion, which can invalidate our iterator.
3901 // Use a WeakTrackingVH to hold onto it in case this happens.
3902 Value *CurValue = &*CurInstIterator;
3903 WeakTrackingVH IterHandle(CurValue);
3904 BasicBlock *BB = CurInstIterator->getParent();
3905
3906 RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
3907
3908 if (IterHandle != CurValue) {
3909 // If the iterator instruction was recursively deleted, start over at the
3910 // start of the block.
3911 CurInstIterator = BB->begin();
3912 SunkAddrs.clear();
3913 }
3914 }
3915 ++NumMemoryInsts;
3916 return true;
3917}
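// For illustration (hypothetical IR), the GEP-based path above rebuilds an
// address such as [%base + %i * 4] next to the memory instruction roughly as:
//   %sunkaddr = mul i64 %i, 4
//   %sunkaddr1 = getelementptr i8, i8* %base8, i64 %sunkaddr   ; %base8 is
//                                                              ; %base cast to i8*
//   ... followed by a pointer cast back to the original type if needed,
// while the fallback path emits an equivalent ptrtoint/mul/add/inttoptr
// "sunkaddr" sequence instead.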
3918
3919/// If there are any memory operands, use OptimizeMemoryInst to sink their
3920/// address computing into the block when possible / profitable.
3921bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
3922 bool MadeChange = false;
3923
3924 const TargetRegisterInfo *TRI =
3925 TM->getSubtargetImpl(*CS->getParent()->getParent())->getRegisterInfo();
3926 TargetLowering::AsmOperandInfoVector TargetConstraints =
3927 TLI->ParseConstraints(*DL, TRI, CS);
3928 unsigned ArgNo = 0;
3929 for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
3930 TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
3931
3932 // Compute the constraint code and ConstraintType to use.
3933 TLI->ComputeConstraintToUse(OpInfo, SDValue());
3934
3935 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
3936 OpInfo.isIndirect) {
3937 Value *OpVal = CS->getArgOperand(ArgNo++);
3938 MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
3939 } else if (OpInfo.Type == InlineAsm::isInput)
3940 ArgNo++;
3941 }
3942
3943 return MadeChange;
3944}
3945
3946/// \brief Check if all the uses of \p Val are equivalent (or free) zero or
3947/// sign extensions.
3948static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
3949 assert(!Val->use_empty() && "Input must have at least one use");
3950 const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
3951 bool IsSExt = isa<SExtInst>(FirstUser);
3952 Type *ExtTy = FirstUser->getType();
3953 for (const User *U : Val->users()) {
3954 const Instruction *UI = cast<Instruction>(U);
3955 if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
3956 return false;
3957 Type *CurTy = UI->getType();
3958 // Same input and output types: Same instruction after CSE.
3959 if (CurTy == ExtTy)
3960 continue;
3961
3962 // If IsSExt is true, we are in this situation:
3963 // a = Val
3964 // b = sext ty1 a to ty2
3965 // c = sext ty1 a to ty3
3966 // Assuming ty2 is shorter than ty3, this could be turned into:
3967 // a = Val
3968 // b = sext ty1 a to ty2
3969 // c = sext ty2 b to ty3
3970 // However, the last sext is not free.
3971 if (IsSExt)
3972 return false;
3973
3974 // This is a ZExt, maybe this is free to extend from one type to another.
3975 // In that case, we would not account for a different use.
3976 Type *NarrowTy;
3977 Type *LargeTy;
3978 if (ExtTy->getScalarType()->getIntegerBitWidth() >
3979 CurTy->getScalarType()->getIntegerBitWidth()) {
3980 NarrowTy = CurTy;
3981 LargeTy = ExtTy;
3982 } else {
3983 NarrowTy = ExtTy;
3984 LargeTy = CurTy;
3985 }
3986
3987 if (!TLI.isZExtFree(NarrowTy, LargeTy))
3988 return false;
3989 }
3990 // All uses are the same or can be derived from one another for free.
3991 return true;
3992}
3993
3994/// \brief Try to speculatively promote extensions in \p Exts and continue
3995/// promoting through newly promoted operands recursively as far as doing so is
3996/// profitable. Save extensions profitably moved up, in \p ProfitablyMovedExts.
3997/// When some promotion happened, \p TPT contains the proper state to revert
3998/// them.
3999///
4000/// \return true if some promotion happened, false otherwise.
4001bool CodeGenPrepare::tryToPromoteExts(
4002 TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
4003 SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
4004 unsigned CreatedInstsCost) {
4005 bool Promoted = false;
4006
4007 // Iterate over all the extensions to try to promote them.
4008 for (auto I : Exts) {
4009 // Early check if we directly have ext(load).
4010 if (isa<LoadInst>(I->getOperand(0))) {
4011 ProfitablyMovedExts.push_back(I);
4012 continue;
4013 }
4014
4015 // Check whether or not we want to do any promotion. The reason we have
4016 // this check inside the for loop is to catch the case where an extension
4017 // is directly fed by a load because in such case the extension can be moved
4018 // up without any promotion on its operands.
4019 if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
4020 return false;
4021
4022 // Get the action to perform the promotion.
4023 TypePromotionHelper::Action TPH =
4024 TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
4025 // Check if we can promote.
4026 if (!TPH) {
4027 // Save the current extension as we cannot move up through its operand.
4028 ProfitablyMovedExts.push_back(I);
4029 continue;
4030 }
4031
4032 // Save the current state.
4033 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4034 TPT.getRestorationPoint();
4035 SmallVector<Instruction *, 4> NewExts;
4036 unsigned NewCreatedInstsCost = 0;
4037 unsigned ExtCost = !TLI->isExtFree(I);
4038 // Promote.
4039 Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
4040 &NewExts, nullptr, *TLI);
4041 assert(PromotedVal &&
4042 "TypePromotionHelper should have filtered out those cases");
4043
4044 // We would be able to merge only one extension in a load.
4045 // Therefore, if we have more than 1 new extension we heuristically
4046 // cut this search path, because it means we degrade the code quality.
4047 // With exactly 2, the transformation is neutral, because we will merge
4048 // one extension but leave one. However, we optimistically keep going,
4049 // because the new extension may be removed too.
4050 long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
4051 // FIXME: It would be possible to propagate a negative value instead of
4052 // conservatively ceiling it to 0.
4053 TotalCreatedInstsCost =
4054 std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
4055 if (!StressExtLdPromotion &&
4056 (TotalCreatedInstsCost > 1 ||
4057 !isPromotedInstructionLegal(*TLI, *DL, PromotedVal))) {
4058 // This promotion is not profitable, rollback to the previous state, and
4059 // save the current extension in ProfitablyMovedExts as the latest
4060 // speculative promotion turned out to be unprofitable.
4061 TPT.rollback(LastKnownGood);
4062 ProfitablyMovedExts.push_back(I);
4063 continue;
4064 }
4065 // Continue promoting NewExts as far as doing so is profitable.
4066 SmallVector<Instruction *, 2> NewlyMovedExts;
4067 (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
4068 bool NewPromoted = false;
4069 for (auto ExtInst : NewlyMovedExts) {
4070 Instruction *MovedExt = cast<Instruction>(ExtInst);
4071 Value *ExtOperand = MovedExt->getOperand(0);
4072 // If we have reached a load, we need this extra profitability check
4073 // as it could potentially be merged into an ext(load).
4074 if (isa<LoadInst>(ExtOperand) &&
4075 !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
4076 (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
4077 continue;
4078
4079 ProfitablyMovedExts.push_back(MovedExt);
4080 NewPromoted = true;
4081 }
4082
4083 // If none of speculative promotions for NewExts is profitable, rollback
4084 // and save the current extension (I) as the last profitable extension.
4085 if (!NewPromoted) {
4086 TPT.rollback(LastKnownGood);
4087 ProfitablyMovedExts.push_back(I);
4088 continue;
4089 }
4090 // The promotion is profitable.
4091 Promoted = true;
4092 }
4093 return Promoted;
4094}
4095
4096 /// Merge redundant sexts when one dominates the other.
4097bool CodeGenPrepare::mergeSExts(Function &F) {
4098 DominatorTree DT(F);
4099 bool Changed = false;
4100 for (auto &Entry : ValToSExtendedUses) {
4101 SExts &Insts = Entry.second;
4102 SExts CurPts;
4103 for (Instruction *Inst : Insts) {
4104 if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
4105 Inst->getOperand(0) != Entry.first)
4106 continue;
4107 bool inserted = false;
4108 for (auto &Pt : CurPts) {
4109 if (DT.dominates(Inst, Pt)) {
4110 Pt->replaceAllUsesWith(Inst);
4111 RemovedInsts.insert(Pt);
4112 Pt->removeFromParent();
4113 Pt = Inst;
4114 inserted = true;
4115 Changed = true;
4116 break;
4117 }
4118 if (!DT.dominates(Pt, Inst))
4119 // Give up if we need to merge in a common dominator as the
4120 // experiments show it is not profitable.
4121 continue;
4122 Inst->replaceAllUsesWith(Pt);
4123 RemovedInsts.insert(Inst);
4124 Inst->removeFromParent();
4125 inserted = true;
4126 Changed = true;
4127 break;
4128 }
4129 if (!inserted)
4130 CurPts.push_back(Inst);
4131 }
4132 }
4133 return Changed;
4134}
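E.g. (hypothetical IR names): if %s1 = sext i32 %x to i64 sits in a block that dominates an identical %s2 = sext i32 %x to i64, the loop above rewrites all uses of %s2 to %s1 and marks %s2 as removed; when neither sext dominates the other, the pair is deliberately left alone rather than being hoisted to a common dominator.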
4135
4136/// Return true, if an ext(load) can be formed from an extension in
4137/// \p MovedExts.
4138bool CodeGenPrepare::canFormExtLd(
4139 const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
4140 Instruction *&Inst, bool HasPromoted) {
4141 for (auto *MovedExtInst : MovedExts) {
4142 if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
4143 LI = cast<LoadInst>(MovedExtInst->getOperand(0));
4144 Inst = MovedExtInst;
4145 break;
4146 }
4147 }
4148 if (!LI)
4149 return false;
4150
4151 // If they're already in the same block, there's nothing to do.
4152 // Make the cheap checks first if we did not promote.
4153 // If we promoted, we need to check if it is indeed profitable.
4154 if (!HasPromoted && LI->getParent() == Inst->getParent())
4155 return false;
4156
4157 EVT VT = TLI->getValueType(*DL, Inst->getType());
4158 EVT LoadVT = TLI->getValueType(*DL, LI->getType());
4159
4160 // If the load has other users and the truncate is not free, this probably
4161 // isn't worthwhile.
4162 if (!LI->hasOneUse() && (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
4163 !TLI->isTruncateFree(Inst->getType(), LI->getType()))
4164 return false;
4165
4166 // Check whether the target supports casts folded into loads.
4167 unsigned LType;
4168 if (isa<ZExtInst>(Inst))
4169 LType = ISD::ZEXTLOAD;
4170 else {
4171 assert(isa<SExtInst>(Inst) && "Unexpected ext type!");
4172 LType = ISD::SEXTLOAD;
4173 }
4174
4175 return TLI->isLoadExtLegal(LType, VT, LoadVT);
4176}
4177
4178/// Move a zext or sext fed by a load into the same basic block as the load,
4179/// unless conditions are unfavorable. This allows SelectionDAG to fold the
4180/// extend into the load.
4181///
4182/// E.g.,
4183/// \code
4184/// %ld = load i32* %addr
4185/// %add = add nuw i32 %ld, 4
4186/// %zext = zext i32 %add to i64
4187 /// \endcode
4188/// =>
4189/// \code
4190/// %ld = load i32* %addr
4191/// %zext = zext i32 %ld to i64
4192/// %add = add nuw i64 %zext, 4
4193 /// \endcode
4194 /// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
4195 /// allows us to match zext(load i32*) to i64.
4196///
4197/// Also, try to promote the computations used to obtain a sign extended
4198/// value used into memory accesses.
4199/// E.g.,
4200/// \code
4201/// a = add nsw i32 b, 3
4202/// d = sext i32 a to i64
4203/// e = getelementptr ..., i64 d
4204/// \endcode
4205/// =>
4206/// \code
4207/// f = sext i32 b to i64
4208/// a = add nsw i64 f, 3
4209/// e = getelementptr ..., i64 a
4210/// \endcode
4211///
4212/// \p Inst[in/out] the extension may be modified during the process if some
4213/// promotions apply.
4214bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
4215 // ExtLoad formation and address type promotion infrastructure requires TLI to
4216 // be effective.
4217 if (!TLI)
4218 return false;
4219
4220 bool AllowPromotionWithoutCommonHeader = false;
4221 /// See if it is an interesting sext operation for the address type
4222 /// promotion before trying to promote it, e.g., the ones with the right
4223 /// type and used in memory accesses.
4224 bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
4225 *Inst, AllowPromotionWithoutCommonHeader);
4226 TypePromotionTransaction TPT(RemovedInsts);
4227 TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4228 TPT.getRestorationPoint();
4229 SmallVector<Instruction *, 1> Exts;
4230 SmallVector<Instruction *, 2> SpeculativelyMovedExts;
4231 Exts.push_back(Inst);
4232
4233 bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
4234
4235 // Look for a load being extended.
4236 LoadInst *LI = nullptr;
4237 Instruction *ExtFedByLoad;
4238
4239 // Try to promote a chain of computation if it allows to form an extended
4240 // load.
4241 if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
4242 assert(LI && ExtFedByLoad && "Expect a valid load and extension");
4243 TPT.commit();
4244 // Move the extend into the same block as the load
4245 ExtFedByLoad->removeFromParent();
4246 ExtFedByLoad->insertAfter(LI);
4247 // CGP does not check if the zext would be speculatively executed when moved
4248 // to the same basic block as the load. Preserving its original location
4249 // would pessimize the debugging experience, as well as negatively impact
4250 // the quality of sample pgo. We don't want to use "line 0" as that has a
4251 // size cost in the line-table section and logically the zext can be seen as
4252 // part of the load. Therefore we conservatively reuse the same debug
4253 // location for the load and the zext.
4254 ExtFedByLoad->setDebugLoc(LI->getDebugLoc());
4255 ++NumExtsMoved;
4256 Inst = ExtFedByLoad;
4257 return true;
4258 }
4259
4260 // Continue promoting SExts if known as considerable depending on targets.
4261 if (ATPConsiderable &&
4262 performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
4263 HasPromoted, TPT, SpeculativelyMovedExts))
4264 return true;
4265
4266 TPT.rollback(LastKnownGood);
4267 return false;
4268}
4269
4270// Perform address type promotion if doing so is profitable.
4271// If AllowPromotionWithoutCommonHeader == false, we should find other sext
4272// instructions that sign extended the same initial value. However, if
4273 // AllowPromotionWithoutCommonHeader == true, we expect that promoting the
4274 // extension by itself is profitable.
4275bool CodeGenPrepare::performAddressTypePromotion(
4276 Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
4277 bool HasPromoted, TypePromotionTransaction &TPT,
4278 SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
4279 bool Promoted = false;
4280 SmallPtrSet<Instruction *, 1> UnhandledExts;
4281 bool AllSeenFirst = true;
4282 for (auto I : SpeculativelyMovedExts) {
4283 Value *HeadOfChain = I->getOperand(0);
4284 DenseMap<Value *, Instruction *>::iterator AlreadySeen =
4285 SeenChainsForSExt.find(HeadOfChain);
4286 // If there is an unhandled SExt which has the same header, try to promote
4287 // it as well.
4288 if (AlreadySeen != SeenChainsForSExt.end()) {
4289 if (AlreadySeen->second != nullptr)
4290 UnhandledExts.insert(AlreadySeen->second);
4291 AllSeenFirst = false;
4292 }
4293 }
4294
4295 if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
4296 SpeculativelyMovedExts.size() == 1)) {
4297 TPT.commit();
4298 if (HasPromoted)
4299 Promoted = true;
4300 for (auto I : SpeculativelyMovedExts) {
4301 Value *HeadOfChain = I->getOperand(0);
4302 SeenChainsForSExt[HeadOfChain] = nullptr;
4303 ValToSExtendedUses[HeadOfChain].push_back(I);
4304 }
4305 // Update Inst as promotion happened.
4306 Inst = SpeculativelyMovedExts.pop_back_val();
4307 } else {
4308 // This is the first chain visited from the header, keep the current chain
4309 // as unhandled. Defer promoting this until we encounter another SExt
4310 // chain derived from the same header.
4311 for (auto I : SpeculativelyMovedExts) {
4312 Value *HeadOfChain = I->getOperand(0);
4313 SeenChainsForSExt[HeadOfChain] = Inst;
4314 }
4315 return false;
4316 }
4317
4318 if (!AllSeenFirst && !UnhandledExts.empty())
4319 for (auto VisitedSExt : UnhandledExts) {
4320 if (RemovedInsts.count(VisitedSExt))
4321 continue;
4322 TypePromotionTransaction TPT(RemovedInsts);
4323 SmallVector<Instruction *, 1> Exts;
4324 SmallVector<Instruction *, 2> Chains;
4325 Exts.push_back(VisitedSExt);
4326 bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
4327 TPT.commit();
4328 if (HasPromoted)
4329 Promoted = true;
4330 for (auto I : Chains) {
4331 Value *HeadOfChain = I->getOperand(0);
4332 // Mark this as handled.
4333 SeenChainsForSExt[HeadOfChain] = nullptr;
4334 ValToSExtendedUses[HeadOfChain].push_back(I);
4335 }
4336 }
4337 return Promoted;
4338}
4339
4340bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
4341 BasicBlock *DefBB = I->getParent();
4342
4343 // If the result of a {s|z}ext and its source are both live out, rewrite all
4344 // other uses of the source with result of extension.
4345 Value *Src = I->getOperand(0);
4346 if (Src->hasOneUse())
4347 return false;
4348
4349 // Only do this xform if truncating is free.
4350 if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
4351 return false;
4352
4353 // Only safe to perform the optimization if the source is also defined in
4354 // this block.
4355 if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
4356 return false;
4357
4358 bool DefIsLiveOut = false;
4359 for (User *U : I->users()) {
4360 Instruction *UI = cast<Instruction>(U);
4361
4362 // Figure out which BB this ext is used in.
4363 BasicBlock *UserBB = UI->getParent();
4364 if (UserBB == DefBB) continue;
4365 DefIsLiveOut = true;
4366 break;
4367 }
4368 if (!DefIsLiveOut)
4369 return false;
4370
4371 // Make sure none of the uses are PHI nodes.
4372 for (User *U : Src->users()) {
4373 Instruction *UI = cast<Instruction>(U);
4374 BasicBlock *UserBB = UI->getParent();
4375 if (UserBB == DefBB) continue;
4376 // Be conservative. We don't want this xform to end up introducing
4377 // reloads just before load / store instructions.
4378 if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
4379 return false;
4380 }
4381
4382 // InsertedTruncs - Only insert one trunc in each block once.
4383 DenseMap<BasicBlock*, Instruction*> InsertedTruncs;
4384
4385 bool MadeChange = false;
4386 for (Use &U : Src->uses()) {
4387 Instruction *User = cast<Instruction>(U.getUser());
4388
4389 // Figure out which BB this ext is used in.
4390 BasicBlock *UserBB = User->getParent();
4391 if (UserBB == DefBB) continue;
4392
4393 // Both src and def are live in this block. Rewrite the use.
4394 Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
4395
4396 if (!InsertedTrunc) {
4397 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
4398 assert(InsertPt != UserBB->end());
4399 InsertedTrunc = new TruncInst(I, Src->getType(), "", &*InsertPt);
4400 InsertedInsts.insert(InsertedTrunc);
4401 }
4402
4403 // Replace a use of the {s|z}ext source with a use of the result.
4404 U = InsertedTrunc;
4405 ++NumExtUses;
4406 MadeChange = true;
4407 }
4408
4409 return MadeChange;
4410}
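E.g. (hypothetical IR names): given %ext = zext i32 %src to i64 where %src is also used by an add in another block, the loop above inserts trunc i64 %ext to i32 at that block's first insertion point and rewrites the add's operand to the trunc, so only %ext has to stay live out of the defining block; the transform bails out earlier if TLI reports the truncate as not free.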
4411
4412// Find loads whose uses only use some of the loaded value's bits. Add an "and"
4413// just after the load if the target can fold this into one extload instruction,
4414// with the hope of eliminating some of the other later "and" instructions using
4415// the loaded value. "and"s that are made trivially redundant by the insertion
4416// of the new "and" are removed by this function, while others (e.g. those whose
4417// path from the load goes through a phi) are left for isel to potentially
4418// remove.
4419//
4420// For example:
4421//
4422// b0:
4423// x = load i32
4424// ...
4425// b1:
4426// y = and x, 0xff
4427// z = use y
4428//
4429// becomes:
4430//
4431// b0:
4432// x = load i32
4433// x' = and x, 0xff
4434// ...
4435// b1:
4436// z = use x'
4437//
4438// whereas:
4439//
4440// b0:
4441// x1 = load i32
4442// ...
4443// b1:
4444// x2 = load i32
4445// ...
4446// b2:
4447// x = phi x1, x2
4448// y = and x, 0xff
4449//
4450// becomes (after a call to optimizeLoadExt for each load):
4451//
4452// b0:
4453// x1 = load i32
4454// x1' = and x1, 0xff
4455// ...
4456// b1:
4457// x2 = load i32
4458// x2' = and x2, 0xff
4459// ...
4460// b2:
4461// x = phi x1', x2'
4462// y = and x, 0xff
4463//
4464
4465bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
4466
4467 if (!Load->isSimple() ||
4468 !(Load->getType()->isIntegerTy() || Load->getType()->isPointerTy()))
4469 return false;
4470
4471 // Skip loads we've already transformed.
4472 if (Load->hasOneUse() &&
4473 InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
4474 return false;
4475
4476 // Look at all uses of Load, looking through phis, to determine how many bits
4477 // of the loaded value are needed.
4478 SmallVector<Instruction *, 8> WorkList;
4479 SmallPtrSet<Instruction *, 16> Visited;
4480 SmallVector<Instruction *, 8> AndsToMaybeRemove;
4481 for (auto *U : Load->users())
4482 WorkList.push_back(cast<Instruction>(U));
4483
4484 EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
4485 unsigned BitWidth = LoadResultVT.getSizeInBits();
4486 APInt DemandBits(BitWidth, 0);
4487 APInt WidestAndBits(BitWidth, 0);
4488
4489 while (!WorkList.empty()) {
4490 Instruction *I = WorkList.back();
4491 WorkList.pop_back();
4492
4493 // Break use-def graph loops.
4494 if (!Visited.insert(I).second)
4495 continue;
4496
4497 // For a PHI node, push all of its users.
4498 if (auto *Phi = dyn_cast<PHINode>(I)) {
4499 for (auto *U : Phi->users())
4500 WorkList.push_back(cast<Instruction>(U));
4501 continue;
4502 }
4503
4504 switch (I->getOpcode()) {
4505 case llvm::Instruction::And: {
4506 auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
4507 if (!AndC)
4508 return false;
4509 APInt AndBits = AndC->getValue();
4510 DemandBits |= AndBits;
4511 // Keep track of the widest and mask we see.
4512 if (AndBits.ugt(WidestAndBits))
4513 WidestAndBits = AndBits;
4514 if (AndBits == WidestAndBits && I->getOperand(0) == Load)
4515 AndsToMaybeRemove.push_back(I);
4516 break;
4517 }
4518
4519 case llvm::Instruction::Shl: {
4520 auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
4521 if (!ShlC)
4522 return false;
4523 uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
4524 DemandBits.setLowBits(BitWidth - ShiftAmt);
4525 break;
4526 }
4527
4528 case llvm::Instruction::Trunc: {
4529 EVT TruncVT = TLI->getValueType(*DL, I->getType());
4530 unsigned TruncBitWidth = TruncVT.getSizeInBits();
4531 DemandBits.setLowBits(TruncBitWidth);
4532 break;
4533 }
4534
4535 default:
4536 return false;
4537 }
4538 }
4539
4540 uint32_t ActiveBits = DemandBits.getActiveBits();
4541 // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
4542 // target even if isLoadExtLegal says an i1 EXTLOAD is valid. For example,
4543 // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
4544 // (and (load x) 1) is not matched as a single instruction, rather as a LDR
4545 // followed by an AND.
4546 // TODO: Look into removing this restriction by fixing backends to either
4547 // return false for isLoadExtLegal for i1 or have them select this pattern to
4548 // a single instruction.
4549 //
4550 // Also avoid hoisting if we didn't see any ands with the exact DemandBits
4551 // mask, since these are the only ands that will be removed by isel.
4552 if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
4553 WidestAndBits != DemandBits)
4554 return false;
4555
4556 LLVMContext &Ctx = Load->getType()->getContext();
4557 Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
4558 EVT TruncVT = TLI->getValueType(*DL, TruncTy);
4559
4560 // Reject cases that won't be matched as extloads.
4561 if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
4562 !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
4563 return false;
4564
4565 IRBuilder<> Builder(Load->getNextNode());
4566 auto *NewAnd = dyn_cast<Instruction>(
4567 Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
4568 // Mark this instruction as "inserted by CGP", so that other
4569 // optimizations don't touch it.
4570 InsertedInsts.insert(NewAnd);
4571
4572 // Replace all uses of load with new and (except for the use of load in the
4573 // new and itself).
4574 Load->replaceAllUsesWith(NewAnd);
4575 NewAnd->setOperand(0, Load);
4576
4577 // Remove any and instructions that are now redundant.
4578 for (auto *And : AndsToMaybeRemove)
4579 // Check that the and mask is the same as the one we decided to put on the
4580 // new and.
4581 if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
4582 And->replaceAllUsesWith(NewAnd);
4583 if (&*CurInstIterator == And)
4584 CurInstIterator = std::next(And->getIterator());
4585 And->eraseFromParent();
4586 ++NumAndUses;
4587 }
4588
4589 ++NumAndsAdded;
4590 return true;
4591}
4592
4593/// Check if V (an operand of a select instruction) is an expensive instruction
4594/// that is only used once.
4595static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
4596 auto *I = dyn_cast<Instruction>(V);
4597 // If it's safe to speculatively execute, then it should not have side
4598 // effects; therefore, it's safe to sink and possibly *not* execute.
4599 return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
4600 TTI->getUserCost(I) >= TargetTransformInfo::TCC_Expensive;
4601}
4602
4603/// Returns true if a SelectInst should be turned into an explicit branch.
4604static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
4605 const TargetLowering *TLI,
4606 SelectInst *SI) {
4607 // If even a predictable select is cheap, then a branch can't be cheaper.
4608 if (!TLI->isPredictableSelectExpensive())
4609 return false;
4610
4611 // FIXME: This should use the same heuristics as IfConversion to determine
4612 // whether a select is better represented as a branch.
4613
4614 // If metadata tells us that the select condition is obviously predictable,
4615 // then we want to replace the select with a branch.
4616 uint64_t TrueWeight, FalseWeight;
4617 if (SI->extractProfMetadata(TrueWeight, FalseWeight)) {
4618 uint64_t Max = std::max(TrueWeight, FalseWeight);
4619 uint64_t Sum = TrueWeight + FalseWeight;
4620 if (Sum != 0) {
4621 auto Probability = BranchProbability::getBranchProbability(Max, Sum);
4622 if (Probability > TLI->getPredictableBranchThreshold())
4623 return true;
4624 }
4625 }
4626
4627 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
4628
4629 // If a branch is predictable, an out-of-order CPU can avoid blocking on its
4630 // comparison condition. If the compare has more than one use, there's
4631 // probably another cmov or setcc around, so it's not worth emitting a branch.
4632 if (!Cmp || !Cmp->hasOneUse())
4633 return false;
4634
4635 // If either operand of the select is expensive and only needed on one side
4636 // of the select, we should form a branch.
4637 if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
4638 sinkSelectOperand(TTI, SI->getFalseValue()))
4639 return true;
4640
4641 return false;
4642}
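As a worked (hypothetical) example of the metadata check above: branch weights of 1000 and 1 give Max/Sum = 1000/1001 ≈ 0.999; if that exceeds whatever threshold the target returns from getPredictableBranchThreshold(), the select is turned into a branch without ever consulting the cmp-use and sinkSelectOperand checks later in the function.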
4643
4644/// If \p isTrue is true, return the true value of \p SI, otherwise return
4645 /// the false value of \p SI. If the true/false value of \p SI is defined by any
4646/// select instructions in \p Selects, look through the defining select
4647/// instruction until the true/false value is not defined in \p Selects.
4648static Value *getTrueOrFalseValue(
4649 SelectInst *SI, bool isTrue,
4650 const SmallPtrSet<const Instruction *, 2> &Selects) {
4651 Value *V;
[15] 'V' declared without an initial value
4652
4653 for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
[16] Loop condition is false. Execution continues on line 4659
4654 DefSI = dyn_cast<SelectInst>(V)) {
4655 assert(DefSI->getCondition() == SI->getCondition() &&
4656 "The condition of DefSI does not match with SI");
4657 V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
4658 }
4659 return V;
[17] Undefined or garbage value returned to caller
4660}
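The defect flagged above is that V is assigned only inside the loop body, so a call where SI is not contained in Selects would return an uninitialized value. A minimal sketch of one way to make the initialization unconditional (illustrative only, not the upstream patch; it assumes, as the caller optimizeSelectInst below guarantees by inserting every select of ASI into INS before calling, that starting from SI's own true/false operand is the intended behavior):

    static Value *getTrueOrFalseValue(
        SelectInst *SI, bool isTrue,
        const SmallPtrSet<const Instruction *, 2> &Selects) {
      // Take SI's own operand first so V is defined on every path.
      Value *V = isTrue ? SI->getTrueValue() : SI->getFalseValue();
      // Keep looking through defining selects that share SI's condition.
      while (auto *DefSI = dyn_cast<SelectInst>(V)) {
        if (!Selects.count(DefSI))
          break;
        assert(DefSI->getCondition() == SI->getCondition() &&
               "The condition of DefSI does not match with SI");
        V = isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue();
      }
      return V;
    }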
4661
4662/// If we have a SelectInst that will likely profit from branch prediction,
4663/// turn it into a branch.
4664bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
4665 // Find all consecutive select instructions that share the same condition.
4666 SmallVector<SelectInst *, 2> ASI;
4667 ASI.push_back(SI);
4668 for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
[1] Loop condition is false. Execution continues on line 4678
4669 It != SI->getParent()->end(); ++It) {
4670 SelectInst *I = dyn_cast<SelectInst>(&*It);
4671 if (I && SI->getCondition() == I->getCondition()) {
4672 ASI.push_back(I);
4673 } else {
4674 break;
4675 }
4676 }
4677
4678 SelectInst *LastSI = ASI.back();
4679 // Increment the current iterator to skip the rest of the select instructions
4680 // because they will be either "not lowered" or "all lowered" to branch.
4681 CurInstIterator = std::next(LastSI->getIterator());
4682
4683 bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
[2] Assuming the condition is false
4684
4685 // Can we convert the 'select' to CF ?
4686 if (DisableSelectToBranch || OptSize || !TLI || VectorCond ||
[3] Assuming the condition is false
[4] Assuming the condition is false
[5] Assuming the condition is false
[6] Taking false branch
4687 SI->getMetadata(LLVMContext::MD_unpredictable))
4688 return false;
4689
4690 TargetLowering::SelectSupportKind SelectKind;
4691 if (VectorCond)
[7] Taking false branch
4692 SelectKind = TargetLowering::VectorMaskSelect;
4693 else if (SI->getType()->isVectorTy())
[8] Taking false branch
4694 SelectKind = TargetLowering::ScalarCondVectorVal;
4695 else
4696 SelectKind = TargetLowering::ScalarValSelect;
4697
4698 if (TLI->isSelectSupported(SelectKind) &&
[9] Assuming the condition is false
4699 !isFormingBranchFromSelectProfitable(TTI, TLI, SI))
4700 return false;
4701
4702 ModifiedDT = true;
4703
4704 // Transform a sequence like this:
4705 // start:
4706 // %cmp = cmp uge i32 %a, %b
4707 // %sel = select i1 %cmp, i32 %c, i32 %d
4708 //
4709 // Into:
4710 // start:
4711 // %cmp = cmp uge i32 %a, %b
4712 // br i1 %cmp, label %select.true, label %select.false
4713 // select.true:
4714 // br label %select.end
4715 // select.false:
4716 // br label %select.end
4717 // select.end:
4718 // %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
4719 //
4720 // In addition, we may sink instructions that produce %c or %d from
4721 // the entry block into the destination(s) of the new branch.
4722 // If the true or false blocks do not contain a sunken instruction, that
4723 // block and its branch may be optimized away. In that case, one side of the
4724 // first branch will point directly to select.end, and the corresponding PHI
4725 // predecessor block will be the start block.
4726
4727 // First, we split the block containing the select into 2 blocks.
4728 BasicBlock *StartBlock = SI->getParent();
4729 BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(LastSI));
4730 BasicBlock *EndBlock = StartBlock->splitBasicBlock(SplitPt, "select.end");
4731
4732 // Delete the unconditional branch that was just created by the split.
4733 StartBlock->getTerminator()->eraseFromParent();
4734
4735 // These are the new basic blocks for the conditional branch.
4736 // At least one will become an actual new basic block.
4737 BasicBlock *TrueBlock = nullptr;
4738 BasicBlock *FalseBlock = nullptr;
4739 BranchInst *TrueBranch = nullptr;
4740 BranchInst *FalseBranch = nullptr;
4741
4742 // Sink expensive instructions into the conditional blocks to avoid executing
4743 // them speculatively.
4744 for (SelectInst *SI : ASI) {
[10] Assuming '__begin' is equal to '__end'
4745 if (sinkSelectOperand(TTI, SI->getTrueValue())) {
4746 if (TrueBlock == nullptr) {
4747 TrueBlock = BasicBlock::Create(SI->getContext(), "select.true.sink",
4748 EndBlock->getParent(), EndBlock);
4749 TrueBranch = BranchInst::Create(EndBlock, TrueBlock);
4750 }
4751 auto *TrueInst = cast<Instruction>(SI->getTrueValue());
4752 TrueInst->moveBefore(TrueBranch);
4753 }
4754 if (sinkSelectOperand(TTI, SI->getFalseValue())) {
4755 if (FalseBlock == nullptr) {
4756 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false.sink",
4757 EndBlock->getParent(), EndBlock);
4758 FalseBranch = BranchInst::Create(EndBlock, FalseBlock);
4759 }
4760 auto *FalseInst = cast<Instruction>(SI->getFalseValue());
4761 FalseInst->moveBefore(FalseBranch);
4762 }
4763 }
4764
4765 // If there was nothing to sink, then arbitrarily choose the 'false' side
4766 // for a new input value to the PHI.
4767 if (TrueBlock == FalseBlock) {
[11] Taking true branch
4768 assert(TrueBlock == nullptr &&
4769 "Unexpected basic block transform while optimizing select");
4770
4771 FalseBlock = BasicBlock::Create(SI->getContext(), "select.false",
4772 EndBlock->getParent(), EndBlock);
4773 BranchInst::Create(EndBlock, FalseBlock);
4774 }
4775
4776 // Insert the real conditional branch based on the original condition.
4777 // If we did not create a new block for one of the 'true' or 'false' paths
4778 // of the condition, it means that side of the branch goes to the end block
4779 // directly and the path originates from the start block from the point of
4780 // view of the new PHI.
4781 BasicBlock *TT, *FT;
4782 if (TrueBlock == nullptr) {
[12] Taking true branch
4783 TT = EndBlock;
4784 FT = FalseBlock;
4785 TrueBlock = StartBlock;
4786 } else if (FalseBlock == nullptr) {
4787 TT = TrueBlock;
4788 FT = EndBlock;
4789 FalseBlock = StartBlock;
4790 } else {
4791 TT = TrueBlock;
4792 FT = FalseBlock;
4793 }
4794 IRBuilder<>(SI).CreateCondBr(SI->getCondition(), TT, FT, SI);
4795
4796 SmallPtrSet<const Instruction *, 2> INS;
4797 INS.insert(ASI.begin(), ASI.end());
4798 // Use reverse iterator because later select may use the value of the
4799 // earlier select, and we need to propagate value through earlier select
4800 // to get the PHI operand.
4801 for (auto It = ASI.rbegin(); It != ASI.rend(); ++It) {
[13] Loop condition is true. Entering loop body
4802 SelectInst *SI = *It;
4803 // The select itself is replaced with a PHI Node.
4804 PHINode *PN = PHINode::Create(SI->getType(), 2, "", &EndBlock->front());
4805 PN->takeName(SI);
4806 PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
[14] Calling 'getTrueOrFalseValue'
4807 PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
4808
4809 SI->replaceAllUsesWith(PN);
4810 SI->eraseFromParent();
4811 INS.erase(SI);
4812 ++NumSelectsExpanded;
4813 }
4814
4815 // Instruct OptimizeBlock to skip to the next block.
4816 CurInstIterator = StartBlock->end();
4817 return true;
4818}
4819
4820static bool isBroadcastShuffle(ShuffleVectorInst *SVI) {
4821 SmallVector<int, 16> Mask(SVI->getShuffleMask());
4822 int SplatElem = -1;
4823 for (unsigned i = 0; i < Mask.size(); ++i) {
4824 if (SplatElem != -1 && Mask[i] != -1 && Mask[i] != SplatElem)
4825 return false;
4826 SplatElem = Mask[i];
4827 }
4828
4829 return true;
4830}
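E.g., shuffle masks <0, 0, 0, 0> and <1, undef, 1, 1> are accepted here as broadcasts (undef lanes come back from getShuffleMask() as -1), while <0, 1, 0, 1> is rejected as soon as the second lane disagrees with the first.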
4831
4832/// Some targets have expensive vector shifts if the lanes aren't all the same
4833/// (e.g. x86 only introduced "vpsllvd" and friends with AVX2). In these cases
4834/// it's often worth sinking a shufflevector splat down to its use so that
4835/// codegen can spot all lanes are identical.
4836bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
4837 BasicBlock *DefBB = SVI->getParent();
4838
4839 // Only do this xform if variable vector shifts are particularly expensive.
4840 if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
4841 return false;
4842
4843 // We only expect better codegen by sinking a shuffle if we can recognise a
4844 // constant splat.
4845 if (!isBroadcastShuffle(SVI))
4846 return false;
4847
4848 // InsertedShuffles - Only insert a shuffle in each block once.
4849 DenseMap<BasicBlock*, Instruction*> InsertedShuffles;
4850
4851 bool MadeChange = false;
4852 for (User *U : SVI->users()) {
4853 Instruction *UI = cast<Instruction>(U);
4854
4855 // Figure out which BB this ext is used in.
4856 BasicBlock *UserBB = UI->getParent();
4857 if (UserBB == DefBB) continue;
4858
4859 // For now only apply this when the splat is used by a shift instruction.
4860 if (!UI->isShift()) continue;
4861
4862 // Everything checks out, sink the shuffle if the user's block doesn't
4863 // already have a copy.
4864 Instruction *&InsertedShuffle = InsertedShuffles[UserBB];
4865
4866 if (!InsertedShuffle) {
4867 BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
4868 assert(InsertPt != UserBB->end());
4869 InsertedShuffle =
4870 new ShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
4871 SVI->getOperand(2), "", &*InsertPt);
4872 }
4873
4874 UI->replaceUsesOfWith(SVI, InsertedShuffle);
4875 MadeChange = true;
4876 }
4877
4878 // If we removed all uses, nuke the shuffle.
4879 if (SVI->use_empty()) {
4880 SVI->eraseFromParent();
4881 MadeChange = true;
4882 }
4883
4884 return MadeChange;
4885}
4886
4887bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
4888 if (!TLI || !DL)
4889 return false;
4890
4891 Value *Cond = SI->getCondition();
4892 Type *OldType = Cond->getType();
4893 LLVMContext &Context = Cond->getContext();
4894 MVT RegType = TLI->getRegisterType(Context, TLI->getValueType(*DL, OldType));
4895 unsigned RegWidth = RegType.getSizeInBits();
4896
4897 if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
4898 return false;
4899
4900 // If the register width is greater than the type width, expand the condition
4901 // of the switch instruction and each case constant to the width of the
4902 // register. By widening the type of the switch condition, subsequent
4903 // comparisons (for case comparisons) will not need to be extended to the
4904 // preferred register width, so we will potentially eliminate N-1 extends,
4905 // where N is the number of cases in the switch.
4906 auto *NewType = Type::getIntNTy(Context, RegWidth);
4907
4908 // Zero-extend the switch condition and case constants unless the switch
4909 // condition is a function argument that is already being sign-extended.
4910 // In that case, we can avoid an unnecessary mask/extension by sign-extending
4911 // everything instead.
4912 Instruction::CastOps ExtType = Instruction::ZExt;
4913 if (auto *Arg = dyn_cast<Argument>(Cond))
4914 if (Arg->hasSExtAttr())
4915 ExtType = Instruction::SExt;
4916
4917 auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
4918 ExtInst->insertBefore(SI);
4919 SI->setCondition(ExtInst);
4920 for (auto Case : SI->cases()) {
4921 APInt NarrowConst = Case.getCaseValue()->getValue();
4922 APInt WideConst = (ExtType == Instruction::ZExt) ?
4923 NarrowConst.zext(RegWidth) : NarrowConst.sext(RegWidth);
4924 Case.setValue(ConstantInt::get(Context, WideConst));
4925 }
4926
4927 return true;
4928}
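E.g. (hypothetical target numbers): if getRegisterType() reports a 32-bit register for the condition's type, a switch on an i8 value has its condition rewritten to a zext ... to i32 (a sext when the condition is a signext function argument), and every case constant is widened to i32 the same way, so the per-case comparisons no longer need their own extends.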
4929
4930namespace {
4931/// \brief Helper class to promote a scalar operation to a vector one.
4932 /// This class is used to move an extractelement transition downward.
4933/// E.g.,
4934/// a = vector_op <2 x i32>
4935/// b = extractelement <2 x i32> a, i32 0
4936/// c = scalar_op b
4937/// store c
4938///
4939/// =>
4940/// a = vector_op <2 x i32>
4941/// c = vector_op a (equivalent to scalar_op on the related lane)
4942/// * d = extractelement <2 x i32> c, i32 0
4943/// * store d
4944 /// Assuming both extractelement and store can be combined, we get rid of the
4945/// transition.
4946class VectorPromoteHelper {
4947 /// DataLayout associated with the current module.
4948 const DataLayout &DL;
4949
4950 /// Used to perform some checks on the legality of vector operations.
4951 const TargetLowering &TLI;
4952
4953 /// Used to estimate the cost of the promoted chain.
4954 const TargetTransformInfo &TTI;
4955
4956 /// The transition being moved downwards.
4957 Instruction *Transition;
4958 /// The sequence of instructions to be promoted.
4959 SmallVector<Instruction *, 4> InstsToBePromoted;
4960 /// Cost of combining a store and an extract.
4961 unsigned StoreExtractCombineCost;
4962 /// Instruction that will be combined with the transition.
4963 Instruction *CombineInst;
4964
4965 /// \brief The instruction that represents the current end of the transition.
4966 /// Since we are faking the promotion until we reach the end of the chain
4967 /// of computation, we need a way to get the current end of the transition.
4968 Instruction *getEndOfTransition() const {
4969 if (InstsToBePromoted.empty())
4970 return Transition;
4971 return InstsToBePromoted.back();
4972 }
4973
4974 /// \brief Return the index of the original value in the transition.
4975 /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
4976 /// c, is at index 0.
4977 unsigned getTransitionOriginalValueIdx() const {
4978 assert(isa<ExtractElementInst>(Transition) &&
4979 "Other kind of transitions are not supported yet");
4980 return 0;
4981 }
4982
4983 /// \brief Return the index of the index in the transition.
4984 /// E.g., for "extractelement <2 x i32> c, i32 0" the index
4985 /// is at index 1.
4986 unsigned getTransitionIdx() const {
4987 assert(isa<ExtractElementInst>(Transition) &&
4988 "Other kind of transitions are not supported yet");
4989 return 1;
4990 }
4991
4992 /// \brief Get the type of the transition.
4993 /// This is the type of the original value.
4994 /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
4995 /// transition is <2 x i32>.
4996 Type *getTransitionType() const {
4997 return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
4998 }
4999
5000 /// \brief Promote \p ToBePromoted by moving \p Def downward through it.
5001 /// I.e., we have the following sequence:
5002 /// Def = Transition <ty1> a to <ty2>
5003 /// b = ToBePromoted <ty2> Def, ...
5004 /// =>
5005 /// b = ToBePromoted <ty1> a, ...
5006 /// Def = Transition <ty1> ToBePromoted to <ty2>
5007 void promoteImpl(Instruction *ToBePromoted);
5008
5009 /// \brief Check whether or not it is profitable to promote all the
5010 /// instructions enqueued to be promoted.
5011 bool isProfitableToPromote() {
5012 Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
5013 unsigned Index = isa<ConstantInt>(ValIdx)
5014 ? cast<ConstantInt>(ValIdx)->getZExtValue()
5015 : -1;
5016 Type *PromotedType = getTransitionType();
5017
5018 StoreInst *ST = cast<StoreInst>(CombineInst);
5019 unsigned AS = ST->getPointerAddressSpace();
5020 unsigned Align = ST->getAlignment();
5021 // Check if this store is supported.
5022 if (!TLI.allowsMisalignedMemoryAccesses(
5023 TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
5024 Align)) {
5025 // If this is not supported, there is no way we can combine
5026 // the extract with the store.
5027 return false;
5028 }
5029
5030 // The scalar chain of computation has to pay for the transition
5031 // scalar to vector.
5032 // The vector chain has to account for the combining cost.
5033 uint64_t ScalarCost =
5034 TTI.getVectorInstrCost(Transition->getOpcode(), PromotedType, Index);
5035 uint64_t VectorCost = StoreExtractCombineCost;
5036 for (const auto &Inst : InstsToBePromoted) {
5037 // Compute the cost.
5038 // By construction, all instructions being promoted are arithmetic ones.
5039 // Moreover, one argument is a constant that can be viewed as a splat
5040 // constant.
5041 Value *Arg0 = Inst->getOperand(0);
5042 bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
5043 isa<ConstantFP>(Arg0);
5044 TargetTransformInfo::OperandValueKind Arg0OVK =
5045 IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
5046 : TargetTransformInfo::OK_AnyValue;
5047 TargetTransformInfo::OperandValueKind Arg1OVK =
5048 !IsArg0Constant ? TargetTransformInfo::OK_UniformConstantValue
5049 : TargetTransformInfo::OK_AnyValue;
5050 ScalarCost += TTI.getArithmeticInstrCost(
5051 Inst->getOpcode(), Inst->getType(), Arg0OVK, Arg1OVK);
5052 VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
5053 Arg0OVK, Arg1OVK);
5054 }
5055 DEBUG(dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
5056 << ScalarCost << "\nVector: " << VectorCost << '\n');
5057 return ScalarCost > VectorCost;
5058 }
5059
5060 /// \brief Generate a constant vector with \p Val with the same
5061 /// number of elements as the transition.
5062 /// \p UseSplat defines whether or not \p Val should be replicated
5063 /// across the whole vector.
5064 /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
5065 /// otherwise we generate a vector with as many undef as possible:
5066 /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
5067 /// used at the index of the extract.
5068 Value *getConstantVector(Constant *Val, bool UseSplat) const {
5069 unsigned ExtractIdx = UINT_MAX;
5070 if (!UseSplat) {
5071 // If we cannot determine where the constant must be, we have to
5072 // use a splat constant.
5073 Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
5074 if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
5075 ExtractIdx = CstVal->getSExtValue();
5076 else
5077 UseSplat = true;
5078 }
5079
5080 unsigned End = getTransitionType()->getVectorNumElements();
5081 if (UseSplat)
5082 return ConstantVector::getSplat(End, Val);
5083
5084 SmallVector<Constant *, 4> ConstVec;
5085 UndefValue *UndefVal = UndefValue::get(Val->getType());
5086 for (unsigned Idx = 0; Idx != End; ++Idx) {
5087 if (Idx == ExtractIdx)
5088 ConstVec.push_back(Val);
5089 else
5090 ConstVec.push_back(UndefVal);
5091 }
5092 return ConstantVector::get(ConstVec);
5093 }
5094
5095 /// \brief Check if promoting the operand at \p OperandIdx of \p Use to a
5096 /// vector type can trigger undefined behavior.
5097 static bool canCauseUndefinedBehavior(const Instruction *Use,
5098 unsigned OperandIdx) {
5099 // This is not safe to introduce undef when the operand is on
5100 // the right hand side of a division-like instruction.
5101 if (OperandIdx != 1)
5102 return false;
5103 switch (Use->getOpcode()) {
5104 default:
5105 return false;
5106 case Instruction::SDiv:
5107 case Instruction::UDiv:
5108 case Instruction::SRem:
5109 case Instruction::URem:
5110 return true;
5111 case Instruction::FDiv:
5112 case Instruction::FRem:
5113 return !Use->hasNoNaNs();
5114 }
5115 llvm_unreachable(nullptr);
5116 }
5117
5118public:
5119 VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
5120 const TargetTransformInfo &TTI, Instruction *Transition,
5121 unsigned CombineCost)
5122 : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
5123 StoreExtractCombineCost(CombineCost), CombineInst(nullptr) {
5124 assert(Transition && "Do not know how to promote null");
5125 }
5126
5127 /// \brief Check if we can promote \p ToBePromoted to \p Type.
5128 bool canPromote(const Instruction *ToBePromoted) const {
5129 // We could support CastInst too.
5130 return isa<BinaryOperator>(ToBePromoted);
5131 }
5132
5133 /// \brief Check if it is profitable to promote \p ToBePromoted
5134 /// by moving the transition downward through it.
5135 bool shouldPromote(const Instruction *ToBePromoted) const {
5136 // Promote only if all the operands can be statically expanded.
5137 // Indeed, we do not want to introduce any new kind of transitions.
5138 for (const Use &U : ToBePromoted->operands()) {
5139 const Value *Val = U.get();
5140 if (Val == getEndOfTransition()) {
5141 // If the use is a division and the transition is on the rhs,
5142 // we cannot promote the operation, otherwise we may create a
5143 // division by zero.
5144 if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
5145 return false;
5146 continue;
5147 }
5148 if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
5149 !isa<ConstantFP>(Val))
5150 return false;
5151 }
5152 // Check that the resulting operation is legal.
5153 int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
5154 if (!ISDOpcode)
5155 return false;
5156 return StressStoreExtract ||
5157 TLI.isOperationLegalOrCustom(
5158 ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
5159 }
5160
5161 /// \brief Check whether or not \p Use can be combined
5162 /// with the transition.
5163 /// I.e., is it possible to do Use(Transition) => AnotherUse?
5164 bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
5165
5166 /// \brief Record \p ToBePromoted as part of the chain to be promoted.
5167 void enqueueForPromotion(Instruction *ToBePromoted) {
5168 InstsToBePromoted.push_back(ToBePromoted);
5169 }
5170
5171 /// \brief Set the instruction that will be combined with the transition.
5172 void recordCombineInstruction(Instruction *ToBeCombined) {
5173 assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
5174 CombineInst = ToBeCombined;
5175 }
5176
5177 /// \brief Promote all the instructions enqueued for promotion if it
5178 /// is profitable.
5179 /// \return True if the promotion happened, false otherwise.
5180 bool promote() {
5181 // Check if there is something to promote.
5182 // Right now, if we do not have anything to combine with,
5183 // we assume the promotion is not profitable.
5184 if (InstsToBePromoted.empty() || !CombineInst)
5185 return false;
5186
5187 // Check cost.
5188 if (!StressStoreExtract && !isProfitableToPromote())
5189 return false;
5190
5191 // Promote.
5192 for (auto &ToBePromoted : InstsToBePromoted)
5193 promoteImpl(ToBePromoted);
5194 InstsToBePromoted.clear();
5195 return true;
5196 }
5197};
5198} // End of anonymous namespace.
5199
5200void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
5201 // At this point, we know that all the operands of ToBePromoted but Def
5202 // can be statically promoted.
5203 // For Def, we need to use its parameter in ToBePromoted:
5204 // b = ToBePromoted ty1 a
5205 // Def = Transition ty1 b to ty2
5206 // Move the transition down.
5207 // 1. Replace all uses of the promoted operation by the transition.
5208 // = ... b => = ... Def.
5209 assert(ToBePromoted->getType() == Transition->getType() &&
5210 "The type of the result of the transition does not match "
5211 "the final type");
5212 ToBePromoted->replaceAllUsesWith(Transition);
5213 // 2. Update the type of the uses.
5214 // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
5215 Type *TransitionTy = getTransitionType();
5216 ToBePromoted->mutateType(TransitionTy);
5217 // 3. Update all the operands of the promoted operation with promoted
5218 // operands.
5219 // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
5220 for (Use &U : ToBePromoted->operands()) {
5221 Value *Val = U.get();
5222 Value *NewVal = nullptr;
5223 if (Val == Transition)
5224 NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
5225 else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
5226 isa<ConstantFP>(Val)) {
5227 // Use a splat constant if it is not safe to use undef.
5228 NewVal = getConstantVector(
5229 cast<Constant>(Val),
5230 isa<UndefValue>(Val) ||
5231 canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
5232 } else
5233   llvm_unreachable("Did you modify shouldPromote and forget to update "
5234                    "this?");
5235 ToBePromoted->setOperand(U.getOperandNo(), NewVal);
5236 }
5237 Transition->removeFromParent();
5238 Transition->insertAfter(ToBePromoted);
5239 Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
5240}
5241
5242/// Some targets can do store(extractelement) with one instruction.
5243/// Try to push the extractelement towards the stores when the target
5244/// has this feature and this is profitable.
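///
/// A minimal illustrative sketch (the IR names here are made up, and it
/// assumes the target reports that store(extractelement) is combinable):
/// \code
///   ; before: the extractelement (the "transition") feeds a scalar add
///   %ext = extractelement <2 x i32> %v, i32 1
///   %add = add i32 %ext, 7
///   store i32 %add, i32* %p
///
///   ; after promotion: the add is rewritten on the vector type, the constant
///   ; operand is splatted, and the transition sinks next to the store
///   %add.v = add <2 x i32> %v, <i32 7, i32 7>
///   %ext2  = extractelement <2 x i32> %add.v, i32 1
///   store i32 %ext2, i32* %p
/// \endcode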
5245bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
5246 unsigned CombineCost = UINT_MAX;
5247 if (DisableStoreExtract || !TLI ||
5248 (!StressStoreExtract &&
5249 !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
5250 Inst->getOperand(1), CombineCost)))
5251 return false;
5252
5253 // At this point we know that Inst is a vector to scalar transition.
5254 // Try to move it down the def-use chain, until:
5255 // - We can combine the transition with its single use
5256 //   => we get rid of the transition.
5257 // - We escape the current basic block
5258 //   => we would need to check that we are moving it to a cheaper place and
5259 // we do not do that for now.
5260 BasicBlock *Parent = Inst->getParent();
5261 DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
5262 VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
5263 // If the transition has more than one use, assume this is not going to be
5264 // beneficial.
5265 while (Inst->hasOneUse()) {
5266 Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
5267 DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
5268
5269 if (ToBePromoted->getParent() != Parent) {
5270 DEBUG(dbgs() << "Instruction to promote is in a different block ("
5271              << ToBePromoted->getParent()->getName()
5272              << ") than the transition (" << Parent->getName() << ").\n");
5273 return false;
5274 }
5275
5276 if (VPH.canCombine(ToBePromoted)) {
5277 DEBUG(dbgs() << "Assume " << *Inst << '\n'
5278              << "will be combined with: " << *ToBePromoted << '\n');
5279 VPH.recordCombineInstruction(ToBePromoted);
5280 bool Changed = VPH.promote();
5281 NumStoreExtractExposed += Changed;
5282 return Changed;
5283 }
5284
5285 DEBUG(dbgs() << "Try promoting.\n");
5286 if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
5287 return false;
5288
5289 DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
5290
5291 VPH.enqueueForPromotion(ToBePromoted);
5292 Inst = ToBePromoted;
5293 }
5294 return false;
5295}
5296
5297/// For the instruction sequence of store below, F and I values
5298/// are bundled together as an i64 value before being stored into memory.
5299 /// Sometimes it is more efficient to generate separate stores for F and I,
5300/// which can remove the bitwise instructions or sink them to colder places.
5301///
5302/// (store (or (zext (bitcast F to i32) to i64),
5303/// (shl (zext I to i64), 32)), addr) -->
5304/// (store F, addr) and (store I, addr+4)
5305///
5306 /// Similarly, splitting other merged stores can also be beneficial, for example:
5307/// For pair of {i32, i32}, i64 store --> two i32 stores.
5308/// For pair of {i32, i16}, i64 store --> two i32 stores.
5309/// For pair of {i16, i16}, i32 store --> two i16 stores.
5310/// For pair of {i16, i8}, i32 store --> two i16 stores.
5311/// For pair of {i8, i8}, i16 store --> two i8 stores.
5312///
5313/// We allow each target to determine specifically which kind of splitting is
5314/// supported.
5315///
5316 /// The store patterns are commonly seen in code like the snippet below
5317 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
5318/// void goo(const std::pair<int, float> &);
5319/// hoo() {
5320/// ...
5321/// goo(std::make_pair(tmp, ftmp));
5322/// ...
5323/// }
5324///
5325/// Although we already have similar splitting in DAG Combine, we duplicate
5326 /// it in CodeGenPrepare to catch the case in which the pattern spans
5327 /// multiple BBs. The logic in DAG Combine is kept to catch cases generated
5328 /// during code expansion.
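///
/// As a rough illustrative sketch only (names invented; assumes a
/// little-endian layout and a target that prefers the split form), the merged
/// i64 store and its split replacement look roughly like:
/// \code
///   ; merged form matched by splitMergedValStore
///   %f.i32 = bitcast float %f to i32
///   %lo    = zext i32 %f.i32 to i64
///   %hi    = zext i32 %i to i64
///   %hi.sh = shl i64 %hi, 32
///   %pair  = or i64 %lo, %hi.sh
///   store i64 %pair, i64* %addr
///
///   ; split form emitted below: low half at the original address,
///   ; high half one SplitStoreType element further on
///   %p.lo = bitcast i64* %addr to i32*
///   store i32 %f.i32, i32* %p.lo
///   %p.hi = getelementptr i32, i32* %p.lo, i32 1
///   store i32 %i, i32* %p.hi
/// \endcode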
5329static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
5330 const TargetLowering &TLI) {
5331 // Handle simple but common cases only.
5332 Type *StoreType = SI.getValueOperand()->getType();
5333 if (DL.getTypeStoreSizeInBits(StoreType) != DL.getTypeSizeInBits(StoreType) ||
5334 DL.getTypeSizeInBits(StoreType) == 0)
5335 return false;
5336
5337 unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
5338 Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
5339 if (DL.getTypeStoreSizeInBits(SplitStoreType) !=
5340 DL.getTypeSizeInBits(SplitStoreType))
5341 return false;
5342
5343 // Match the following patterns:
5344 // (store (or (zext LValue to i64),
5345 // (shl (zext HValue to i64), 32)), HalfValBitSize)
5346 // or
5347 // (store (or (shl (zext HValue to i64), 32),
5348 //            (zext LValue to i64)), HalfValBitSize)
5349 // Expect both operands of the OR and the first operand of the SHL to have
5350 // only one use.
5351 Value *LValue, *HValue;
5352 if (!match(SI.getValueOperand(),
5353 m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
5354 m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
5355 m_SpecificInt(HalfValBitSize))))))
5356 return false;
5357
5358 // Check that LValue and HValue are integers no wider than HalfValBitSize bits.
5359 if (!LValue->getType()->isIntegerTy() ||
5360 DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
5361 !HValue->getType()->isIntegerTy() ||
5362 DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
5363 return false;
5364
5365 // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
5366 // as the input of target query.
5367 auto *LBC = dyn_cast<BitCastInst>(LValue);
5368 auto *HBC = dyn_cast<BitCastInst>(HValue);
5369 EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
5370 : EVT::getEVT(LValue->getType());
5371 EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
5372 : EVT::getEVT(HValue->getType());
5373 if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
5374 return false;
5375
5376 // Start to split store.
5377 IRBuilder<> Builder(SI.getContext());
5378 Builder.SetInsertPoint(&SI);
5379
5380 // If LValue/HValue is a bitcast in another BB, create a new one in the
5381 // current BB so it may be merged with the split stores by the DAG combiner.
5382 if (LBC && LBC->getParent() != SI.getParent())
5383 LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
5384 if (HBC && HBC->getParent() != SI.getParent())
5385 HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
5386
5387 auto CreateSplitStore = [&](Value *V, bool Upper) {
5388 V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
5389 Value *Addr = Builder.CreateBitCast(
5390 SI.getOperand(1),
5391 SplitStoreType->getPointerTo(SI.getPointerAddressSpace()));
5392 if (Upper)
5393 Addr = Builder.CreateGEP(
5394 SplitStoreType, Addr,
5395 ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
5396 Builder.CreateAlignedStore(
5397 V, Addr, Upper ? SI.getAlignment() / 2 : SI.getAlignment());
5398 };
5399
5400 CreateSplitStore(LValue, false);
5401 CreateSplitStore(HValue, true);
5402
5403 // Delete the old store.
5404 SI.eraseFromParent();
5405 return true;
5406}
5407
5408bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
5409 // Bail out if we inserted the instruction to prevent optimizations from
5410 // stepping on each other's toes.
5411 if (InsertedInsts.count(I))
5412 return false;
5413
5414 if (PHINode *P = dyn_cast<PHINode>(I)) {
5415 // It is possible for very late stage optimizations (such as SimplifyCFG)
5416 // to introduce PHI nodes too late to be cleaned up. If we detect such a
5417 // trivial PHI, go ahead and zap it here.
5418 if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
5419 P->replaceAllUsesWith(V);
5420 P->eraseFromParent();
5421 ++NumPHIsElim;
5422 return true;
5423 }
5424 return false;
5425 }
5426
5427 if (CastInst *CI = dyn_cast<CastInst>(I)) {
5428 // If the source of the cast is a constant, then this should have
5429 // already been constant folded. The only reason NOT to constant fold
5430 // it is if something (e.g. LSR) was careful to place the constant
5431 // evaluation in a block other than the one that uses it (e.g. to hoist
5432 // the address of globals out of a loop). If this is the case, we don't
5433 // want to forward-subst the cast.
5434 if (isa<Constant>(CI->getOperand(0)))
5435 return false;
5436
5437 if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
5438 return true;
5439
5440 if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
5441 /// Sink a zext or sext into its user blocks if the target type doesn't
5442 /// fit in one register
5443 if (TLI &&
5444 TLI->getTypeAction(CI->getContext(),
5445 TLI->getValueType(*DL, CI->getType())) ==
5446 TargetLowering::TypeExpandInteger) {
5447 return SinkCast(CI);
5448 } else {
5449 bool MadeChange = optimizeExt(I);
5450 return MadeChange | optimizeExtUses(I);
5451 }
5452 }
5453 return false;
5454 }
5455
5456 if (CmpInst *CI = dyn_cast<CmpInst>(I))
5457 if (!TLI || !TLI->hasMultipleConditionRegisters())
5458 return OptimizeCmpExpression(CI, TLI);
5459
5460 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
5461 LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
5462 if (TLI) {
5463 bool Modified = optimizeLoadExt(LI);
5464 unsigned AS = LI->getPointerAddressSpace();
5465 Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
5466 return Modified;
5467 }
5468 return false;
5469 }
5470
5471 if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
5472 if (TLI && splitMergedValStore(*SI, *DL, *TLI))
5473 return true;
5474 SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
5475 if (TLI) {
5476 unsigned AS = SI->getPointerAddressSpace();
5477 return optimizeMemoryInst(I, SI->getOperand(1),
5478 SI->getOperand(0)->getType(), AS);
5479 }
5480 return false;
5481 }
5482
5483 if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
5484 unsigned AS = RMW->getPointerAddressSpace();
5485 return optimizeMemoryInst(I, RMW->getPointerOperand(),
5486 RMW->getType(), AS);
5487 }
5488
5489 if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
5490 unsigned AS = CmpX->getPointerAddressSpace();
5491 return optimizeMemoryInst(I, CmpX->getPointerOperand(),
5492 CmpX->getCompareOperand()->getType(), AS);
5493 }
5494
5495 BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
5496
5497 if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
5498 EnableAndCmpSinking && TLI)
5499 return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
5500
5501 if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
5502 BinOp->getOpcode() == Instruction::LShr)) {
5503 ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
5504 if (TLI && CI && TLI->hasExtractBitsInsn())
5505 return OptimizeExtractBits(BinOp, CI, *TLI, *DL);
5506
5507 return false;
5508 }
5509
5510 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
5511 if (GEPI->hasAllZeroIndices()) {
5512 /// The GEP operand must be a pointer, so must its result -> BitCast
5513 Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
5514 GEPI->getName(), GEPI);
5515 GEPI->replaceAllUsesWith(NC);
5516 GEPI->eraseFromParent();
5517 ++NumGEPsElim;
5518 optimizeInst(NC, ModifiedDT);
5519 return true;
5520 }
5521 return false;
5522 }
5523
5524 if (CallInst *CI = dyn_cast<CallInst>(I))
5525 return optimizeCallInst(CI, ModifiedDT);
5526
5527 if (SelectInst *SI = dyn_cast<SelectInst>(I))
5528 return optimizeSelectInst(SI);
5529
5530 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I))
5531 return optimizeShuffleVectorInst(SVI);
5532
5533 if (auto *Switch = dyn_cast<SwitchInst>(I))
5534 return optimizeSwitchInst(Switch);
5535
5536 if (isa<ExtractElementInst>(I))
5537 return optimizeExtractElementInst(I);
5538
5539 return false;
5540}
5541
5542/// Given an OR instruction, check to see if this is a bitreverse
5543/// idiom. If so, insert the new intrinsic and return true.
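///
/// Purely as an illustration of the kind of idiom meant here (a hand-written
/// sketch; whether the matcher accepts this exact shape is decided by
/// recognizeBSwapOrBitReverseIdiom), an i8 bit reverse built from shifts,
/// masks, and ors:
/// \code
///   %hi4 = shl i8 %x, 4
///   %lo4 = lshr i8 %x, 4
///   %s1  = or i8 %hi4, %lo4      ; swap the two nibbles
///   %a   = shl i8 %s1, 2
///   %am  = and i8 %a, -52        ; 0xCC
///   %b   = lshr i8 %s1, 2
///   %bm  = and i8 %b, 51         ; 0x33
///   %s2  = or i8 %am, %bm        ; swap bit pairs
///   %c   = shl i8 %s2, 1
///   %cm  = and i8 %c, -86        ; 0xAA
///   %d   = lshr i8 %s2, 1
///   %dm  = and i8 %d, 85         ; 0x55
///   %rev = or i8 %cm, %dm        ; %rev is %x with its bits reversed
/// \endcode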
5544static bool makeBitReverse(Instruction &I, const DataLayout &DL,
5545 const TargetLowering &TLI) {
5546 if (!I.getType()->isIntegerTy() ||
5547 !TLI.isOperationLegalOrCustom(ISD::BITREVERSE,
5548 TLI.getValueType(DL, I.getType(), true)))
5549 return false;
5550
5551 SmallVector<Instruction*, 4> Insts;
5552 if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
5553 return false;
5554 Instruction *LastInst = Insts.back();
5555 I.replaceAllUsesWith(LastInst);
5556 RecursivelyDeleteTriviallyDeadInstructions(&I);
5557 return true;
5558}
5559
5560// In this pass we look for GEP and cast instructions that are used
5561// across basic blocks and rewrite them to improve basic-block-at-a-time
5562// selection.
5563bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool& ModifiedDT) {
5564 SunkAddrs.clear();
5565 bool MadeChange = false;
5566
5567 CurInstIterator = BB.begin();
5568 while (CurInstIterator != BB.end()) {
5569 MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
5570 if (ModifiedDT)
5571 return true;
5572 }
5573
5574 bool MadeBitReverse = true;
5575 while (TLI && MadeBitReverse) {
5576 MadeBitReverse = false;
5577 for (auto &I : reverse(BB)) {
5578 if (makeBitReverse(I, *DL, *TLI)) {
5579 MadeBitReverse = MadeChange = true;
5580 ModifiedDT = true;
5581 break;
5582 }
5583 }
5584 }
5585 MadeChange |= dupRetToEnableTailCallOpts(&BB);
5586
5587 return MadeChange;
5588}
5589
5590 // If llvm.dbg.value is far away from the value, then ISel may not be able
5591 // to handle it properly. ISel will drop the llvm.dbg.value if it cannot
5592 // find a node corresponding to the value.
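//
// A schematic sketch of the rewrite (illustrative only; the dbg.value
// metadata operands are elided):
//   %x = add i32 %a, %b
//   ...                                      ; many unrelated instructions
//   call void @llvm.dbg.value(metadata i32 %x, ...)
// becomes
//   %x = add i32 %a, %b
//   call void @llvm.dbg.value(metadata i32 %x, ...)
//   ...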
5593bool CodeGenPrepare::placeDbgValues(Function &F) {
5594 bool MadeChange = false;
5595 for (BasicBlock &BB : F) {
5596 Instruction *PrevNonDbgInst = nullptr;
5597 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
5598 Instruction *Insn = &*BI++;
5599 DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);
5600 // Leave dbg.values that refer to an alloca alone. These
5601 // intrinsics describe the address of a variable (= the alloca)
5602 // being taken. They should not be moved next to the alloca
5603 // (and to the beginning of the scope), but rather stay close to
5604 // where said address is used.
5605 if (!DVI || (DVI->getValue() && isa<AllocaInst>(DVI->getValue()))) {
5606 PrevNonDbgInst = Insn;
5607 continue;
5608 }
5609
5610 Instruction *VI = dyn_cast_or_null<Instruction>(DVI->getValue());
5611 if (VI && VI != PrevNonDbgInst && !VI->isTerminator()) {
5612 // If VI is a phi in a block with an EHPad terminator, we can't insert
5613 // after it.
5614 if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
5615 continue;
5616 DEBUG(dbgs() << "Moving Debug Value before :\n" << *DVI << ' ' << *VI);
5617 DVI->removeFromParent();
5618 if (isa<PHINode>(VI))
5619 DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
5620 else
5621 DVI->insertAfter(VI);
5622 MadeChange = true;
5623 ++NumDbgValueMoved;
5624 }
5625 }
5626 }
5627 return MadeChange;
5628}
5629
5630/// \brief Scale down both weights to fit into uint32_t.
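/// For example (illustrative numbers only): with NewTrue = 2^33 and
/// NewFalse = 2^32, NewMax = 2^33, so Scale = 2^33 / UINT32_MAX + 1 = 3 and
/// the weights become 2863311530 and 1431655765; both fit in uint32_t and
/// the roughly 2:1 ratio is preserved.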
5631static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
5632 uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
5633 uint32_t Scale = (NewMax / UINT32_MAX) + 1;
5634 NewTrue = NewTrue / Scale;
5635 NewFalse = NewFalse / Scale;
5636}
5637
5638/// \brief Some targets prefer to split a conditional branch like:
5639/// \code
5640/// %0 = icmp ne i32 %a, 0
5641/// %1 = icmp ne i32 %b, 0
5642/// %or.cond = or i1 %0, %1
5643/// br i1 %or.cond, label %TrueBB, label %FalseBB
5644/// \endcode
5645/// into multiple branch instructions like:
5646/// \code
5647/// bb1:
5648/// %0 = icmp ne i32 %a, 0
5649/// br i1 %0, label %TrueBB, label %bb2
5650/// bb2:
5651/// %1 = icmp ne i32 %b, 0
5652/// br i1 %1, label %TrueBB, label %FalseBB
5653/// \endcode
5654/// This usually allows instruction selection to do even further optimizations
5655/// and combine the compare with the branch instruction. Currently this is
5656/// applied for targets which have "cheap" jump instructions.
5657///
5658/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
5659///
5660bool CodeGenPrepare::splitBranchCondition(Function &F) {
5661 if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
5662 return false;
5663
5664 bool MadeChange = false;
5665 for (auto &BB : F) {
5666 // Does this BB end with the following?
5667 // %cond1 = icmp|fcmp|binary instruction ...
5668 // %cond2 = icmp|fcmp|binary instruction ...
5669 // %cond.or = or|and i1 %cond1, %cond2
5670 // br i1 %cond.or, label %dest1, label %dest2
5671 BinaryOperator *LogicOp;
5672 BasicBlock *TBB, *FBB;
5673 if (!match(BB.getTerminator(), m_Br(m_OneUse(m_BinOp(LogicOp)), TBB, FBB)))
5674 continue;
5675
5676 auto *Br1 = cast<BranchInst>(BB.getTerminator());
5677 if (Br1->getMetadata(LLVMContext::MD_unpredictable))
5678 continue;
5679
5680 unsigned Opc;
5681 Value *Cond1, *Cond2;
5682 if (match(LogicOp, m_And(m_OneUse(m_Value(Cond1)),
5683 m_OneUse(m_Value(Cond2)))))
5684 Opc = Instruction::And;
5685 else if (match(LogicOp, m_Or(m_OneUse(m_Value(Cond1)),
5686 m_OneUse(m_Value(Cond2)))))
5687 Opc = Instruction::Or;
5688 else
5689 continue;
5690
5691 if (!match(Cond1, m_CombineOr(m_Cmp(), m_BinOp())) ||
5692 !match(Cond2, m_CombineOr(m_Cmp(), m_BinOp())) )
5693 continue;
5694
5695 DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());
5696
5697 // Create a new BB.
5698 auto TmpBB =
5699 BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
5700 BB.getParent(), BB.getNextNode());
5701
5702 // Update the original basic block: use the first condition directly in the
5703 // branch instruction and remove the no-longer-needed and/or instruction.
5704 Br1->setCondition(Cond1);
5705 LogicOp->eraseFromParent();
5706
5707 // Depending on the condition we have to replace either the true or the
5708 // false successor of the original branch instruction.
5709 if (Opc == Instruction::And)
5710 Br1->setSuccessor(0, TmpBB);
5711 else
5712 Br1->setSuccessor(1, TmpBB);
5713
5714 // Fill in the new basic block.
5715 auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
5716 if (auto *I = dyn_cast<Instruction>(Cond2)) {
5717 I->removeFromParent();
5718 I->insertBefore(Br2);
5719 }
5720
5721 // Update PHI nodes in both successors. The original BB needs to be
5722 // replaced in one successor's PHI nodes, because the branch now comes from
5723 // the newly generated BB (TmpBB). In the other successor we need to add one
5724 // incoming edge to the PHI nodes, because both branch instructions now
5725 // target the same successor. Depending on the original branch condition
5726 // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
5727 // we perform the correct update for the PHI nodes.
5728 // This doesn't change the successor order of the just created branch
5729 // instruction (or any other instruction).
5730 if (Opc == Instruction::Or)
5731 std::swap(TBB, FBB);
5732
5733 // Replace the old BB with the new BB.
5734 for (auto &I : *TBB) {
5735 PHINode *PN = dyn_cast<PHINode>(&I);
5736 if (!PN)
5737 break;
5738 int i;
5739 while ((i = PN->getBasicBlockIndex(&BB)) >= 0)
5740 PN->setIncomingBlock(i, TmpBB);
5741 }
5742
5743 // Add another incoming edge from the new BB.
5744 for (auto &I : *FBB) {
5745 PHINode *PN = dyn_cast<PHINode>(&I);
5746 if (!PN)
5747 break;
5748 auto *Val = PN->getIncomingValueForBlock(&BB);
5749 PN->addIncoming(Val, TmpBB);
5750 }
5751
5752 // Update the branch weights (from SelectionDAGBuilder::
5753 // FindMergedConditions).
5754 if (Opc == Instruction::Or) {
5755 // Codegen X | Y as:
5756 // BB1:
5757 // jmp_if_X TBB
5758 // jmp TmpBB
5759 // TmpBB:
5760 // jmp_if_Y TBB
5761 // jmp FBB
5762 //
5763
5764 // We have flexibility in setting Prob for BB1 and Prob for NewBB.
5765 // The requirement is that
5766 // TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
5767 //     = TrueProb for original BB.
5768 //   Assuming the original weights are A and B, one choice is to set BB1's
5769 // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
5770 // assumes that
5771 // TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
5772 // Another choice is to assume TrueProb for BB1 equals to TrueProb for
5773 // TmpBB, but the math is more complicated.
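//   Worked example (illustrative numbers only): with original weights A = 3
//   and B = 1, TrueProb for the original BB is 3/4. BB1 then gets weights
//   3 and 5 (TrueProb 3/8) and TmpBB gets weights 3 and 2 (TrueProb 3/5),
//   and indeed 3/8 + (5/8) * (3/5) = 3/4.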
5774 uint64_t TrueWeight, FalseWeight;
5775 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
5776 uint64_t NewTrueWeight = TrueWeight;
5777 uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
5778 scaleWeights(NewTrueWeight, NewFalseWeight);
5779 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
5780                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5781
5782 NewTrueWeight = TrueWeight;
5783 NewFalseWeight = 2 * FalseWeight;
5784 scaleWeights(NewTrueWeight, NewFalseWeight);
5785 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
5786                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5787 }
5788 } else {
5789 // Codegen X & Y as:
5790 // BB1:
5791 // jmp_if_X TmpBB
5792 // jmp FBB
5793 // TmpBB:
5794 // jmp_if_Y TBB
5795 // jmp FBB
5796 //
5797 // This requires creation of TmpBB after CurBB.
5798
5799 // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
5800 // The requirement is that
5801 // FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
5802 //     = FalseProb for original BB.
5803 //   Assuming the original weights are A and B, one choice is to set BB1's
5804 // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
5805 // assumes that
5806 // FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
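//   Worked example (illustrative numbers only): with original weights A = 3
//   and B = 1, FalseProb for the original BB is 1/4. BB1 then gets weights
//   7 and 1 (FalseProb 1/8) and TmpBB gets weights 6 and 1 (FalseProb 1/7),
//   and indeed 1/8 + (7/8) * (1/7) = 1/4.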
5807 uint64_t TrueWeight, FalseWeight;
5808 if (Br1->extractProfMetadata(TrueWeight, FalseWeight)) {
5809 uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
5810 uint64_t NewFalseWeight = FalseWeight;
5811 scaleWeights(NewTrueWeight, NewFalseWeight);
5812 Br1->setMetadata(LLVMContext::MD_prof, MDBuilder(Br1->getContext())
5813                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5814
5815 NewTrueWeight = 2 * TrueWeight;
5816 NewFalseWeight = FalseWeight;
5817 scaleWeights(NewTrueWeight, NewFalseWeight);
5818 Br2->setMetadata(LLVMContext::MD_prof, MDBuilder(Br2->getContext())
5819                          .createBranchWeights(NewTrueWeight, NewFalseWeight));
5820 }
5821 }
5822
5823 // Note: No point in getting fancy here, since the DT info is never
5824 // available to CodeGenPrepare.
5825 ModifiedDT = true;
5826
5827 MadeChange = true;
5828
5829 DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
5830       TmpBB->dump());
5831 }
5832 return MadeChange;
5833}