CodeGenPrepare.cpp
1 //===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass munges the code in the input function to better prepare it for
10 // SelectionDAG-based code generation. This works around limitations in its
11 // basic-block-at-a-time approach. It should eventually be removed.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/ArrayRef.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/MapVector.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/LoopInfo.h"
35 #include "llvm/CodeGen/Analysis.h"
42 #include "llvm/Config/llvm-config.h"
43 #include "llvm/IR/Argument.h"
44 #include "llvm/IR/Attributes.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/Constant.h"
47 #include "llvm/IR/Constants.h"
48 #include "llvm/IR/DataLayout.h"
49 #include "llvm/IR/DebugInfo.h"
50 #include "llvm/IR/DerivedTypes.h"
51 #include "llvm/IR/Dominators.h"
52 #include "llvm/IR/Function.h"
54 #include "llvm/IR/GlobalValue.h"
55 #include "llvm/IR/GlobalVariable.h"
56 #include "llvm/IR/IRBuilder.h"
57 #include "llvm/IR/InlineAsm.h"
58 #include "llvm/IR/InstrTypes.h"
59 #include "llvm/IR/Instruction.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/Intrinsics.h"
63 #include "llvm/IR/IntrinsicsAArch64.h"
64 #include "llvm/IR/LLVMContext.h"
65 #include "llvm/IR/MDBuilder.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/Operator.h"
68 #include "llvm/IR/PatternMatch.h"
69 #include "llvm/IR/Statepoint.h"
70 #include "llvm/IR/Type.h"
71 #include "llvm/IR/Use.h"
72 #include "llvm/IR/User.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/IR/ValueHandle.h"
75 #include "llvm/IR/ValueMap.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Pass.h"
80 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/Compiler.h"
83 #include "llvm/Support/Debug.h"
95 #include <algorithm>
96 #include <cassert>
97 #include <cstdint>
98 #include <iterator>
99 #include <limits>
100 #include <memory>
101 #include <utility>
102 #include <vector>
103 
104 using namespace llvm;
105 using namespace llvm::PatternMatch;
106 
107 #define DEBUG_TYPE "codegenprepare"
108 
109 STATISTIC(NumBlocksElim, "Number of blocks eliminated");
110 STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
111 STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
112 STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
113  "sunken Cmps");
114 STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
115  "of sunken Casts");
116 STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
117  "computations were sunk");
118 STATISTIC(NumMemoryInstsPhiCreated,
119  "Number of phis created when address "
120  "computations were sunk to memory instructions");
121 STATISTIC(NumMemoryInstsSelectCreated,
122  "Number of selects created when address "
123  "computations were sunk to memory instructions");
124 STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
125 STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
126 STATISTIC(NumAndsAdded,
127  "Number of and mask instructions added to form ext loads");
128 STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
129 STATISTIC(NumRetsDup, "Number of return instructions duplicated");
130 STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
131 STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
132 STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
133 
134 static cl::opt<bool> DisableBranchOpts(
135  "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
136  cl::desc("Disable branch optimizations in CodeGenPrepare"));
137 
138 static cl::opt<bool>
139  DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
140  cl::desc("Disable GC optimizations in CodeGenPrepare"));
141 
142 static cl::opt<bool> DisableSelectToBranch(
143  "disable-cgp-select2branch", cl::Hidden, cl::init(false),
144  cl::desc("Disable select to branch conversion."));
145 
146 static cl::opt<bool> AddrSinkUsingGEPs(
147  "addr-sink-using-gep", cl::Hidden, cl::init(true),
148  cl::desc("Address sinking in CGP using GEPs."));
149 
150 static cl::opt<bool> EnableAndCmpSinking(
151  "enable-andcmp-sinking", cl::Hidden, cl::init(true),
152  cl::desc("Enable sinking and/cmp into branches."));
153 
154 static cl::opt<bool> DisableStoreExtract(
155  "disable-cgp-store-extract", cl::Hidden, cl::init(false),
156  cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
157 
158 static cl::opt<bool> StressStoreExtract(
159  "stress-cgp-store-extract", cl::Hidden, cl::init(false),
160  cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
161 
162 static cl::opt<bool> DisableExtLdPromotion(
163  "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
164  cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
165  "CodeGenPrepare"));
166 
167 static cl::opt<bool> StressExtLdPromotion(
168  "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
169  cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
170  "optimization in CodeGenPrepare"));
171 
172 static cl::opt<bool> DisablePreheaderProtect(
173  "disable-preheader-prot", cl::Hidden, cl::init(false),
174  cl::desc("Disable protection against removing loop preheaders"));
175 
176 static cl::opt<bool> ProfileGuidedSectionPrefix(
177  "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
178  cl::desc("Use profile info to add section prefix for hot/cold functions"));
179 
180 static cl::opt<bool> ProfileUnknownInSpecialSection(
181  "profile-unknown-in-special-section", cl::Hidden, cl::init(false),
182  cl::ZeroOrMore,
183  cl::desc("In profiling modes like sampleFDO, if a function doesn't have "
184  "a profile, we cannot tell for sure that the function is cold, because "
185  "it may be a function newly added without ever being sampled. "
186  "With this flag enabled, the compiler can put such profile-unknown "
187  "functions into a special section, so the runtime system can choose "
188  "to handle them differently than the .text section, for example to "
189  "save RAM."));
190 
191 static cl::opt<uint64_t> FreqRatioToSkipMerge(
192  "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
193  cl::desc("Skip merging empty blocks if (frequency of empty block) / "
194  "(frequency of destination block) is greater than this ratio"));
195 
196 static cl::opt<bool> ForceSplitStore(
197  "force-split-store", cl::Hidden, cl::init(false),
198  cl::desc("Force store splitting no matter what the target query says."));
199 
200 static cl::opt<bool>
201 EnableTypePromotionMerge("cgp-type-promotion-merge", cl::Hidden,
202  cl::desc("Enable merging of redundant sexts when one is dominating"
203  " the other."), cl::init(true));
204 
205 static cl::opt<bool> DisableComplexAddrModes(
206  "disable-complex-addr-modes", cl::Hidden, cl::init(false),
207  cl::desc("Disables combining addressing modes with different parts "
208  "in optimizeMemoryInst."));
209 
210 static cl::opt<bool>
211 AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
212  cl::desc("Allow creation of Phis in Address sinking."));
213 
214 static cl::opt<bool>
215 AddrSinkNewSelects("addr-sink-new-select", cl::Hidden, cl::init(true),
216  cl::desc("Allow creation of selects in Address sinking."));
217 
218 static cl::opt<bool> AddrSinkCombineBaseReg(
219  "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
220  cl::desc("Allow combining of BaseReg field in Address sinking."));
221 
222 static cl::opt<bool> AddrSinkCombineBaseGV(
223  "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
224  cl::desc("Allow combining of BaseGV field in Address sinking."));
225 
226 static cl::opt<bool> AddrSinkCombineBaseOffs(
227  "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
228  cl::desc("Allow combining of BaseOffs field in Address sinking."));
229 
230 static cl::opt<bool> AddrSinkCombineScaledReg(
231  "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
232  cl::desc("Allow combining of ScaledReg field in Address sinking."));
233 
234 static cl::opt<bool>
235  EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
236  cl::init(true),
237  cl::desc("Enable splitting large offset of GEP."));
238 
239 static cl::opt<bool> EnableICMP_EQToICMP_ST(
240  "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
241  cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
242 
243 static cl::opt<bool>
244  VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
245  cl::desc("Enable BFI update verification for "
246  "CodeGenPrepare."));
247 
248 static cl::opt<bool> OptimizePhiTypes(
249  "cgp-optimize-phi-types", cl::Hidden, cl::init(false),
250  cl::desc("Enable converting phi types in CodeGenPrepare"));
251 
252 namespace {
253 
254 enum ExtType {
255  ZeroExtension, // Zero extension has been seen.
256  SignExtension, // Sign extension has been seen.
257  BothExtension // This extension type is used if we saw sext after
258  // ZeroExtension had been set, or if we saw zext after
259  // SignExtension had been set. It makes the type
260  // information of a promoted instruction invalid.
261 };
262 
263 using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
264 using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
265 using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
266 using SExts = SmallVector<Instruction *, 16>;
267 using ValueToSExts = DenseMap<Value *, SExts>;
268 
269 class TypePromotionTransaction;
270 
271  class CodeGenPrepare : public FunctionPass {
272  const TargetMachine *TM = nullptr;
273  const TargetSubtargetInfo *SubtargetInfo;
274  const TargetLowering *TLI = nullptr;
275  const TargetRegisterInfo *TRI;
276  const TargetTransformInfo *TTI = nullptr;
277  const TargetLibraryInfo *TLInfo;
278  const LoopInfo *LI;
279  std::unique_ptr<BlockFrequencyInfo> BFI;
280  std::unique_ptr<BranchProbabilityInfo> BPI;
281  ProfileSummaryInfo *PSI;
282 
283  /// As we scan instructions optimizing them, this is the next instruction
284  /// to optimize. Transforms that can invalidate this should update it.
285  BasicBlock::iterator CurInstIterator;
286 
287  /// Keeps track of non-local addresses that have been sunk into a block.
288  /// This allows us to avoid inserting duplicate code for blocks with
289  /// multiple load/stores of the same address. The usage of WeakTrackingVH
290  /// enables SunkAddrs to be treated as a cache whose entries can be
291  /// invalidated if a sunken address computation has been erased.
292  ValueMap<Value *, WeakTrackingVH> SunkAddrs;
293 
294  /// Keeps track of all instructions inserted for the current function.
295  SetOfInstrs InsertedInsts;
296 
297  /// Keeps track of the original type of each promoted instruction before its
298  /// promotion for the current function.
299  InstrToOrigTy PromotedInsts;
300 
301  /// Keep track of instructions removed during promotion.
302  SetOfInstrs RemovedInsts;
303 
304  /// Keep track of sext chains based on their initial value.
305  DenseMap<Value *, Instruction *> SeenChainsForSExt;
306 
307  /// Keep track of GEPs accessing the same data structures such as structs or
308  /// arrays that are candidates to be split later because of their large
309  /// size.
310  MapVector<AssertingVH<Value>,
311  SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>,
312  32>>
313  LargeOffsetGEPMap;
314 
315  /// Keep track of new GEP base after splitting the GEPs having large offset.
316  SmallSet<AssertingVH<Value>, 2> NewGEPBases;
317 
318  /// Map serial numbers to Large offset GEPs.
319  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
320 
321  /// Keep track of SExt promoted.
322  ValueToSExts ValToSExtendedUses;
323 
324  /// True if the function has the OptSize attribute.
325  bool OptSize;
326 
327  /// DataLayout for the Function being processed.
328  const DataLayout *DL = nullptr;
329 
330  /// Building the dominator tree can be expensive, so we only build it
331  /// lazily and update it when required.
332  std::unique_ptr<DominatorTree> DT;
333 
334  public:
335  static char ID; // Pass identification, replacement for typeid
336 
337  CodeGenPrepare() : FunctionPass(ID) {
338  initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
339  }
340 
341  bool runOnFunction(Function &F) override;
342 
343  StringRef getPassName() const override { return "CodeGen Prepare"; }
344 
345  void getAnalysisUsage(AnalysisUsage &AU) const override {
346  // FIXME: When we can selectively preserve passes, preserve the domtree.
347  AU.addRequired<ProfileSummaryInfoWrapperPass>();
348  AU.addRequired<TargetLibraryInfoWrapperPass>();
349  AU.addRequired<TargetPassConfig>();
350  AU.addRequired<TargetTransformInfoWrapperPass>();
351  AU.addRequired<LoopInfoWrapperPass>();
352  }
353 
354  private:
355  template <typename F>
356  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
357  // Substituting can cause recursive simplifications, which can invalidate
358  // our iterator. Use a WeakTrackingVH to hold onto it in case this
359  // happens.
360  Value *CurValue = &*CurInstIterator;
361  WeakTrackingVH IterHandle(CurValue);
362 
363  f();
364 
365  // If the iterator instruction was recursively deleted, start over at the
366  // start of the block.
367  if (IterHandle != CurValue) {
368  CurInstIterator = BB->begin();
369  SunkAddrs.clear();
370  }
371  }
372 
373  // Get the DominatorTree, building if necessary.
374  DominatorTree &getDT(Function &F) {
375  if (!DT)
376  DT = std::make_unique<DominatorTree>(F);
377  return *DT;
378  }
379 
380  void removeAllAssertingVHReferences(Value *V);
381  bool eliminateAssumptions(Function &F);
382  bool eliminateFallThrough(Function &F);
383  bool eliminateMostlyEmptyBlocks(Function &F);
384  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
385  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
386  void eliminateMostlyEmptyBlock(BasicBlock *BB);
387  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
388  bool isPreheader);
389  bool makeBitReverse(Instruction &I);
390  bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
391  bool optimizeInst(Instruction *I, bool &ModifiedDT);
392  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
393  Type *AccessTy, unsigned AddrSpace);
394  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
395  bool optimizeInlineAsmInst(CallInst *CS);
396  bool optimizeCallInst(CallInst *CI, bool &ModifiedDT);
397  bool optimizeExt(Instruction *&I);
398  bool optimizeExtUses(Instruction *I);
399  bool optimizeLoadExt(LoadInst *Load);
400  bool optimizeShiftInst(BinaryOperator *BO);
401  bool optimizeFunnelShift(IntrinsicInst *Fsh);
402  bool optimizeSelectInst(SelectInst *SI);
403  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
404  bool optimizeSwitchInst(SwitchInst *SI);
405  bool optimizeExtractElementInst(Instruction *Inst);
406  bool dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT);
407  bool fixupDbgValue(Instruction *I);
408  bool placeDbgValues(Function &F);
409  bool placePseudoProbes(Function &F);
410  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
411  LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
412  bool tryToPromoteExts(TypePromotionTransaction &TPT,
413  const SmallVectorImpl<Instruction *> &Exts,
414  SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
415  unsigned CreatedInstsCost = 0);
416  bool mergeSExts(Function &F);
417  bool splitLargeGEPOffsets();
418  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
419  SmallPtrSetImpl<Instruction *> &DeletedInstrs);
420  bool optimizePhiTypes(Function &F);
421  bool performAddressTypePromotion(
422  Instruction *&Inst,
423  bool AllowPromotionWithoutCommonHeader,
424  bool HasPromoted, TypePromotionTransaction &TPT,
425  SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
426  bool splitBranchCondition(Function &F, bool &ModifiedDT);
427  bool simplifyOffsetableRelocate(GCStatepointInst &I);
428 
429  bool tryToSinkFreeOperands(Instruction *I);
430  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0,
431  Value *Arg1, CmpInst *Cmp,
432  Intrinsic::ID IID);
433  bool optimizeCmp(CmpInst *Cmp, bool &ModifiedDT);
434  bool combineToUSubWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
435  bool combineToUAddWithOverflow(CmpInst *Cmp, bool &ModifiedDT);
436  void verifyBFIUpdates(Function &F);
437  };
438 
439 } // end anonymous namespace
440 
441 char CodeGenPrepare::ID = 0;
442 
443 INITIALIZE_PASS_BEGIN(CodeGenPrepare, DEBUG_TYPE,
444  "Optimize for code generation", false, false)
445 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
446 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
447 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
448 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
449 INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
450 INITIALIZE_PASS_END(CodeGenPrepare, DEBUG_TYPE,
451  "Optimize for code generation", false, false)
452 
453 FunctionPass *llvm::createCodeGenPreparePass() { return new CodeGenPrepare(); }
454 
455 bool CodeGenPrepare::runOnFunction(Function &F) {
456  if (skipFunction(F))
457  return false;
458 
459  DL = &F.getParent()->getDataLayout();
460 
461  bool EverMadeChange = false;
462  // Clear per function information.
463  InsertedInsts.clear();
464  PromotedInsts.clear();
465 
466  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
467  SubtargetInfo = TM->getSubtargetImpl(F);
468  TLI = SubtargetInfo->getTargetLowering();
469  TRI = SubtargetInfo->getRegisterInfo();
470  TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
471  TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
472  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
473  BPI.reset(new BranchProbabilityInfo(F, *LI));
474  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
475  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
476  OptSize = F.hasOptSize();
477  if (ProfileGuidedSectionPrefix) {
478  // The hot attribute overwrites profile count based hotness while profile
479  // counts based hotness overwrites the cold attribute.
480  // This is a conservative behavior.
481  if (F.hasFnAttribute(Attribute::Hot) ||
482  PSI->isFunctionHotInCallGraph(&F, *BFI))
483  F.setSectionPrefix("hot");
484  // If PSI shows this function is not hot, we will place the function
485  // into the unlikely section if (1) PSI shows this is a cold function, or
486  // (2) the function has the cold attribute.
487  else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
488  F.hasFnAttribute(Attribute::Cold))
489  F.setSectionPrefix("unlikely");
490  else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
491  PSI->isFunctionHotnessUnknown(F))
492  F.setSectionPrefix("unknown");
493  }
494 
495  /// This optimization identifies DIV instructions that can be
496  /// profitably bypassed and carried out with a shorter, faster divide.
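 /// For example (illustrative): on a subtarget where 64-bit division is slow,
 /// a udiv whose operands are found at run time to fit in 32 bits can be
 /// dispatched, under that run-time check, to a narrower, faster 32-bit divide.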
497  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
498  const DenseMap<unsigned int, unsigned int> &BypassWidths =
499  TLI->getBypassSlowDivWidths();
500  BasicBlock* BB = &*F.begin();
501  while (BB != nullptr) {
502  // bypassSlowDivision may create new BBs, but we don't want to reapply the
503  // optimization to those blocks.
504  BasicBlock* Next = BB->getNextNode();
505  // F.hasOptSize is already checked in the outer if statement.
506  if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
507  EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
508  BB = Next;
509  }
510  }
511 
512  // Get rid of @llvm.assume builtins before attempting to eliminate empty
513  // blocks, since there might be blocks that only contain @llvm.assume calls
514  // (plus arguments that we can get rid of).
515  EverMadeChange |= eliminateAssumptions(F);
516 
517  // Eliminate blocks that contain only PHI nodes and an
518  // unconditional branch.
519  EverMadeChange |= eliminateMostlyEmptyBlocks(F);
520 
521  bool ModifiedDT = false;
522  if (!DisableBranchOpts)
523  EverMadeChange |= splitBranchCondition(F, ModifiedDT);
524 
525  // Split some critical edges where one of the sources is an indirect branch,
526  // to help generate sane code for PHIs involving such edges.
527  EverMadeChange |= SplitIndirectBrCriticalEdges(F);
528 
529  bool MadeChange = true;
530  while (MadeChange) {
531  MadeChange = false;
532  DT.reset();
533  for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
534  bool ModifiedDTOnIteration = false;
535  MadeChange |= optimizeBlock(BB, ModifiedDTOnIteration);
536 
537  // Restart BB iteration if the dominator tree of the Function was changed
538  if (ModifiedDTOnIteration)
539  break;
540  }
541  if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
542  MadeChange |= mergeSExts(F);
543  if (!LargeOffsetGEPMap.empty())
544  MadeChange |= splitLargeGEPOffsets();
545  MadeChange |= optimizePhiTypes(F);
546 
547  if (MadeChange)
548  eliminateFallThrough(F);
549 
550  // Really free removed instructions during promotion.
551  for (Instruction *I : RemovedInsts)
552  I->deleteValue();
553 
554  EverMadeChange |= MadeChange;
555  SeenChainsForSExt.clear();
556  ValToSExtendedUses.clear();
557  RemovedInsts.clear();
558  LargeOffsetGEPMap.clear();
559  LargeOffsetGEPID.clear();
560  }
561 
562  NewGEPBases.clear();
563  SunkAddrs.clear();
564 
565  if (!DisableBranchOpts) {
566  MadeChange = false;
567  // Use a set vector to get deterministic iteration order. The order in which
568  // blocks are removed may affect whether or not PHI nodes in successors
569  // are removed.
570  SmallSetVector<BasicBlock *, 8> WorkList;
571  for (BasicBlock &BB : F) {
572  SmallVector<BasicBlock *, 2> Successors(successors(&BB));
573  MadeChange |= ConstantFoldTerminator(&BB, true);
574  if (!MadeChange) continue;
575 
576  for (BasicBlock *Succ : Successors)
577  if (pred_empty(Succ))
578  WorkList.insert(Succ);
579  }
580 
581  // Delete the dead blocks and any of their dead successors.
582  MadeChange |= !WorkList.empty();
583  while (!WorkList.empty()) {
584  BasicBlock *BB = WorkList.pop_back_val();
585  SmallVector<BasicBlock *, 2> Successors(successors(BB));
586 
587  DeleteDeadBlock(BB);
588 
589  for (BasicBlock *Succ : Successors)
590  if (pred_empty(Succ))
591  WorkList.insert(Succ);
592  }
593 
594  // Merge pairs of basic blocks with unconditional branches, connected by
595  // a single edge.
596  if (EverMadeChange || MadeChange)
597  MadeChange |= eliminateFallThrough(F);
598 
599  EverMadeChange |= MadeChange;
600  }
601 
602  if (!DisableGCOpts) {
603  SmallVector<GCStatepointInst *, 8> Statepoints;
604  for (BasicBlock &BB : F)
605  for (Instruction &I : BB)
606  if (auto *SP = dyn_cast<GCStatepointInst>(&I))
607  Statepoints.push_back(SP);
608  for (auto &I : Statepoints)
609  EverMadeChange |= simplifyOffsetableRelocate(*I);
610  }
611 
612  // Do this last to clean up use-before-def scenarios introduced by other
613  // preparatory transforms.
614  EverMadeChange |= placeDbgValues(F);
615  EverMadeChange |= placePseudoProbes(F);
616 
617 #ifndef NDEBUG
618  if (VerifyBFIUpdates)
619  verifyBFIUpdates(F);
620 #endif
621 
622  return EverMadeChange;
623 }
624 
625 bool CodeGenPrepare::eliminateAssumptions(Function &F) {
626  bool MadeChange = false;
627  for (BasicBlock &BB : F) {
628  CurInstIterator = BB.begin();
629  while (CurInstIterator != BB.end()) {
630  Instruction *I = &*(CurInstIterator++);
631  if (auto *Assume = dyn_cast<AssumeInst>(I)) {
632  MadeChange = true;
633  Value *Operand = Assume->getOperand(0);
634  Assume->eraseFromParent();
635 
636  resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
637  RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
638  });
639  }
640  }
641  }
642  return MadeChange;
643 }
644 
645 /// An instruction is about to be deleted, so remove all references to it in our
646 /// GEP-tracking data structures.
647 void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
648  LargeOffsetGEPMap.erase(V);
649  NewGEPBases.erase(V);
650 
651  auto GEP = dyn_cast<GetElementPtrInst>(V);
652  if (!GEP)
653  return;
654 
655  LargeOffsetGEPID.erase(GEP);
656 
657  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
658  if (VecI == LargeOffsetGEPMap.end())
659  return;
660 
661  auto &GEPVector = VecI->second;
662  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
663 
664  if (GEPVector.empty())
665  LargeOffsetGEPMap.erase(VecI);
666 }
667 
668 // Verify BFI has been updated correctly by recomputing BFI and comparing them.
669 void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
670  DominatorTree NewDT(F);
671  LoopInfo NewLI(NewDT);
672  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
673  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
674  NewBFI.verifyMatch(*BFI);
675 }
676 
677 /// Merge basic blocks which are connected by a single edge, where one of the
678 /// basic blocks has a single successor pointing to the other basic block,
679 /// which has a single predecessor.
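/// For example (illustrative): if BB1 ends in "br label %BB2" and BB2 has no
/// other predecessors, BB2 is folded into BB1 and then erased.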
680 bool CodeGenPrepare::eliminateFallThrough(Function &F) {
681  bool Changed = false;
682  // Scan all of the blocks in the function, except for the entry block.
683  // Use a temporary array to avoid the iterator being invalidated when
684  // deleting blocks.
685  SmallVector<WeakTrackingVH, 16> Blocks;
686  for (auto &Block : llvm::drop_begin(F))
687  Blocks.push_back(&Block);
688 
689  SmallSet<WeakTrackingVH, 16> Preds;
690  for (auto &Block : Blocks) {
691  auto *BB = cast_or_null<BasicBlock>(Block);
692  if (!BB)
693  continue;
694  // If the destination block has a single pred, then this is a trivial
695  // edge, just collapse it.
696  BasicBlock *SinglePred = BB->getSinglePredecessor();
697 
698  // Don't merge if BB's address is taken.
699  if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue;
700 
701  BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
702  if (Term && !Term->isConditional()) {
703  Changed = true;
704  LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
705 
706  // Merge BB into SinglePred and delete it.
707  MergeBlockIntoPredecessor(BB);
708  Preds.insert(SinglePred);
709  }
710  }
711 
712  // (Repeatedly) merging blocks into their predecessors can create redundant
713  // debug intrinsics.
714  for (auto &Pred : Preds)
715  if (auto *BB = cast_or_null<BasicBlock>(Pred))
716  RemoveRedundantDbgInstrs(BB);
717 
718  return Changed;
719 }
720 
721 /// Find a destination block from BB if BB is mergeable empty block.
722 BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
723  // If this block doesn't end with an uncond branch, ignore it.
724  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
725  if (!BI || !BI->isUnconditional())
726  return nullptr;
727 
728  // If the instruction before the branch (skipping debug info) isn't a phi
729  // node, then other stuff is happening here.
730  BasicBlock::iterator BBI = BI->getIterator();
731  if (BBI != BB->begin()) {
732  --BBI;
733  while (isa<DbgInfoIntrinsic>(BBI)) {
734  if (BBI == BB->begin())
735  break;
736  --BBI;
737  }
738  if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
739  return nullptr;
740  }
741 
742  // Do not break infinite loops.
743  BasicBlock *DestBB = BI->getSuccessor(0);
744  if (DestBB == BB)
745  return nullptr;
746 
747  if (!canMergeBlocks(BB, DestBB))
748  DestBB = nullptr;
749 
750  return DestBB;
751 }
752 
753 /// Eliminate blocks that contain only PHI nodes, debug info directives, and an
754 /// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
755 /// edges in ways that are non-optimal for isel. Start by eliminating these
756 /// blocks so we can split them the way we want them.
757 bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
758  SmallPtrSet<BasicBlock *, 16> Preheaders;
759  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
760  while (!LoopList.empty()) {
761  Loop *L = LoopList.pop_back_val();
762  llvm::append_range(LoopList, *L);
763  if (BasicBlock *Preheader = L->getLoopPreheader())
764  Preheaders.insert(Preheader);
765  }
766 
767  bool MadeChange = false;
768  // Copy blocks into a temporary array to avoid iterator invalidation issues
769  // as we remove them.
770  // Note that this intentionally skips the entry block.
771  SmallVector<WeakTrackingVH, 16> Blocks;
772  for (auto &Block : llvm::drop_begin(F))
773  Blocks.push_back(&Block);
774 
775  for (auto &Block : Blocks) {
776  BasicBlock *BB = cast_or_null<BasicBlock>(Block);
777  if (!BB)
778  continue;
779  BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
780  if (!DestBB ||
781  !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
782  continue;
783 
784  eliminateMostlyEmptyBlock(BB);
785  MadeChange = true;
786  }
787  return MadeChange;
788 }
789 
790 bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
791  BasicBlock *DestBB,
792  bool isPreheader) {
793  // Do not delete loop preheaders if doing so would create a critical edge.
794  // Loop preheaders can be good locations to spill registers. If the
795  // preheader is deleted and we create a critical edge, registers may be
796  // spilled in the loop body instead.
797  if (!DisablePreheaderProtect && isPreheader &&
798  !(BB->getSinglePredecessor() &&
799  BB->getSinglePredecessor()->getSingleSuccessor()))
800  return false;
801 
802  // Skip merging if the block's successor is also a successor to any callbr
803  // that leads to this block.
804  // FIXME: Is this really needed? Is this a correctness issue?
805  for (BasicBlock *Pred : predecessors(BB)) {
806  if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
807  for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
808  if (DestBB == CBI->getSuccessor(i))
809  return false;
810  }
811 
812  // Try to skip merging if the unique predecessor of BB is terminated by a
813  // switch or indirect branch instruction, and BB is used as an incoming block
814  // of PHIs in DestBB. In such a case, merging BB and DestBB would cause ISel to
815  // add COPY instructions in the predecessor of BB instead of BB (if it is not
816  // merged). Note that the critical edge created by merging such blocks won't be
817  // split in MachineSink because the jump table is not analyzable. By keeping
818  // such an empty block (BB), ISel will place COPY instructions in BB, not in the
819  // predecessor of BB.
820  BasicBlock *Pred = BB->getUniquePredecessor();
821  if (!Pred ||
822  !(isa<SwitchInst>(Pred->getTerminator()) ||
823  isa<IndirectBrInst>(Pred->getTerminator())))
824  return true;
825 
826  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
827  return true;
828 
829  // We use a simple cost heuristic which determines that skipping merging is
830  // profitable if the cost of skipping merging is less than the cost of
831  // merging: Cost(skipping merging) < Cost(merging BB), where the
832  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
833  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
834  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
835  // Freq(Pred) / Freq(BB) > 2.
836  // Note that if there are multiple empty blocks sharing the same incoming
837  // value for the PHIs in the DestBB, we consider them together. In such
838  // case, Cost(merging BB) will be the sum of their frequencies.
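 //
 // For example (illustrative): with the default cgp-freq-ratio-to-skip-merge
 // of 2, merging is skipped when Pred executes more than twice as often as the
 // empty block(s) that feed the same incoming value into DestBB's PHIs.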
839 
840  if (!isa<PHINode>(DestBB->begin()))
841  return true;
842 
843  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
844 
845  // Find all other incoming blocks from which incoming values of all PHIs in
846  // DestBB are the same as the ones from BB.
847  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
848  if (DestBBPred == BB)
849  continue;
850 
851  if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
852  return DestPN.getIncomingValueForBlock(BB) ==
853  DestPN.getIncomingValueForBlock(DestBBPred);
854  }))
855  SameIncomingValueBBs.insert(DestBBPred);
856  }
857 
858  // See if all of BB's incoming values are the same as the value from Pred. In
859  // this case, there is no reason to skip merging because COPYs are expected to
860  // be placed in Pred already.
861  if (SameIncomingValueBBs.count(Pred))
862  return true;
863 
864  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
865  BlockFrequency BBFreq = BFI->getBlockFreq(BB);
866 
867  for (auto *SameValueBB : SameIncomingValueBBs)
868  if (SameValueBB->getUniquePredecessor() == Pred &&
869  DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
870  BBFreq += BFI->getBlockFreq(SameValueBB);
871 
872  return PredFreq.getFrequency() <=
873  BBFreq.getFrequency() * FreqRatioToSkipMerge;
874 }
875 
876 /// Return true if we can merge BB into DestBB if there is a single
877 /// unconditional branch between them, and BB contains no other non-phi
878 /// instructions.
879 bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
880  const BasicBlock *DestBB) const {
881  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
882  // the successor. If there are more complex conditions (e.g. preheaders),
883  // don't mess around with them.
884  for (const PHINode &PN : BB->phis()) {
885  for (const User *U : PN.users()) {
886  const Instruction *UI = cast<Instruction>(U);
887  if (UI->getParent() != DestBB || !isa<PHINode>(UI))
888  return false;
889  // If User is inside DestBB block and it is a PHINode then check
890  // incoming value. If incoming value is not from BB then this is
891  // a complex condition (e.g. preheaders) we want to avoid here.
892  if (UI->getParent() == DestBB) {
893  if (const PHINode *UPN = dyn_cast<PHINode>(UI))
894  for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
895  Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
896  if (Insn && Insn->getParent() == BB &&
897  Insn->getParent() != UPN->getIncomingBlock(I))
898  return false;
899  }
900  }
901  }
902  }
903 
904  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
905  // and DestBB may have conflicting incoming values for the block. If so, we
906  // can't merge the block.
907  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
908  if (!DestBBPN) return true; // no conflict.
909 
910  // Collect the preds of BB.
911  SmallPtrSet<const BasicBlock *, 16> BBPreds;
912  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
913  // It is faster to get preds from a PHI than with pred_iterator.
914  for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
915  BBPreds.insert(BBPN->getIncomingBlock(i));
916  } else {
917  BBPreds.insert(pred_begin(BB), pred_end(BB));
918  }
919 
920  // Walk the preds of DestBB.
921  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
922  BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
923  if (BBPreds.count(Pred)) { // Common predecessor?
924  for (const PHINode &PN : DestBB->phis()) {
925  const Value *V1 = PN.getIncomingValueForBlock(Pred);
926  const Value *V2 = PN.getIncomingValueForBlock(BB);
927 
928  // If V2 is a phi node in BB, look up what the mapped value will be.
929  if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
930  if (V2PN->getParent() == BB)
931  V2 = V2PN->getIncomingValueForBlock(Pred);
932 
933  // If there is a conflict, bail out.
934  if (V1 != V2) return false;
935  }
936  }
937  }
938 
939  return true;
940 }
941 
942 /// Eliminate a basic block that has only phi's and an unconditional branch in
943 /// it.
944 void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
945  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
946  BasicBlock *DestBB = BI->getSuccessor(0);
947 
948  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
949  << *BB << *DestBB);
950 
951  // If the destination block has a single pred, then this is a trivial edge,
952  // just collapse it.
953  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
954  if (SinglePred != DestBB) {
955  assert(SinglePred == BB &&
956  "Single predecessor not the same as predecessor");
957  // Merge DestBB into SinglePred/BB and delete it.
958  MergeBasicBlockIntoOnlyPred(DestBB, nullptr);
959  // Note: BB(=SinglePred) will not be deleted on this path.
960  // DestBB(=its single successor) is the one that was deleted.
961  LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
962  return;
963  }
964  }
965 
966  // Otherwise, we have multiple predecessors of BB. Update the PHIs in DestBB
967  // to handle the new incoming edges it is about to have.
968  for (PHINode &PN : DestBB->phis()) {
969  // Remove the incoming value for BB, and remember it.
970  Value *InVal = PN.removeIncomingValue(BB, false);
971 
972  // Two options: either the InVal is a phi node defined in BB or it is some
973  // value that dominates BB.
974  PHINode *InValPhi = dyn_cast<PHINode>(InVal);
975  if (InValPhi && InValPhi->getParent() == BB) {
976  // Add all of the input values of the input PHI as inputs of this phi.
977  for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
978  PN.addIncoming(InValPhi->getIncomingValue(i),
979  InValPhi->getIncomingBlock(i));
980  } else {
981  // Otherwise, add one instance of the dominating value for each edge that
982  // we will be adding.
983  if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
984  for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
985  PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
986  } else {
987  for (BasicBlock *Pred : predecessors(BB))
988  PN.addIncoming(InVal, Pred);
989  }
990  }
991  }
992 
993  // The PHIs are now updated, change everything that refers to BB to use
994  // DestBB and remove BB.
995  BB->replaceAllUsesWith(DestBB);
996  BB->eraseFromParent();
997  ++NumBlocksElim;
998 
999  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1000 }
1001 
1002 // Computes a map of base pointer relocation instructions to corresponding
1003 // derived pointer relocation instructions given a vector of all relocate calls
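// For example (illustrative): given relocates with (base, derived) index pairs
// (4, 4) and (4, 5) of the same statepoint, the base relocate keyed by (4, 4)
// is mapped to a vector containing the derived relocate keyed by (4, 5).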
1004 static void computeBaseDerivedRelocateMap(
1005  const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1006  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1007  &RelocateInstMap) {
1008  // Collect information in two maps: one primarily for locating the base object
1009  // while filling the second map; the second map is the final structure holding
1010  // a mapping between Base and corresponding Derived relocate calls
1011  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1012  for (auto *ThisRelocate : AllRelocateCalls) {
1013  auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1014  ThisRelocate->getDerivedPtrIndex());
1015  RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1016  }
1017  for (auto &Item : RelocateIdxMap) {
1018  std::pair<unsigned, unsigned> Key = Item.first;
1019  if (Key.first == Key.second)
1020  // Base relocation: nothing to insert
1021  continue;
1022 
1023  GCRelocateInst *I = Item.second;
1024  auto BaseKey = std::make_pair(Key.first, Key.first);
1025 
1026  // We're iterating over RelocateIdxMap so we cannot modify it.
1027  auto MaybeBase = RelocateIdxMap.find(BaseKey);
1028  if (MaybeBase == RelocateIdxMap.end())
1029  // TODO: We might want to insert a new base object relocate and gep off
1030  // that, if there are enough derived object relocates.
1031  continue;
1032 
1033  RelocateInstMap[MaybeBase->second].push_back(I);
1034  }
1035 }
1036 
1037 // Accepts a GEP and extracts the operands into a vector provided they're all
1038 // small integer constants
1039 static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1040  SmallVectorImpl<Value *> &OffsetV) {
1041  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1042  // Only accept small constant integer operands
1043  auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1044  if (!Op || Op->getZExtValue() > 20)
1045  return false;
1046  }
1047 
1048  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1049  OffsetV.push_back(GEP->getOperand(i));
1050  return true;
1051 }
1052 
1053 // Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1054 // replace, computes a replacement, and applies it.
1055 static bool
1056 simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1057  const SmallVectorImpl<GCRelocateInst *> &Targets) {
1058  bool MadeChange = false;
1059  // We must ensure that the relocation of the derived pointer is defined after
1060  // the relocation of the base pointer. If we find a relocation corresponding to
1061  // this base that is defined earlier than the base's own relocation, we move
1062  // the base's relocation right before the one we found. We consider only
1063  // relocations in the same basic block as the base's relocation; relocations
1064  // from other basic blocks are skipped by this optimization and we do not care about them.
1065  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1066  &*R != RelocatedBase; ++R)
1067  if (auto *RI = dyn_cast<GCRelocateInst>(R))
1068  if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1069  if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1070  RelocatedBase->moveBefore(RI);
1071  break;
1072  }
1073 
1074  for (GCRelocateInst *ToReplace : Targets) {
1075  assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1076  "Not relocating a derived object of the original base object");
1077  if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1078  // A duplicate relocate call. TODO: coalesce duplicates.
1079  continue;
1080  }
1081 
1082  if (RelocatedBase->getParent() != ToReplace->getParent()) {
1083  // Base and derived relocates are in different basic blocks.
1084  // In this case transform is only valid when base dominates derived
1085  // relocate. However it would be too expensive to check dominance
1086  // for each such relocate, so we skip the whole transformation.
1087  continue;
1088  }
1089 
1090  Value *Base = ToReplace->getBasePtr();
1091  auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1092  if (!Derived || Derived->getPointerOperand() != Base)
1093  continue;
1094 
1095  SmallVector<Value *, 2> OffsetV;
1096  if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1097  continue;
1098 
1099  // Create a Builder and replace the target callsite with a gep
1100  assert(RelocatedBase->getNextNode() &&
1101  "Should always have one since it's not a terminator");
1102 
1103  // Insert after RelocatedBase
1104  IRBuilder<> Builder(RelocatedBase->getNextNode());
1105  Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1106 
1107  // If gc_relocate does not match the actual type, cast it to the right type.
1108  // In theory, there must be a bitcast after gc_relocate if the type does not
1109  // match, and we should reuse it to get the derived pointer. But it could be
1110  // cases like this:
1111  // bb1:
1112  // ...
1113  // %g1 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1114  // br label %merge
1115  //
1116  // bb2:
1117  // ...
1118  // %g2 = call coldcc i8 addrspace(1)* @llvm.experimental.gc.relocate.p1i8(...)
1119  // br label %merge
1120  //
1121  // merge:
1122  // %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1123  // %cast = bitcast i8 addrspace(1)* %p1 to i32 addrspace(1)*
1124  //
1125  // In this case, we cannot find the bitcast anymore. So we insert a new bitcast
1126  // no matter whether there is already one or not. In this way, we can handle all cases, and
1127  // the extra bitcast should be optimized away in later passes.
1128  Value *ActualRelocatedBase = RelocatedBase;
1129  if (RelocatedBase->getType() != Base->getType()) {
1130  ActualRelocatedBase =
1131  Builder.CreateBitCast(RelocatedBase, Base->getType());
1132  }
1133  Value *Replacement = Builder.CreateGEP(
1134  Derived->getSourceElementType(), ActualRelocatedBase, makeArrayRef(OffsetV));
1135  Replacement->takeName(ToReplace);
1136  // If the newly generated derived pointer's type does not match the original derived
1137  // pointer's type, cast the new derived pointer to match it. Same reasoning as above.
1138  Value *ActualReplacement = Replacement;
1139  if (Replacement->getType() != ToReplace->getType()) {
1140  ActualReplacement =
1141  Builder.CreateBitCast(Replacement, ToReplace->getType());
1142  }
1143  ToReplace->replaceAllUsesWith(ActualReplacement);
1144  ToReplace->eraseFromParent();
1145 
1146  MadeChange = true;
1147  }
1148  return MadeChange;
1149 }
1150 
1151 // Turns this:
1152 //
1153 // %base = ...
1154 // %ptr = gep %base + 15
1155 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1156 // %base' = relocate(%tok, i32 4, i32 4)
1157 // %ptr' = relocate(%tok, i32 4, i32 5)
1158 // %val = load %ptr'
1159 //
1160 // into this:
1161 //
1162 // %base = ...
1163 // %ptr = gep %base + 15
1164 // %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1165 // %base' = gc.relocate(%tok, i32 4, i32 4)
1166 // %ptr' = gep %base' + 15
1167 // %val = load %ptr'
1168 bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1169  bool MadeChange = false;
1170  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1171  for (auto *U : I.users())
1172  if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1173  // Collect all the relocate calls associated with a statepoint
1174  AllRelocateCalls.push_back(Relocate);
1175 
1176  // We need at least one base pointer relocation + one derived pointer
1177  // relocation to mangle
1178  if (AllRelocateCalls.size() < 2)
1179  return false;
1180 
1181  // RelocateInstMap is a mapping from the base relocate instruction to the
1182  // corresponding derived relocate instructions
1183  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1184  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1185  if (RelocateInstMap.empty())
1186  return false;
1187 
1188  for (auto &Item : RelocateInstMap)
1189  // Item.first is the RelocatedBase to offset against
1190  // Item.second is the vector of Targets to replace
1191  MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1192  return MadeChange;
1193 }
1194 
1195 /// Sink the specified cast instruction into its user blocks.
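/// For example (illustrative): a free cast defined in the entry block but used
/// only in %bb is re-created in %bb, so that SelectionDAG, which works one
/// basic block at a time, can fold it into its user instead of keeping a
/// cross-block virtual register live.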
1196 static bool SinkCast(CastInst *CI) {
1197  BasicBlock *DefBB = CI->getParent();
1198 
1199  /// InsertedCasts - Only insert a cast in each block once.
1200  DenseMap<BasicBlock*, CastInst*> InsertedCasts;
1201 
1202  bool MadeChange = false;
1203  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1204  UI != E; ) {
1205  Use &TheUse = UI.getUse();
1206  Instruction *User = cast<Instruction>(*UI);
1207 
1208  // Figure out which BB this cast is used in. For PHI's this is the
1209  // appropriate predecessor block.
1210  BasicBlock *UserBB = User->getParent();
1211  if (PHINode *PN = dyn_cast<PHINode>(User)) {
1212  UserBB = PN->getIncomingBlock(TheUse);
1213  }
1214 
1215  // Preincrement use iterator so we don't invalidate it.
1216  ++UI;
1217 
1218  // The first insertion point of a block containing an EH pad is after the
1219  // pad. If the pad is the user, we cannot sink the cast past the pad.
1220  if (User->isEHPad())
1221  continue;
1222 
1223  // If the block selected to receive the cast is an EH pad that does not
1224  // allow non-PHI instructions before the terminator, we can't sink the
1225  // cast.
1226  if (UserBB->getTerminator()->isEHPad())
1227  continue;
1228 
1229  // If this user is in the same block as the cast, don't change the cast.
1230  if (UserBB == DefBB) continue;
1231 
1232  // If we have already inserted a cast into this block, use it.
1233  CastInst *&InsertedCast = InsertedCasts[UserBB];
1234 
1235  if (!InsertedCast) {
1236  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1237  assert(InsertPt != UserBB->end());
1238  InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1239  CI->getType(), "", &*InsertPt);
1240  InsertedCast->setDebugLoc(CI->getDebugLoc());
1241  }
1242 
1243  // Replace a use of the cast with a use of the new cast.
1244  TheUse = InsertedCast;
1245  MadeChange = true;
1246  ++NumCastUses;
1247  }
1248 
1249  // If we removed all uses, nuke the cast.
1250  if (CI->use_empty()) {
1251  salvageDebugInfo(*CI);
1252  CI->eraseFromParent();
1253  MadeChange = true;
1254  }
1255 
1256  return MadeChange;
1257 }
1258 
1259 /// If the specified cast instruction is a noop copy (e.g. it's casting from
1260 /// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1261 /// reduce the number of virtual registers that must be created and coalesced.
1262 ///
1263 /// Return true if any changes are made.
1264 static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1265  const DataLayout &DL) {
1266  // Sink only "cheap" (or nop) address-space casts. This is a weaker condition
1267  // than sinking only nop casts, but is helpful on some platforms.
1268  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1269  if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1270  ASC->getDestAddressSpace()))
1271  return false;
1272  }
1273 
1274  // If this is a noop copy,
1275  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1276  EVT DstVT = TLI.getValueType(DL, CI->getType());
1277 
1278  // If this is an fp<->int conversion, it is not a noop copy.
1279  if (SrcVT.isInteger() != DstVT.isInteger())
1280  return false;
1281 
1282  // If this is an extension, it will be a zero or sign extension, which
1283  // isn't a noop.
1284  if (SrcVT.bitsLT(DstVT)) return false;
1285 
1286  // If these values will be promoted, find out what they will be promoted
1287  // to. This helps us consider truncates on PPC as noop copies when they
1288  // are.
1289  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1290  TargetLowering::TypePromoteInteger)
1291  SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1292  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1293  TargetLowering::TypePromoteInteger)
1294  DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1295 
1296  // If, after promotion, these are the same types, this is a noop copy.
1297  if (SrcVT != DstVT)
1298  return false;
1299 
1300  return SinkCast(CI);
1301 }
1302 
1303 // Match a simple increment by constant operation. Note that if a sub is
1304 // matched, the step is negated (as if the step had been canonicalized to
1305 // an add, even though we leave the instruction alone.)
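// For example (illustrative): "%iv.next = add i64 %iv, 4" matches with
// Step == 4, while "%iv.next = sub i64 %iv, 4" matches with Step == -4.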
1306 static bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1307  Constant *&Step) {
1308  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1309  match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1310  m_Instruction(LHS), m_Constant(Step)))))
1311  return true;
1312  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1313  match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1314  m_Instruction(LHS), m_Constant(Step))))) {
1315  Step = ConstantExpr::getNeg(Step);
1316  return true;
1317  }
1318  return false;
1319 }
1320 
1321 /// If given \p PN is an inductive variable with value IVInc coming from the
1322 /// backedge, and on each iteration it gets increased by Step, return pair
1323 /// <IVInc, Step>. Otherwise, return None.
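/// For example (illustrative): given the header PHI
///   %iv = phi i64 [ 0, %preheader ], [ %iv.next, %latch ]
/// with the latch increment "%iv.next = add i64 %iv, 1", this returns
/// <%iv.next, 1>.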
1324 static Optional<std::pair<Instruction *, Constant *>>
1325 getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1326  const Loop *L = LI->getLoopFor(PN->getParent());
1327  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1328  return None;
1329  auto *IVInc =
1330  dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1331  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1332  return None;
1333  Instruction *LHS = nullptr;
1334  Constant *Step = nullptr;
1335  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1336  return std::make_pair(IVInc, Step);
1337  return None;
1338 }
1339 
1340 static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1341  auto *I = dyn_cast<Instruction>(V);
1342  if (!I)
1343  return false;
1344  Instruction *LHS = nullptr;
1345  Constant *Step = nullptr;
1346  if (!matchIncrement(I, LHS, Step))
1347  return false;
1348  if (auto *PN = dyn_cast<PHINode>(LHS))
1349  if (auto IVInc = getIVIncrement(PN, LI))
1350  return IVInc->first == I;
1351  return false;
1352 }
1353 
1354 bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1355  Value *Arg0, Value *Arg1,
1356  CmpInst *Cmp,
1357  Intrinsic::ID IID) {
1358  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1359  if (!isIVIncrement(BO, LI))
1360  return false;
1361  const Loop *L = LI->getLoopFor(BO->getParent());
1362  assert(L && "L should not be null after isIVIncrement()");
1363  // Do not risk moving the increment into a child loop.
1364  if (LI->getLoopFor(Cmp->getParent()) != L)
1365  return false;
1366 
1367  // Finally, we need to ensure that the insert point will dominate all
1368  // existing uses of the increment.
1369 
1370  auto &DT = getDT(*BO->getParent()->getParent());
1371  if (DT.dominates(Cmp->getParent(), BO->getParent()))
1372  // If we're moving up the dom tree, all uses are trivially dominated.
1373  // (This is the common case for code produced by LSR.)
1374  return true;
1375 
1376  // Otherwise, special case the single use in the phi recurrence.
1377  return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1378  };
1379  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1380  // We used to use a dominator tree here to allow multi-block optimization.
1381  // But that was problematic because:
1382  // 1. It could cause a perf regression by hoisting the math op into the
1383  // critical path.
1384  // 2. It could cause a perf regression by creating a value that was live
1385  // across multiple blocks and increasing register pressure.
1386  // 3. Use of a dominator tree could cause large compile-time regression.
1387  // This is because we recompute the DT on every change in the main CGP
1388  // run-loop. The recomputing is probably unnecessary in many cases, so if
1389  // that was fixed, using a DT here would be ok.
1390  //
1391  // There is one important particular case we still want to handle: if BO is
1392  // the IV increment. Important properties that make it profitable:
1393  // - We can speculate IV increment anywhere in the loop (as long as the
1394  // indvar Phi is its only user);
1395  // - Upon computing Cmp, we effectively compute something equivalent to the
1396  // IV increment (even though it looks different in the IR). So moving it up
1397  // to the cmp point does not really increase register pressure.
1398  return false;
1399  }
1400 
1401  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1402  if (BO->getOpcode() == Instruction::Add &&
1403  IID == Intrinsic::usub_with_overflow) {
1404  assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1405  Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
1406  }
1407 
1408  // Insert at the first instruction of the pair.
1409  Instruction *InsertPt = nullptr;
1410  for (Instruction &Iter : *Cmp->getParent()) {
1411  // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1412  // the overflow intrinsic are defined.
1413  if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1414  InsertPt = &Iter;
1415  break;
1416  }
1417  }
1418  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1419 
1420  IRBuilder<> Builder(InsertPt);
1421  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1422  if (BO->getOpcode() != Instruction::Xor) {
1423  Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1424  BO->replaceAllUsesWith(Math);
1425  } else
1426  assert(BO->hasOneUse() &&
1427  "Patterns with XOr should use the BO only in the compare");
1428  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1429  Cmp->replaceAllUsesWith(OV);
1430  Cmp->eraseFromParent();
1431  BO->eraseFromParent();
1432  return true;
1433 }
1434 
1435 /// Match special-case patterns that check for unsigned add overflow.
1436 static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1437  BinaryOperator *&Add) {
1438  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1439  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1440  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1441 
1442  // We are not expecting non-canonical/degenerate code. Just bail out.
1443  if (isa<Constant>(A))
1444  return false;
1445 
1446  ICmpInst::Predicate Pred = Cmp->getPredicate();
1447  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1448  B = ConstantInt::get(B->getType(), 1);
1449  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1450  B = ConstantInt::get(B->getType(), -1);
1451  else
1452  return false;
1453 
1454  // Check the users of the variable operand of the compare looking for an add
1455  // with the adjusted constant.
1456  for (User *U : A->users()) {
1457  if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1458  Add = cast<BinaryOperator>(U);
1459  return true;
1460  }
1461  }
1462  return false;
1463 }
1464 
1465 /// Try to combine the compare into a call to the llvm.uadd.with.overflow
1466 /// intrinsic. Return true if any changes were made.
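/// For example (illustrative):
///   %add = add i32 %a, %b
///   %cmp = icmp ult i32 %add, %a      ; unsigned-overflow check
/// becomes
///   %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
///   %add  = extractvalue { i32, i1 } %uadd, 0
///   %cmp  = extractvalue { i32, i1 } %uadd, 1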
1467 bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1468  bool &ModifiedDT) {
1469  Value *A, *B;
1470  BinaryOperator *Add;
1471  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1472  if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1473  return false;
1474  // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1475  A = Add->getOperand(0);
1476  B = Add->getOperand(1);
1477  }
1478 
1479  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1480  TLI->getValueType(*DL, Add->getType()),
1481  Add->hasNUsesOrMore(2)))
1482  return false;
1483 
1484  // We don't want to move around uses of condition values this late, so we
1485  // check if it is legal to create the call to the intrinsic in the basic
1486  // block containing the icmp.
1487  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1488  return false;
1489 
1490  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1491  Intrinsic::uadd_with_overflow))
1492  return false;
1493 
1494  // Reset callers - do not crash by iterating over a dead instruction.
1495  ModifiedDT = true;
1496  return true;
1497 }
1498 
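/// Try to combine a subtract and its borrow check into a call to the
/// llvm.usub.with.overflow intrinsic, e.g. (illustrative):
///   %sub = sub i32 %a, %b
///   %cmp = icmp ult i32 %a, %b        ; did the subtraction wrap?
/// becomes a single usub.with.overflow call whose two results replace %sub
/// and %cmp. Return true if any changes were made.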
1499 bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1500  bool &ModifiedDT) {
1501  // We are not expecting non-canonical/degenerate code. Just bail out.
1502  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1503  if (isa<Constant>(A) && isa<Constant>(B))
1504  return false;
1505 
1506  // Convert (A u> B) to (A u< B) to simplify pattern matching.
1507  ICmpInst::Predicate Pred = Cmp->getPredicate();
1508  if (Pred == ICmpInst::ICMP_UGT) {
1509  std::swap(A, B);
1510  Pred = ICmpInst::ICMP_ULT;
1511  }
1512  // Convert special-case: (A == 0) is the same as (A u< 1).
1513  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1514  B = ConstantInt::get(B->getType(), 1);
1515  Pred = ICmpInst::ICMP_ULT;
1516  }
1517  // Convert special-case: (A != 0) is the same as (0 u< A).
1518  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1519  std::swap(A, B);
1520  Pred = ICmpInst::ICMP_ULT;
1521  }
1522  if (Pred != ICmpInst::ICMP_ULT)
1523  return false;
1524 
1525  // Walk the users of a variable operand of a compare looking for a subtract or
1526  // add with that same operand. Also match the 2nd operand of the compare to
1527  // the add/sub, but that may be a negated constant operand of an add.
1528  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1529  BinaryOperator *Sub = nullptr;
1530  for (User *U : CmpVariableOperand->users()) {
1531  // A - B, A u< B --> usubo(A, B)
1532  if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1533  Sub = cast<BinaryOperator>(U);
1534  break;
1535  }
1536 
1537  // A + (-C), A u< C (canonicalized form of (sub A, C))
1538  const APInt *CmpC, *AddC;
1539  if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1540  match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1541  Sub = cast<BinaryOperator>(U);
1542  break;
1543  }
1544  }
1545  if (!Sub)
1546  return false;
1547 
1548  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1549  TLI->getValueType(*DL, Sub->getType()),
1550  Sub->hasNUsesOrMore(2)))
1551  return false;
1552 
1553  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1554  Cmp, Intrinsic::usub_with_overflow))
1555  return false;
1556 
1557  // Reset callers - do not crash by iterating over a dead instruction.
1558  ModifiedDT = true;
1559  return true;
1560 }
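// A minimal sketch of the rewrite (hypothetical IR, assuming the target
// reports USUBO as profitable):
//   %sub = sub i32 %a, %b
//   %ov  = icmp ult i32 %a, %b          ; borrow check: a - b wraps iff a u< b
// becomes
//   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
//   %sub = extractvalue { i32, i1 } %m, 0
//   %ov  = extractvalue { i32, i1 } %m, 1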
1561 
1562 /// Sink the given CmpInst into user blocks to reduce the number of virtual
1563 /// registers that must be created and coalesced. This is a clear win except on
1564 /// targets with multiple condition code registers (PowerPC), where it might
1565 /// lose; some adjustment may be wanted there.
1566 ///
1567 /// Return true if any changes are made.
1568 static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1569  if (TLI.hasMultipleConditionRegisters())
1570  return false;
1571 
1572  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1573  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1574  return false;
1575 
1576  // Only insert a cmp in each block once.
1577  DenseMap<BasicBlock*, CmpInst*> InsertedCmps;
1578 
1579  bool MadeChange = false;
1580  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1581  UI != E; ) {
1582  Use &TheUse = UI.getUse();
1583  Instruction *User = cast<Instruction>(*UI);
1584 
1585  // Preincrement use iterator so we don't invalidate it.
1586  ++UI;
1587 
1588  // Don't bother for PHI nodes.
1589  if (isa<PHINode>(User))
1590  continue;
1591 
1592  // Figure out which BB this cmp is used in.
1593  BasicBlock *UserBB = User->getParent();
1594  BasicBlock *DefBB = Cmp->getParent();
1595 
1596  // If this user is in the same block as the cmp, don't change the cmp.
1597  if (UserBB == DefBB) continue;
1598 
1599  // If we have already inserted a cmp into this block, use it.
1600  CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1601 
1602  if (!InsertedCmp) {
1603  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1604  assert(InsertPt != UserBB->end());
1605  InsertedCmp =
1606  CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1607  Cmp->getOperand(0), Cmp->getOperand(1), "",
1608  &*InsertPt);
1609  // Propagate the debug info.
1610  InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1611  }
1612 
1613  // Replace a use of the cmp with a use of the new cmp.
1614  TheUse = InsertedCmp;
1615  MadeChange = true;
1616  ++NumCmpUses;
1617  }
1618 
1619  // If we removed all uses, nuke the cmp.
1620  if (Cmp->use_empty()) {
1621  Cmp->eraseFromParent();
1622  MadeChange = true;
1623  }
1624 
1625  return MadeChange;
1626 }
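// For illustration (hypothetical IR): a compare whose only use is a branch in
// another block is re-created in that block, so no virtual register has to
// carry the i1 value across the edge:
//   bb0:  %c = icmp eq i32 %x, 0          ; only used in bb1
//   bb1:  br i1 %c, label %t, label %f
// becomes
//   bb1:  %c.sunk = icmp eq i32 %x, 0
//         br i1 %c.sunk, label %t, label %f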
1627 
1628 /// For pattern like:
1629 ///
1630 /// DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1631 /// ...
1632 /// DomBB:
1633 /// ...
1634 /// br DomCond, TrueBB, CmpBB
1635 /// CmpBB: (with DomBB being the single predecessor)
1636 /// ...
1637 /// Cmp = icmp eq CmpOp0, CmpOp1
1638 /// ...
1639 ///
1640 /// This pattern would need two comparisons on targets where the lowering of
1641 /// icmp sgt/slt differs from the lowering of icmp eq (PowerPC). This function
1642 /// tries to convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0, CmpOp1'.
1643 /// After that, DomCond and Cmp can use the same comparison, saving one
1644 /// comparison.
1645 ///
1646 /// Return true if any changes are made.
1647 static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1648  const TargetLowering &TLI) {
1649  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1650  return false;
1651 
1652  ICmpInst::Predicate Pred = Cmp->getPredicate();
1653  if (Pred != ICmpInst::ICMP_EQ)
1654  return false;
1655 
1656  // If icmp eq has users other than BranchInst and SelectInst, converting it to
1657  // icmp slt/sgt would introduce more redundant LLVM IR.
1658  for (User *U : Cmp->users()) {
1659  if (isa<BranchInst>(U))
1660  continue;
1661  if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1662  continue;
1663  return false;
1664  }
1665 
1666  // This is a cheap/incomplete check for dominance - just match a single
1667  // predecessor with a conditional branch.
1668  BasicBlock *CmpBB = Cmp->getParent();
1669  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1670  if (!DomBB)
1671  return false;
1672 
1673  // We want to ensure that the only way control gets to the comparison of
1674  // interest is that a less/greater than comparison on the same operands is
1675  // false.
1676  Value *DomCond;
1677  BasicBlock *TrueBB, *FalseBB;
1678  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1679  return false;
1680  if (CmpBB != FalseBB)
1681  return false;
1682 
1683  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1684  ICmpInst::Predicate DomPred;
1685  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1686  return false;
1687  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1688  return false;
1689 
1690  // Convert the equality comparison to the opposite of the dominating
1691  // comparison and swap the direction for all branch/select users.
1692  // We have conceptually converted:
1693  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1694  // to
1695  // Res = (a < b) ? <LT_RES> : (a > b) ? <GT_RES> : <EQ_RES>;
1696  // And similarly for branches.
1697  for (User *U : Cmp->users()) {
1698  if (auto *BI = dyn_cast<BranchInst>(U)) {
1699  assert(BI->isConditional() && "Must be conditional");
1700  BI->swapSuccessors();
1701  continue;
1702  }
1703  if (auto *SI = dyn_cast<SelectInst>(U)) {
1704  // Swap operands
1705  SI->swapValues();
1706  SI->swapProfMetadata();
1707  continue;
1708  }
1709  llvm_unreachable("Must be a branch or a select");
1710  }
1711  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1712  return true;
1713 }
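// A minimal sketch of the conversion (hypothetical IR): given
//   dom:   %dc = icmp slt i32 %a, %b
//          br i1 %dc, label %lt, label %cmpbb
//   cmpbb: %c  = icmp eq i32 %a, %b
//          br i1 %c, label %eq, label %gt
// the equality compare is rewritten to the swapped dominating predicate and
// the branch successors are exchanged, so both blocks use the same comparison:
//   cmpbb: %c  = icmp sgt i32 %a, %b
//          br i1 %c, label %gt, label %eq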
1714 
1715 bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, bool &ModifiedDT) {
1716  if (sinkCmpExpression(Cmp, *TLI))
1717  return true;
1718 
1719  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1720  return true;
1721 
1722  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1723  return true;
1724 
1725  if (foldICmpWithDominatingICmp(Cmp, *TLI))
1726  return true;
1727 
1728  return false;
1729 }
1730 
1731 /// Duplicate and sink the given 'and' instruction into user blocks where it is
1732 /// used in a compare to allow isel to generate better code for targets where
1733 /// this operation can be combined.
1734 ///
1735 /// Return true if any changes are made.
1736 static bool sinkAndCmp0Expression(Instruction *AndI,
1737  const TargetLowering &TLI,
1738  SetOfInstrs &InsertedInsts) {
1739  // Double-check that we're not trying to optimize an instruction that was
1740  // already optimized by some other part of this pass.
1741  assert(!InsertedInsts.count(AndI) &&
1742  "Attempting to optimize already optimized and instruction");
1743  (void) InsertedInsts;
1744 
1745  // Nothing to do for single use in same basic block.
1746  if (AndI->hasOneUse() &&
1747  AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1748  return false;
1749 
1750  // Try to avoid cases where sinking/duplicating is likely to increase register
1751  // pressure.
1752  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1753  !isa<ConstantInt>(AndI->getOperand(1)) &&
1754  AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1755  return false;
1756 
1757  for (auto *U : AndI->users()) {
1758  Instruction *User = cast<Instruction>(U);
1759 
1760  // Only sink 'and' feeding icmp with 0.
1761  if (!isa<ICmpInst>(User))
1762  return false;
1763 
1764  auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1765  if (!CmpC || !CmpC->isZero())
1766  return false;
1767  }
1768 
1769  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
1770  return false;
1771 
1772  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
1773  LLVM_DEBUG(AndI->getParent()->dump());
1774 
1775  // Push the 'and' into the same block as the icmp 0. There should only be
1776  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
1777  // others, so we don't need to keep track of which BBs we insert into.
1778  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
1779  UI != E; ) {
1780  Use &TheUse = UI.getUse();
1781  Instruction *User = cast<Instruction>(*UI);
1782 
1783  // Preincrement use iterator so we don't invalidate it.
1784  ++UI;
1785 
1786  LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
1787 
1788  // Keep the 'and' in the same place if the use is already in the same block.
1789  Instruction *InsertPt =
1790  User->getParent() == AndI->getParent() ? AndI : User;
1791  Instruction *InsertedAnd =
1792  BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
1793  AndI->getOperand(1), "", InsertPt);
1794  // Propagate the debug info.
1795  InsertedAnd->setDebugLoc(AndI->getDebugLoc());
1796 
1797  // Replace a use of the 'and' with a use of the new 'and'.
1798  TheUse = InsertedAnd;
1799  ++NumAndUses;
1800  LLVM_DEBUG(User->getParent()->dump());
1801  }
1802 
1803  // We removed all uses, nuke the and.
1804  AndI->eraseFromParent();
1805  return true;
1806 }
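// For illustration (hypothetical IR, assuming the target reports the mask +
// compare-with-zero fold as beneficial): an 'and' that only feeds icmp-eq-0
// users in other blocks is duplicated next to each such compare so isel can
// fold it into a test-under-mask style instruction:
//   bb0:  %m = and i64 %x, 255
//   bb1:  %z = icmp eq i64 %m, 0
// becomes
//   bb1:  %m1 = and i64 %x, 255
//         %z  = icmp eq i64 %m1, 0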
1807 
1808 /// Check if the candidates could be combined with a shift instruction, which
1809 /// includes:
1810 /// 1. Truncate instruction
1811 /// 2. And instruction and the imm is a mask of the low bits:
1812 /// imm & (imm+1) == 0
1813 static bool isExtractBitsCandidateUse(Instruction *User) {
1814  if (!isa<TruncInst>(User)) {
1815  if (User->getOpcode() != Instruction::And ||
1816  !isa<ConstantInt>(User->getOperand(1)))
1817  return false;
1818 
1819  const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
1820 
1821  if ((Cimm & (Cimm + 1)).getBoolValue())
1822  return false;
1823  }
1824  return true;
1825 }
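// The low-bit-mask test above accepts constants such as 0xFF or 0xFFFF
// (imm & (imm + 1) == 0) and rejects masks with holes or high bits set,
// e.g. 0xF0 fails because 0xF0 & 0xF1 == 0xF0 != 0.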
1826 
1827 /// Sink both shift and truncate instruction to the use of truncate's BB.
1828 static bool
1829 SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
1830  DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
1831  const TargetLowering &TLI, const DataLayout &DL) {
1832  BasicBlock *UserBB = User->getParent();
1833  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
1834  auto *TruncI = cast<TruncInst>(User);
1835  bool MadeChange = false;
1836 
1837  for (Value::user_iterator TruncUI = TruncI->user_begin(),
1838  TruncE = TruncI->user_end();
1839  TruncUI != TruncE;) {
1840 
1841  Use &TruncTheUse = TruncUI.getUse();
1842  Instruction *TruncUser = cast<Instruction>(*TruncUI);
1843  // Preincrement use iterator so we don't invalidate it.
1844 
1845  ++TruncUI;
1846 
1847  int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
1848  if (!ISDOpcode)
1849  continue;
1850 
1851  // If the use is actually a legal node, there will not be an
1852  // implicit truncate.
1853  // FIXME: always querying the result type is just an
1854  // approximation; some nodes' legality is determined by the
1855  // operand or other means. There's no good way to find out though.
1856  if (TLI.isOperationLegalOrCustom(
1857  ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
1858  continue;
1859 
1860  // Don't bother for PHI nodes.
1861  if (isa<PHINode>(TruncUser))
1862  continue;
1863 
1864  BasicBlock *TruncUserBB = TruncUser->getParent();
1865 
1866  if (UserBB == TruncUserBB)
1867  continue;
1868 
1869  BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
1870  CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
1871 
1872  if (!InsertedShift && !InsertedTrunc) {
1873  BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
1874  assert(InsertPt != TruncUserBB->end());
1875  // Sink the shift
1876  if (ShiftI->getOpcode() == Instruction::AShr)
1877  InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1878  "", &*InsertPt);
1879  else
1880  InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1881  "", &*InsertPt);
1882  InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1883 
1884  // Sink the trunc
1885  BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
1886  TruncInsertPt++;
1887  assert(TruncInsertPt != TruncUserBB->end());
1888 
1889  InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
1890  TruncI->getType(), "", &*TruncInsertPt);
1891  InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
1892 
1893  MadeChange = true;
1894 
1895  TruncTheUse = InsertedTrunc;
1896  }
1897  }
1898  return MadeChange;
1899 }
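// For illustration (hypothetical IR, assuming i16 is not a legal type on the
// target): when the shift and trunc start out together but the trunc's user
// lives elsewhere, both are re-created in the user's block so the pair can
// still become a bit-extract there:
//   bb1:  %s = lshr i64 %arg, 32
//         %t = trunc i64 %s to i16
//   bb2:  %c = icmp eq i16 %t, %y
// becomes (in bb2)
//         %s1 = lshr i64 %arg, 32
//         %t1 = trunc i64 %s1 to i16
//         %c  = icmp eq i16 %t1, %y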
1900 
1901 /// Sink the shift *right* instruction into user blocks if the uses could
1902 /// potentially be combined with this shift instruction to generate a BitExtract
1903 /// instruction. It is only applied if the architecture supports the BitExtract
1904 /// instruction. Here is an example:
1905 /// BB1:
1906 /// %x.extract.shift = lshr i64 %arg1, 32
1907 /// BB2:
1908 /// %x.extract.trunc = trunc i64 %x.extract.shift to i16
1909 /// ==>
1910 ///
1911 /// BB2:
1912 /// %x.extract.shift.1 = lshr i64 %arg1, 32
1913 /// %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
1914 ///
1915 /// CodeGen will recognize the pattern in BB2 and generate BitExtract
1916 /// instruction.
1917 /// Return true if any changes are made.
1918 static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
1919  const TargetLowering &TLI,
1920  const DataLayout &DL) {
1921  BasicBlock *DefBB = ShiftI->getParent();
1922 
1923  /// Only insert instructions in each block once.
1924  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
1925 
1926  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
1927 
1928  bool MadeChange = false;
1929  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
1930  UI != E;) {
1931  Use &TheUse = UI.getUse();
1932  Instruction *User = cast<Instruction>(*UI);
1933  // Preincrement use iterator so we don't invalidate it.
1934  ++UI;
1935 
1936  // Don't bother for PHI nodes.
1937  if (isa<PHINode>(User))
1938  continue;
1939 
1940  if (!isExtractBitsCandidateUse(User))
1941  continue;
1942 
1943  BasicBlock *UserBB = User->getParent();
1944 
1945  if (UserBB == DefBB) {
1946  // If the shift and truncate instructions are in the same BB, the use of
1947  // the truncate (TruncUse) may still introduce another truncate if it is not
1948  // legal. In this case, we would like to sink both the shift and the
1949  // truncate instruction to the BB of TruncUse.
1950  // for example:
1951  // BB1:
1952  // i64 shift.result = lshr i64 opnd, imm
1953  // trunc.result = trunc shift.result to i16
1954  //
1955  // BB2:
1956  // ----> We will have an implicit truncate here if the architecture does
1957  // not have i16 compare.
1958  // cmp i16 trunc.result, opnd2
1959  //
1960  if (isa<TruncInst>(User) && shiftIsLegal
1961  // If the type of the truncate is legal, no truncate will be
1962  // introduced in other basic blocks.
1963  &&
1964  (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
1965  MadeChange =
1966  SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
1967 
1968  continue;
1969  }
1970  // If we have already inserted a shift into this block, use it.
1971  BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
1972 
1973  if (!InsertedShift) {
1974  BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1975  assert(InsertPt != UserBB->end());
1976 
1977  if (ShiftI->getOpcode() == Instruction::AShr)
1978  InsertedShift = BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI,
1979  "", &*InsertPt);
1980  else
1981  InsertedShift = BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI,
1982  "", &*InsertPt);
1983  InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
1984 
1985  MadeChange = true;
1986  }
1987 
1988  // Replace a use of the shift with a use of the new shift.
1989  TheUse = InsertedShift;
1990  }
1991 
1992  // If we removed all uses, or there are none, nuke the shift.
1993  if (ShiftI->use_empty()) {
1994  salvageDebugInfo(*ShiftI);
1995  ShiftI->eraseFromParent();
1996  MadeChange = true;
1997  }
1998 
1999  return MadeChange;
2000 }
2001 
2002 /// If counting leading or trailing zeros is an expensive operation and a zero
2003 /// input is defined, add a check for zero to avoid calling the intrinsic.
2004 ///
2005 /// We want to transform:
2006 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2007 ///
2008 /// into:
2009 /// entry:
2010 /// %cmpz = icmp eq i64 %A, 0
2011 /// br i1 %cmpz, label %cond.end, label %cond.false
2012 /// cond.false:
2013 /// %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2014 /// br label %cond.end
2015 /// cond.end:
2016 /// %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2017 ///
2018 /// If the transform is performed, return true and set ModifiedDT to true.
2019 static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2020  const TargetLowering *TLI,
2021  const DataLayout *DL,
2022  bool &ModifiedDT) {
2023  // If a zero input is undefined, it doesn't make sense to despeculate that.
2024  if (match(CountZeros->getOperand(1), m_One()))
2025  return false;
2026 
2027  // If it's cheap to speculate, there's nothing to do.
2028  auto IntrinsicID = CountZeros->getIntrinsicID();
2029  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz()) ||
2030  (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz()))
2031  return false;
2032 
2033  // Only handle legal scalar cases. Anything else requires too much work.
2034  Type *Ty = CountZeros->getType();
2035  unsigned SizeInBits = Ty->getScalarSizeInBits();
2036  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2037  return false;
2038 
2039  // Bail if the value is never zero.
2040  if (llvm::isKnownNonZero(CountZeros->getOperand(0), *DL))
2041  return false;
2042 
2043  // The intrinsic will be sunk behind a compare against zero and branch.
2044  BasicBlock *StartBlock = CountZeros->getParent();
2045  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2046 
2047  // Create another block after the count zero intrinsic. A PHI will be added
2048  // in this block to select the result of the intrinsic or the bit-width
2049  // constant if the input to the intrinsic is zero.
2050  BasicBlock::iterator SplitPt = ++(BasicBlock::iterator(CountZeros));
2051  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2052 
2053  // Set up a builder to create a compare, conditional branch, and PHI.
2054  IRBuilder<> Builder(CountZeros->getContext());
2055  Builder.SetInsertPoint(StartBlock->getTerminator());
2056  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2057 
2058  // Replace the unconditional branch that was created by the first split with
2059  // a compare against zero and a conditional branch.
2060  Value *Zero = Constant::getNullValue(Ty);
2061  Value *Cmp = Builder.CreateICmpEQ(CountZeros->getOperand(0), Zero, "cmpz");
2062  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2063  StartBlock->getTerminator()->eraseFromParent();
2064 
2065  // Create a PHI in the end block to select either the output of the intrinsic
2066  // or the bit width of the operand.
2067  Builder.SetInsertPoint(&EndBlock->front());
2068  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2069  CountZeros->replaceAllUsesWith(PN);
2070  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2071  PN->addIncoming(BitWidth, StartBlock);
2072  PN->addIncoming(CountZeros, CallBlock);
2073 
2074  // We are explicitly handling the zero case, so we can set the intrinsic's
2075  // undefined zero argument to 'true'. This will also prevent reprocessing the
2076  // intrinsic; we only despeculate when a zero input is defined.
2077  CountZeros->setArgOperand(1, Builder.getTrue());
2078  ModifiedDT = true;
2079  return true;
2080 }
2081 
2082 bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool &ModifiedDT) {
2083  BasicBlock *BB = CI->getParent();
2084 
2085  // Lower inline assembly if we can.
2086  // If we found an inline asm expression, and if the target knows how to
2087  // lower it to normal LLVM code, do so now.
2088  if (CI->isInlineAsm()) {
2089  if (TLI->ExpandInlineAsm(CI)) {
2090  // Avoid invalidating the iterator.
2091  CurInstIterator = BB->begin();
2092  // Avoid processing instructions out of order, which could cause
2093  // reuse before a value is defined.
2094  SunkAddrs.clear();
2095  return true;
2096  }
2097  // Sink address computing for memory operands into the block.
2098  if (optimizeInlineAsmInst(CI))
2099  return true;
2100  }
2101 
2102  // Align the pointer arguments to this call if the target thinks it's a good
2103  // idea
2104  unsigned MinSize, PrefAlign;
2105  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2106  for (auto &Arg : CI->args()) {
2107  // We want to align both objects whose address is used directly and
2108  // objects whose address is used in casts and GEPs, though it only makes
2109  // sense for GEPs if the offset is a multiple of the desired alignment and
2110  // if size - offset meets the size threshold.
2111  if (!Arg->getType()->isPointerTy())
2112  continue;
2113  APInt Offset(DL->getIndexSizeInBits(
2114  cast<PointerType>(Arg->getType())->getAddressSpace()),
2115  0);
2116  Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2117  uint64_t Offset2 = Offset.getLimitedValue();
2118  if ((Offset2 & (PrefAlign-1)) != 0)
2119  continue;
2120  AllocaInst *AI;
2121  if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlignment() < PrefAlign &&
2122  DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2123  AI->setAlignment(Align(PrefAlign));
2124  // Global variables can only be aligned if they are defined in this
2125  // object (i.e. they are uniquely initialized in this object), and
2126  // over-aligning global variables that have an explicit section is
2127  // forbidden.
2128  GlobalVariable *GV;
2129  if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2130  GV->getPointerAlignment(*DL) < PrefAlign &&
2131  DL->getTypeAllocSize(GV->getValueType()) >=
2132  MinSize + Offset2)
2133  GV->setAlignment(MaybeAlign(PrefAlign));
2134  }
2135  // If this is a memcpy (or similar) then we may be able to improve the
2136  // alignment
2137  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2138  Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2139  MaybeAlign MIDestAlign = MI->getDestAlign();
2140  if (!MIDestAlign || DestAlign > *MIDestAlign)
2141  MI->setDestAlignment(DestAlign);
2142  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2143  MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2144  Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2145  if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2146  MTI->setSourceAlignment(SrcAlign);
2147  }
2148  }
2149  }
2150 
2151  // If we have a cold call site, try to sink addressing computation into the
2152  // cold block. This interacts with our handling for loads and stores to
2153  // ensure that we can fold all uses of a potential addressing computation
2154  // into their uses. TODO: generalize this to work over profiling data
2155  if (CI->hasFnAttr(Attribute::Cold) &&
2156  !OptSize && !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2157  for (auto &Arg : CI->args()) {
2158  if (!Arg->getType()->isPointerTy())
2159  continue;
2160  unsigned AS = Arg->getType()->getPointerAddressSpace();
2161  return optimizeMemoryInst(CI, Arg, Arg->getType(), AS);
2162  }
2163 
2164  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2165  if (II) {
2166  switch (II->getIntrinsicID()) {
2167  default: break;
2168  case Intrinsic::assume:
2169  llvm_unreachable("llvm.assume should have been removed already");
2170  case Intrinsic::experimental_widenable_condition: {
2171  // Give up on future widening opportunities so that we can fold away dead
2172  // paths and merge blocks before going into block-local instruction
2173  // selection.
2174  if (II->use_empty()) {
2175  II->eraseFromParent();
2176  return true;
2177  }
2178  Constant *RetVal = ConstantInt::getTrue(II->getContext());
2179  resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2180  replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2181  });
2182  return true;
2183  }
2184  case Intrinsic::objectsize:
2185  llvm_unreachable("llvm.objectsize.* should have been lowered already");
2186  case Intrinsic::is_constant:
2187  llvm_unreachable("llvm.is.constant.* should have been lowered already");
2188  case Intrinsic::aarch64_stlxr:
2189  case Intrinsic::aarch64_stxr: {
2190  ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2191  if (!ExtVal || !ExtVal->hasOneUse() ||
2192  ExtVal->getParent() == CI->getParent())
2193  return false;
2194  // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2195  ExtVal->moveBefore(CI);
2196  // Mark this instruction as "inserted by CGP", so that other
2197  // optimizations don't touch it.
2198  InsertedInsts.insert(ExtVal);
2199  return true;
2200  }
2201 
2202  case Intrinsic::launder_invariant_group:
2203  case Intrinsic::strip_invariant_group: {
2204  Value *ArgVal = II->getArgOperand(0);
2205  auto it = LargeOffsetGEPMap.find(II);
2206  if (it != LargeOffsetGEPMap.end()) {
2207  // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2208  // Make sure not to have to deal with iterator invalidation
2209  // after possibly adding ArgVal to LargeOffsetGEPMap.
2210  auto GEPs = std::move(it->second);
2211  LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2212  LargeOffsetGEPMap.erase(II);
2213  }
2214 
2215  II->replaceAllUsesWith(ArgVal);
2216  II->eraseFromParent();
2217  return true;
2218  }
2219  case Intrinsic::cttz:
2220  case Intrinsic::ctlz:
2221  // If counting zeros is expensive, try to avoid it.
2222  return despeculateCountZeros(II, TLI, DL, ModifiedDT);
2223  case Intrinsic::fshl:
2224  case Intrinsic::fshr:
2225  return optimizeFunnelShift(II);
2226  case Intrinsic::dbg_value:
2227  return fixupDbgValue(II);
2228  case Intrinsic::vscale: {
2229  // If datalayout has no special restrictions on vector data layout,
2230  // replace `llvm.vscale` by an equivalent constant expression
2231  // to benefit from cheap constant propagation.
2232  Type *ScalableVectorTy =
2233  VectorType::get(Type::getInt8Ty(II->getContext()), 1, true);
2234  if (DL->getTypeAllocSize(ScalableVectorTy).getKnownMinSize() == 8) {
2235  auto *Null = Constant::getNullValue(ScalableVectorTy->getPointerTo());
2236  auto *One = ConstantInt::getSigned(II->getType(), 1);
2237  auto *CGep =
2238  ConstantExpr::getGetElementPtr(ScalableVectorTy, Null, One);
2239  II->replaceAllUsesWith(ConstantExpr::getPtrToInt(CGep, II->getType()));
2240  II->eraseFromParent();
2241  return true;
2242  }
2243  break;
2244  }
2245  case Intrinsic::masked_gather:
2246  return optimizeGatherScatterInst(II, II->getArgOperand(0));
2247  case Intrinsic::masked_scatter:
2248  return optimizeGatherScatterInst(II, II->getArgOperand(1));
2249  }
2250 
2251  SmallVector<Value *, 2> PtrOps;
2252  Type *AccessTy;
2253  if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2254  while (!PtrOps.empty()) {
2255  Value *PtrVal = PtrOps.pop_back_val();
2256  unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2257  if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2258  return true;
2259  }
2260  }
2261 
2262  // From here on out we're working with named functions.
2263  if (!CI->getCalledFunction()) return false;
2264 
2265  // Lower all default uses of _chk calls. This is very similar
2266  // to what InstCombineCalls does, but here we are only lowering calls
2267  // to fortified library functions (e.g. __memcpy_chk) that have the default
2268  // "don't know" as the objectsize. Anything else should be left alone.
2269  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2270  IRBuilder<> Builder(CI);
2271  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2272  CI->replaceAllUsesWith(V);
2273  CI->eraseFromParent();
2274  return true;
2275  }
2276 
2277  return false;
2278 }
2279 
2280 /// Look for opportunities to duplicate return instructions to the predecessor
2281 /// to enable tail call optimizations. The case it is currently looking for is:
2282 /// @code
2283 /// bb0:
2284 /// %tmp0 = tail call i32 @f0()
2285 /// br label %return
2286 /// bb1:
2287 /// %tmp1 = tail call i32 @f1()
2288 /// br label %return
2289 /// bb2:
2290 /// %tmp2 = tail call i32 @f2()
2291 /// br label %return
2292 /// return:
2293 /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2294 /// ret i32 %retval
2295 /// @endcode
2296 ///
2297 /// =>
2298 ///
2299 /// @code
2300 /// bb0:
2301 /// %tmp0 = tail call i32 @f0()
2302 /// ret i32 %tmp0
2303 /// bb1:
2304 /// %tmp1 = tail call i32 @f1()
2305 /// ret i32 %tmp1
2306 /// bb2:
2307 /// %tmp2 = tail call i32 @f2()
2308 /// ret i32 %tmp2
2309 /// @endcode
2310 bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
2311  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2312  if (!RetI)
2313  return false;
2314 
2315  PHINode *PN = nullptr;
2316  ExtractValueInst *EVI = nullptr;
2317  BitCastInst *BCI = nullptr;
2318  Value *V = RetI->getReturnValue();
2319  if (V) {
2320  BCI = dyn_cast<BitCastInst>(V);
2321  if (BCI)
2322  V = BCI->getOperand(0);
2323 
2324  EVI = dyn_cast<ExtractValueInst>(V);
2325  if (EVI) {
2326  V = EVI->getOperand(0);
2327  if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2328  return false;
2329  }
2330 
2331  PN = dyn_cast<PHINode>(V);
2332  if (!PN)
2333  return false;
2334  }
2335 
2336  if (PN && PN->getParent() != BB)
2337  return false;
2338 
2339  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2340  const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2341  if (BC && BC->hasOneUse())
2342  Inst = BC->user_back();
2343 
2344  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2345  return II->getIntrinsicID() == Intrinsic::lifetime_end;
2346  return false;
2347  };
2348 
2349  // Make sure there are no instructions between the first instruction
2350  // and return.
2351  const Instruction *BI = BB->getFirstNonPHI();
2352  // Skip over debug and the bitcast.
2353  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2354  isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2355  BI = BI->getNextNode();
2356  if (BI != RetI)
2357  return false;
2358 
2359  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2360  /// call.
2361  const Function *F = BB->getParent();
2362  SmallVector<BasicBlock*, 4> TailCallBBs;
2363  if (PN) {
2364  for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2365  // Look through bitcasts.
2366  Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2367  CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2368  BasicBlock *PredBB = PN->getIncomingBlock(I);
2369  // Make sure the phi value is indeed produced by the tail call.
2370  if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2371  TLI->mayBeEmittedAsTailCall(CI) &&
2372  attributesPermitTailCall(F, CI, RetI, *TLI))
2373  TailCallBBs.push_back(PredBB);
2374  }
2375  } else {
2376  SmallPtrSet<BasicBlock*, 4> VisitedBBs;
2377  for (BasicBlock *Pred : predecessors(BB)) {
2378  if (!VisitedBBs.insert(Pred).second)
2379  continue;
2380  if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2381  CallInst *CI = dyn_cast<CallInst>(I);
2382  if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2383  attributesPermitTailCall(F, CI, RetI, *TLI))
2384  TailCallBBs.push_back(Pred);
2385  }
2386  }
2387  }
2388 
2389  bool Changed = false;
2390  for (auto const &TailCallBB : TailCallBBs) {
2391  // Make sure the call instruction is followed by an unconditional branch to
2392  // the return block.
2393  BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2394  if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2395  continue;
2396 
2397  // Duplicate the return into TailCallBB.
2398  (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2399  assert(!VerifyBFIUpdates ||
2400  BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2401  BFI->setBlockFreq(
2402  BB,
2403  (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)).getFrequency());
2404  ModifiedDT = Changed = true;
2405  ++NumRetsDup;
2406  }
2407 
2408  // If we eliminated all predecessors of the block, delete the block now.
2409  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2410  BB->eraseFromParent();
2411 
2412  return Changed;
2413 }
2414 
2415 //===----------------------------------------------------------------------===//
2416 // Memory Optimization
2417 //===----------------------------------------------------------------------===//
2418 
2419 namespace {
2420 
2421 /// This is an extended version of TargetLowering::AddrMode
2422 /// which holds actual Value*'s for register values.
2423 struct ExtAddrMode : public TargetLowering::AddrMode {
2424  Value *BaseReg = nullptr;
2425  Value *ScaledReg = nullptr;
2426  Value *OriginalValue = nullptr;
2427  bool InBounds = true;
2428 
2429  enum FieldName {
2430  NoField = 0x00,
2431  BaseRegField = 0x01,
2432  BaseGVField = 0x02,
2433  BaseOffsField = 0x04,
2434  ScaledRegField = 0x08,
2435  ScaleField = 0x10,
2436  MultipleFields = 0xff
2437  };
2438 
2439 
2440  ExtAddrMode() = default;
2441 
2442  void print(raw_ostream &OS) const;
2443  void dump() const;
2444 
2445  FieldName compare(const ExtAddrMode &other) {
2446  // First check that the types are the same on each field, as differing types
2447  // are something we can't cope with later on.
2448  if (BaseReg && other.BaseReg &&
2449  BaseReg->getType() != other.BaseReg->getType())
2450  return MultipleFields;
2451  if (BaseGV && other.BaseGV &&
2452  BaseGV->getType() != other.BaseGV->getType())
2453  return MultipleFields;
2454  if (ScaledReg && other.ScaledReg &&
2455  ScaledReg->getType() != other.ScaledReg->getType())
2456  return MultipleFields;
2457 
2458  // Conservatively reject 'inbounds' mismatches.
2459  if (InBounds != other.InBounds)
2460  return MultipleFields;
2461 
2462  // Check each field to see if it differs.
2463  unsigned Result = NoField;
2464  if (BaseReg != other.BaseReg)
2465  Result |= BaseRegField;
2466  if (BaseGV != other.BaseGV)
2467  Result |= BaseGVField;
2468  if (BaseOffs != other.BaseOffs)
2469  Result |= BaseOffsField;
2470  if (ScaledReg != other.ScaledReg)
2471  Result |= ScaledRegField;
2472  // Don't count 0 as being a different scale, because that actually means
2473  // unscaled (which will already be counted by having no ScaledReg).
2474  if (Scale && other.Scale && Scale != other.Scale)
2475  Result |= ScaleField;
2476 
2477  if (countPopulation(Result) > 1)
2478  return MultipleFields;
2479  else
2480  return static_cast<FieldName>(Result);
2481  }
2482 
2483  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2484  // with no offset.
2485  bool isTrivial() {
2486  // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2487  // trivial if at most one of these terms is nonzero, except that BaseGV and
2488  // BaseReg both being zero actually means a null pointer value, which we
2489  // consider to be 'non-zero' here.
2490  return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2491  }
2492 
2493  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2494  switch (Field) {
2495  default:
2496  return nullptr;
2497  case BaseRegField:
2498  return BaseReg;
2499  case BaseGVField:
2500  return BaseGV;
2501  case ScaledRegField:
2502  return ScaledReg;
2503  case BaseOffsField:
2504  return ConstantInt::get(IntPtrTy, BaseOffs);
2505  }
2506  }
2507 
2508  void SetCombinedField(FieldName Field, Value *V,
2509  const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2510  switch (Field) {
2511  default:
2512  llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2513  break;
2514  case ExtAddrMode::BaseRegField:
2515  BaseReg = V;
2516  break;
2517  case ExtAddrMode::BaseGVField:
2518  // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2519  // in the BaseReg field.
2520  assert(BaseReg == nullptr);
2521  BaseReg = V;
2522  BaseGV = nullptr;
2523  break;
2524  case ExtAddrMode::ScaledRegField:
2525  ScaledReg = V;
2526  // If we have a mix of scaled and unscaled addrmodes then we want scale
2527  // to be the scale and not zero.
2528  if (!Scale)
2529  for (const ExtAddrMode &AM : AddrModes)
2530  if (AM.Scale) {
2531  Scale = AM.Scale;
2532  break;
2533  }
2534  break;
2535  case ExtAddrMode::BaseOffsField:
2536  // The offset is no longer a constant, so it goes in ScaledReg with a
2537  // scale of 1.
2538  assert(ScaledReg == nullptr);
2539  ScaledReg = V;
2540  Scale = 1;
2541  BaseOffs = 0;
2542  break;
2543  }
2544  }
2545 };
2546 
2547 } // end anonymous namespace
2548 
2549 #ifndef NDEBUG
2550 static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2551  AM.print(OS);
2552  return OS;
2553 }
2554 #endif
2555 
2556 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2557 void ExtAddrMode::print(raw_ostream &OS) const {
2558  bool NeedPlus = false;
2559  OS << "[";
2560  if (InBounds)
2561  OS << "inbounds ";
2562  if (BaseGV) {
2563  OS << (NeedPlus ? " + " : "")
2564  << "GV:";
2565  BaseGV->printAsOperand(OS, /*PrintType=*/false);
2566  NeedPlus = true;
2567  }
2568 
2569  if (BaseOffs) {
2570  OS << (NeedPlus ? " + " : "")
2571  << BaseOffs;
2572  NeedPlus = true;
2573  }
2574 
2575  if (BaseReg) {
2576  OS << (NeedPlus ? " + " : "")
2577  << "Base:";
2578  BaseReg->printAsOperand(OS, /*PrintType=*/false);
2579  NeedPlus = true;
2580  }
2581  if (Scale) {
2582  OS << (NeedPlus ? " + " : "")
2583  << Scale << "*";
2584  ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2585  }
2586 
2587  OS << ']';
2588 }
2589 
2590 LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2591  print(dbgs());
2592  dbgs() << '\n';
2593 }
2594 #endif
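// For illustration (hypothetical values): with a base register %p, a constant
// offset of 16, and a scaled index 4 * %i, the printed form produced above
// would look like "[inbounds 16 + Base:%p + 4*%i]".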
2595 
2596 namespace {
2597 
2598 /// This class provides transaction-based operations on the IR.
2599 /// Every change made through this class is recorded in the internal state and
2600 /// can be undone (rollback) until commit is called.
2601 /// CGP does not check if instructions could be speculatively executed when
2602 /// moved. Preserving the original location would pessimize the debugging
2603 /// experience, as well as negatively impact the quality of sample PGO.
2604 class TypePromotionTransaction {
2605  /// This represents the common interface of the individual transaction.
2606  /// Each class implements the logic for doing one specific modification on
2607  /// the IR via the TypePromotionTransaction.
2608  class TypePromotionAction {
2609  protected:
2610  /// The Instruction modified.
2611  Instruction *Inst;
2612 
2613  public:
2614  /// Constructor of the action.
2615  /// The constructor performs the related action on the IR.
2616  TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2617 
2618  virtual ~TypePromotionAction() = default;
2619 
2620  /// Undo the modification done by this action.
2621  /// When this method is called, the IR must be in the same state as it was
2622  /// before this action was applied.
2623  /// \pre Undoing the action works if and only if the IR is in the exact same
2624  /// state as it was directly after this action was applied.
2625  virtual void undo() = 0;
2626 
2627  /// Advocate every change made by this action.
2628  /// When the results on the IR of the action are to be kept, it is important
2629  /// to call this function, otherwise hidden information may be kept forever.
2630  virtual void commit() {
2631  // Nothing to be done, this action is not doing anything.
2632  }
2633  };
2634 
2635  /// Utility to remember the position of an instruction.
2636  class InsertionHandler {
2637  /// Position of an instruction.
2638  /// Either an instruction:
2639  /// - Is the first in a basic block: BB is used.
2640  /// - Has a previous instruction: PrevInst is used.
2641  union {
2642  Instruction *PrevInst;
2643  BasicBlock *BB;
2644  } Point;
2645 
2646  /// Remember whether or not the instruction had a previous instruction.
2647  bool HasPrevInstruction;
2648 
2649  public:
2650  /// Record the position of \p Inst.
2651  InsertionHandler(Instruction *Inst) {
2652  BasicBlock::iterator It = Inst->getIterator();
2653  HasPrevInstruction = (It != (Inst->getParent()->begin()));
2654  if (HasPrevInstruction)
2655  Point.PrevInst = &*--It;
2656  else
2657  Point.BB = Inst->getParent();
2658  }
2659 
2660  /// Insert \p Inst at the recorded position.
2661  void insert(Instruction *Inst) {
2662  if (HasPrevInstruction) {
2663  if (Inst->getParent())
2664  Inst->removeFromParent();
2665  Inst->insertAfter(Point.PrevInst);
2666  } else {
2667  Instruction *Position = &*Point.BB->getFirstInsertionPt();
2668  if (Inst->getParent())
2669  Inst->moveBefore(Position);
2670  else
2671  Inst->insertBefore(Position);
2672  }
2673  }
2674  };
2675 
2676  /// Move an instruction before another.
2677  class InstructionMoveBefore : public TypePromotionAction {
2678  /// Original position of the instruction.
2679  InsertionHandler Position;
2680 
2681  public:
2682  /// Move \p Inst before \p Before.
2683  InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2684  : TypePromotionAction(Inst), Position(Inst) {
2685  LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2686  << "\n");
2687  Inst->moveBefore(Before);
2688  }
2689 
2690  /// Move the instruction back to its original position.
2691  void undo() override {
2692  LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2693  Position.insert(Inst);
2694  }
2695  };
2696 
2697  /// Set the operand of an instruction with a new value.
2698  class OperandSetter : public TypePromotionAction {
2699  /// Original operand of the instruction.
2700  Value *Origin;
2701 
2702  /// Index of the modified instruction.
2703  unsigned Idx;
2704 
2705  public:
2706  /// Set \p Idx operand of \p Inst with \p NewVal.
2707  OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2708  : TypePromotionAction(Inst), Idx(Idx) {
2709  LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2710  << "for:" << *Inst << "\n"
2711  << "with:" << *NewVal << "\n");
2712  Origin = Inst->getOperand(Idx);
2713  Inst->setOperand(Idx, NewVal);
2714  }
2715 
2716  /// Restore the original value of the instruction.
2717  void undo() override {
2718  LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2719  << "for: " << *Inst << "\n"
2720  << "with: " << *Origin << "\n");
2721  Inst->setOperand(Idx, Origin);
2722  }
2723  };
2724 
2725  /// Hide the operands of an instruction.
2726  /// Behave as if this instruction was not using any of its operands.
2727  class OperandsHider : public TypePromotionAction {
2728  /// The list of original operands.
2729  SmallVector<Value *, 4> OriginalValues;
2730 
2731  public:
2732  /// Remove \p Inst from the uses of the operands of \p Inst.
2733  OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2734  LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2735  unsigned NumOpnds = Inst->getNumOperands();
2736  OriginalValues.reserve(NumOpnds);
2737  for (unsigned It = 0; It < NumOpnds; ++It) {
2738  // Save the current operand.
2739  Value *Val = Inst->getOperand(It);
2740  OriginalValues.push_back(Val);
2741  // Set a dummy one.
2742  // We could use OperandSetter here, but that would imply an overhead
2743  // that we are not willing to pay.
2744  Inst->setOperand(It, UndefValue::get(Val->getType()));
2745  }
2746  }
2747 
2748  /// Restore the original list of uses.
2749  void undo() override {
2750  LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
2751  for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
2752  Inst->setOperand(It, OriginalValues[It]);
2753  }
2754  };
2755 
2756  /// Build a truncate instruction.
2757  class TruncBuilder : public TypePromotionAction {
2758  Value *Val;
2759 
2760  public:
2761  /// Build a truncate instruction of \p Opnd producing a \p Ty
2762  /// result.
2763  /// trunc Opnd to Ty.
2764  TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
2765  IRBuilder<> Builder(Opnd);
2766  Builder.SetCurrentDebugLocation(DebugLoc());
2767  Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
2768  LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
2769  }
2770 
2771  /// Get the built value.
2772  Value *getBuiltValue() { return Val; }
2773 
2774  /// Remove the built instruction.
2775  void undo() override {
2776  LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
2777  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2778  IVal->eraseFromParent();
2779  }
2780  };
2781 
2782  /// Build a sign extension instruction.
2783  class SExtBuilder : public TypePromotionAction {
2784  Value *Val;
2785 
2786  public:
2787  /// Build a sign extension instruction of \p Opnd producing a \p Ty
2788  /// result.
2789  /// sext Opnd to Ty.
2790  SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2791  : TypePromotionAction(InsertPt) {
2792  IRBuilder<> Builder(InsertPt);
2793  Val = Builder.CreateSExt(Opnd, Ty, "promoted");
2794  LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
2795  }
2796 
2797  /// Get the built value.
2798  Value *getBuiltValue() { return Val; }
2799 
2800  /// Remove the built instruction.
2801  void undo() override {
2802  LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
2803  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2804  IVal->eraseFromParent();
2805  }
2806  };
2807 
2808  /// Build a zero extension instruction.
2809  class ZExtBuilder : public TypePromotionAction {
2810  Value *Val;
2811 
2812  public:
2813  /// Build a zero extension instruction of \p Opnd producing a \p Ty
2814  /// result.
2815  /// zext Opnd to Ty.
2816  ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
2817  : TypePromotionAction(InsertPt) {
2818  IRBuilder<> Builder(InsertPt);
2819  Builder.SetCurrentDebugLocation(DebugLoc());
2820  Val = Builder.CreateZExt(Opnd, Ty, "promoted");
2821  LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
2822  }
2823 
2824  /// Get the built value.
2825  Value *getBuiltValue() { return Val; }
2826 
2827  /// Remove the built instruction.
2828  void undo() override {
2829  LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
2830  if (Instruction *IVal = dyn_cast<Instruction>(Val))
2831  IVal->eraseFromParent();
2832  }
2833  };
2834 
2835  /// Mutate an instruction to another type.
2836  class TypeMutator : public TypePromotionAction {
2837  /// Record the original type.
2838  Type *OrigTy;
2839 
2840  public:
2841  /// Mutate the type of \p Inst into \p NewTy.
2842  TypeMutator(Instruction *Inst, Type *NewTy)
2843  : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
2844  LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
2845  << "\n");
2846  Inst->mutateType(NewTy);
2847  }
2848 
2849  /// Mutate the instruction back to its original type.
2850  void undo() override {
2851  LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
2852  << "\n");
2853  Inst->mutateType(OrigTy);
2854  }
2855  };
2856 
2857  /// Replace the uses of an instruction by another instruction.
2858  class UsesReplacer : public TypePromotionAction {
2859  /// Helper structure to keep track of the replaced uses.
2860  struct InstructionAndIdx {
2861  /// The instruction that uses the replaced instruction.
2862  Instruction *Inst;
2863 
2864  /// The operand index at which the replaced instruction is used by Inst.
2865  unsigned Idx;
2866 
2867  InstructionAndIdx(Instruction *Inst, unsigned Idx)
2868  : Inst(Inst), Idx(Idx) {}
2869  };
2870 
2871  /// Keep track of the original uses (pair Instruction, Index).
2872  SmallVector<InstructionAndIdx, 4> OriginalUses;
2873  /// Keep track of the debug users.
2874  SmallVector<DbgValueInst *, 4> DbgValues;
2875 
2876  /// Keep track of the new value so that we can undo it by replacing
2877  /// instances of the new value with the original value.
2878  Value *New;
2879 
2880  using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
2881 
2882  public:
2883  /// Replace all the use of \p Inst by \p New.
2884  UsesReplacer(Instruction *Inst, Value *New)
2885  : TypePromotionAction(Inst), New(New) {
2886  LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
2887  << "\n");
2888  // Record the original uses.
2889  for (Use &U : Inst->uses()) {
2890  Instruction *UserI = cast<Instruction>(U.getUser());
2891  OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
2892  }
2893  // Record the debug uses separately. They are not in the instruction's
2894  // use list, but they are replaced by RAUW.
2895  findDbgValues(DbgValues, Inst);
2896 
2897  // Now, we can replace the uses.
2898  Inst->replaceAllUsesWith(New);
2899  }
2900 
2901  /// Reassign the original uses of Inst to Inst.
2902  void undo() override {
2903  LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
2904  for (InstructionAndIdx &Use : OriginalUses)
2905  Use.Inst->setOperand(Use.Idx, Inst);
2906  // RAUW has replaced all original uses with references to the new value,
2907  // including the debug uses. Since we are undoing the replacements,
2908  // the original debug uses must also be reinstated to maintain the
2909  // correctness and utility of debug value instructions.
2910  for (auto *DVI : DbgValues)
2911  DVI->replaceVariableLocationOp(New, Inst);
2912  }
2913  };
2914 
2915  /// Remove an instruction from the IR.
2916  class InstructionRemover : public TypePromotionAction {
2917  /// Original position of the instruction.
2918  InsertionHandler Inserter;
2919 
2920  /// Helper structure to hide all the links to the instruction. In other
2921  /// words, this helps to behave as if the instruction was removed.
2922  OperandsHider Hider;
2923 
2924  /// Keep track of the uses replaced, if any.
2925  UsesReplacer *Replacer = nullptr;
2926 
2927  /// Keep track of instructions removed.
2928  SetOfInstrs &RemovedInsts;
2929 
2930  public:
2931  /// Remove all reference of \p Inst and optionally replace all its
2932  /// uses with New.
2933  /// \p RemovedInsts Keep track of the instructions removed by this Action.
2934  /// \pre If !Inst->use_empty(), then New != nullptr
2935  InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
2936  Value *New = nullptr)
2937  : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
2938  RemovedInsts(RemovedInsts) {
2939  if (New)
2940  Replacer = new UsesReplacer(Inst, New);
2941  LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
2942  RemovedInsts.insert(Inst);
2943  /// The instructions removed here will be freed after completing
2944  /// optimizeBlock() for all blocks as we need to keep track of the
2945  /// removed instructions during promotion.
2946  Inst->removeFromParent();
2947  }
2948 
2949  ~InstructionRemover() override { delete Replacer; }
2950 
2951  /// Resurrect the instruction and reassign it to the proper uses if a
2952  /// new value was provided when building this action.
2953  void undo() override {
2954  LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
2955  Inserter.insert(Inst);
2956  if (Replacer)
2957  Replacer->undo();
2958  Hider.undo();
2959  RemovedInsts.erase(Inst);
2960  }
2961  };
2962 
2963 public:
2964  /// Restoration point.
2965  /// The restoration point is a pointer to an action instead of an iterator
2966  /// because the iterator may be invalidated but not the pointer.
2967  using ConstRestorationPt = const TypePromotionAction *;
2968 
2969  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
2970  : RemovedInsts(RemovedInsts) {}
2971 
2972  /// Advocate every change made in that transaction. Return true if any change
2973  /// happened.
2974  bool commit();
2975 
2976  /// Undo all the changes made after the given point.
2977  void rollback(ConstRestorationPt Point);
2978 
2979  /// Get the current restoration point.
2980  ConstRestorationPt getRestorationPoint() const;
2981 
2982  /// \name API for IR modification with state keeping to support rollback.
2983  /// @{
2984  /// Same as Instruction::setOperand.
2985  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
2986 
2987  /// Same as Instruction::eraseFromParent.
2988  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
2989 
2990  /// Same as Value::replaceAllUsesWith.
2991  void replaceAllUsesWith(Instruction *Inst, Value *New);
2992 
2993  /// Same as Value::mutateType.
2994  void mutateType(Instruction *Inst, Type *NewTy);
2995 
2996  /// Same as IRBuilder::createTrunc.
2997  Value *createTrunc(Instruction *Opnd, Type *Ty);
2998 
2999  /// Same as IRBuilder::createSExt.
3000  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3001 
3002  /// Same as IRBuilder::createZExt.
3003  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3004 
3005  /// Same as Instruction::moveBefore.
3006  void moveBefore(Instruction *Inst, Instruction *Before);
3007  /// @}
3008 
3009 private:
3010  /// The ordered list of actions made so far.
3011  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3012 
3013  using CommitPt = SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3014 
3015  SetOfInstrs &RemovedInsts;
3016 };
3017 
3018 } // end anonymous namespace
3019 
3020 void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3021  Value *NewVal) {
3022  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3023  Inst, Idx, NewVal));
3024 }
3025 
3026 void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3027  Value *NewVal) {
3028  Actions.push_back(
3029  std::make_unique<TypePromotionTransaction::InstructionRemover>(
3030  Inst, RemovedInsts, NewVal));
3031 }
3032 
3033 void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3034  Value *New) {
3035  Actions.push_back(
3036  std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3037 }
3038 
3039 void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3040  Actions.push_back(
3041  std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3042 }
3043 
3044 Value *TypePromotionTransaction::createTrunc(Instruction *Opnd,
3045  Type *Ty) {
3046  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3047  Value *Val = Ptr->getBuiltValue();
3048  Actions.push_back(std::move(Ptr));
3049  return Val;
3050 }
3051 
3052 Value *TypePromotionTransaction::createSExt(Instruction *Inst,
3053  Value *Opnd, Type *Ty) {
3054  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3055  Value *Val = Ptr->getBuiltValue();
3056  Actions.push_back(std::move(Ptr));
3057  return Val;
3058 }
3059 
3060 Value *TypePromotionTransaction::createZExt(Instruction *Inst,
3061  Value *Opnd, Type *Ty) {
3062  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3063  Value *Val = Ptr->getBuiltValue();
3064  Actions.push_back(std::move(Ptr));
3065  return Val;
3066 }
3067 
3068 void TypePromotionTransaction::moveBefore(Instruction *Inst,
3069  Instruction *Before) {
3070  Actions.push_back(
3071  std::make_unique<TypePromotionTransaction::InstructionMoveBefore>(
3072  Inst, Before));
3073 }
3074 
3075 TypePromotionTransaction::ConstRestorationPt
3076 TypePromotionTransaction::getRestorationPoint() const {
3077  return !Actions.empty() ? Actions.back().get() : nullptr;
3078 }
3079 
3080 bool TypePromotionTransaction::commit() {
3081  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3082  Action->commit();
3083  bool Modified = !Actions.empty();
3084  Actions.clear();
3085  return Modified;
3086 }
3087 
3088 void TypePromotionTransaction::rollback(
3089  TypePromotionTransaction::ConstRestorationPt Point) {
3090  while (!Actions.empty() && Point != Actions.back().get()) {
3091  std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3092  Curr->undo();
3093  }
3094 }
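// A minimal usage sketch of this transaction API (names such as Inst, NewVal,
// RemovedInsts, and ProfitabilityCheckSucceeded are hypothetical): callers
// take a restoration point before speculative IR rewrites and either keep or
// undo them wholesale.
//   TypePromotionTransaction TPT(RemovedInsts);
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   TPT.setOperand(Inst, 0, NewVal);   // recorded, undoable
//   if (!ProfitabilityCheckSucceeded)
//     TPT.rollback(LastKnownGood);     // undo everything after the point
//   else
//     TPT.commit();                    // keep the changes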
3095 
3096 namespace {
3097 
3098 /// A helper class for matching addressing modes.
3099 ///
3100 /// This encapsulates the logic for matching the target-legal addressing modes.
3101 class AddressingModeMatcher {
3102  SmallVectorImpl<Instruction*> &AddrModeInsts;
3103  const TargetLowering &TLI;
3104  const TargetRegisterInfo &TRI;
3105  const DataLayout &DL;
3106  const LoopInfo &LI;
3107  const std::function<const DominatorTree &()> getDTFn;
3108 
3109  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3110  /// the memory instruction that we're computing this address for.
3111  Type *AccessTy;
3112  unsigned AddrSpace;
3113  Instruction *MemoryInst;
3114 
3115  /// This is the addressing mode that we're building up. This is
3116  /// part of the return value of this addressing mode matching stuff.
3117  ExtAddrMode &AddrMode;
3118 
3119  /// The instructions inserted by other CodeGenPrepare optimizations.
3120  const SetOfInstrs &InsertedInsts;
3121 
3122  /// A map from the instructions to their type before promotion.
3123  InstrToOrigTy &PromotedInsts;
3124 
3125  /// The ongoing transaction where every action should be registered.
3126  TypePromotionTransaction &TPT;
3127 
3128  // A GEP which has too large an offset to be folded into the addressing mode.
3129  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3130 
3131  /// This is set to true when we should not do profitability checks.
3132  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3133  bool IgnoreProfitability;
3134 
3135  /// True if we are optimizing for size.
3136  bool OptSize;
3137 
3138  ProfileSummaryInfo *PSI;
3139  BlockFrequencyInfo *BFI;
3140 
3141  AddressingModeMatcher(
3142  SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3143  const TargetRegisterInfo &TRI, const LoopInfo &LI,
3144  const std::function<const DominatorTree &()> getDTFn,
3145  Type *AT, unsigned AS, Instruction *MI, ExtAddrMode &AM,
3146  const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3147  TypePromotionTransaction &TPT,
3148  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3149  bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3150  : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3151  DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn),
3152  AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3153  InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3154  LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3155  IgnoreProfitability = false;
3156  }
3157 
3158 public:
3159  /// Find the maximal addressing mode that a load/store of V can fold,
3160  /// given an access type of AccessTy. This returns a list of involved
3161  /// instructions in AddrModeInsts.
3162  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3163  /// optimizations.
3164  /// \p PromotedInsts maps the instructions to their type before promotion.
3165  /// \p TPT The ongoing transaction where every action should be registered.
3166  static ExtAddrMode
3167  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3168  SmallVectorImpl<Instruction *> &AddrModeInsts,
3169  const TargetLowering &TLI, const LoopInfo &LI,
3170  const std::function<const DominatorTree &()> getDTFn,
3171  const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3172  InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3173  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3174  bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3175  ExtAddrMode Result;
3176 
3177  bool Success = AddressingModeMatcher(
3178  AddrModeInsts, TLI, TRI, LI, getDTFn, AccessTy, AS, MemoryInst, Result,
3179  InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
3180  BFI).matchAddr(V, 0);
3181  (void)Success; assert(Success && "Couldn't select *anything*?");
3182  return Result;
3183  }
3184 
3185 private:
3186  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3187  bool matchAddr(Value *Addr, unsigned Depth);
3188  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3189  bool *MovedAway = nullptr);
3190  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3191  ExtAddrMode &AMBefore,
3192  ExtAddrMode &AMAfter);
3193  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3194  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3195  Value *PromotedOperand) const;
3196 };
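
// For illustration only (the IR below is a hypothetical example, not taken
// from this pass): matching the address of
//   %addr = getelementptr inbounds i32, i32* %base, i64 %idx
//   %v    = load i32, i32* %addr
// can yield an ExtAddrMode with BaseReg=%base, ScaledReg=%idx, Scale=4 and
// BaseOffs=0, i.e. the target form [%base + 4*%idx], with the GEP recorded in
// AddrModeInsts so that the computation can later be sunk next to the load.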
3197 
3198 class PhiNodeSet;
3199 
3200 /// An iterator for PhiNodeSet.
3201 class PhiNodeSetIterator {
3202  PhiNodeSet * const Set;
3203  size_t CurrentIndex = 0;
3204 
3205 public:
3206  /// The constructor. Start should point to either a valid element, or be equal
3207  /// to the size of the underlying SmallVector of the PhiNodeSet.
3208  PhiNodeSetIterator(PhiNodeSet * const Set, size_t Start);
3209  PHINode * operator*() const;
3210  PhiNodeSetIterator& operator++();
3211  bool operator==(const PhiNodeSetIterator &RHS) const;
3212  bool operator!=(const PhiNodeSetIterator &RHS) const;
3213 };
3214 
3215 /// Keeps a set of PHINodes.
3216 ///
3217 /// This is a minimal set implementation for a specific use case:
3218 /// It is very fast when there are very few elements, but also provides good
3219 /// performance when there are many. It is similar to SmallPtrSet, but also
3220 /// provides iteration by insertion order, which is deterministic and stable
3221  /// across runs. It is also similar to SmallSetVector, but provides removal of
3222  /// elements in O(1) time. This is achieved by not actually removing the element
3223  /// from the underlying vector, so it comes at the cost of using more memory, but
3224  /// that is fine, since PhiNodeSets are used as short-lived objects.
3225 class PhiNodeSet {
3226  friend class PhiNodeSetIterator;
3227 
3228  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3229  using iterator = PhiNodeSetIterator;
3230 
3231  /// Keeps the elements in the order of their insertion in the underlying
3232  /// vector. To achieve constant time removal, it never deletes any element.
3233  SmallVector<PHINode *, 32> NodeList;
3234 
3235  /// Keeps the elements in the underlying set implementation. This (and not the
3236  /// NodeList defined above) is the source of truth on whether an element
3237  /// is actually in the collection.
3238  MapType NodeMap;
3239 
3240  /// Points to the first valid (not deleted) element when the set is not empty
3241  /// and the value is not zero. Equals the size of the underlying vector
3242  /// when the set is empty. When the value is 0, as in the beginning, the
3243  /// first element may or may not be valid.
3244  size_t FirstValidElement = 0;
3245 
3246 public:
3247  /// Inserts a new element to the collection.
3248  /// \returns true if the element is actually added, i.e. was not in the
3249  /// collection before the operation.
3250  bool insert(PHINode *Ptr) {
3251  if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3252  NodeList.push_back(Ptr);
3253  return true;
3254  }
3255  return false;
3256  }
3257 
3258  /// Removes the element from the collection.
3259  /// \returns whether the element is actually removed, i.e. was in the
3260  /// collection before the operation.
3261  bool erase(PHINode *Ptr) {
3262  if (NodeMap.erase(Ptr)) {
3263  SkipRemovedElements(FirstValidElement);
3264  return true;
3265  }
3266  return false;
3267  }
3268 
3269  /// Removes all elements and clears the collection.
3270  void clear() {
3271  NodeMap.clear();
3272  NodeList.clear();
3273  FirstValidElement = 0;
3274  }
3275 
3276  /// \returns an iterator that will iterate the elements in the order of
3277  /// insertion.
3278  iterator begin() {
3279  if (FirstValidElement == 0)
3280  SkipRemovedElements(FirstValidElement);
3281  return PhiNodeSetIterator(this, FirstValidElement);
3282  }
3283 
3284  /// \returns an iterator that points to the end of the collection.
3285  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3286 
3287  /// Returns the number of elements in the collection.
3288  size_t size() const {
3289  return NodeMap.size();
3290  }
3291 
3292  /// \returns 1 if the given element is in the collection, and 0 if otherwise.
3293  size_t count(PHINode *Ptr) const {
3294  return NodeMap.count(Ptr);
3295  }
3296 
3297 private:
3298  /// Updates the CurrentIndex so that it will point to a valid element.
3299  ///
3300  /// If the element of NodeList at CurrentIndex is valid, it does not
3301  /// change it. If there are no more valid elements, it updates CurrentIndex
3302  /// to point to the end of the NodeList.
3303  void SkipRemovedElements(size_t &CurrentIndex) {
3304  while (CurrentIndex < NodeList.size()) {
3305  auto it = NodeMap.find(NodeList[CurrentIndex]);
3306  // If the element has been deleted and added again later, NodeMap will
3307  // point to a different index, so CurrentIndex will still be invalid.
3308  if (it != NodeMap.end() && it->second == CurrentIndex)
3309  break;
3310  ++CurrentIndex;
3311  }
3312  }
3313 };
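
// A small usage sketch of PhiNodeSet (PN1 and PN2 are placeholder PHINode
// pointers): erase() only drops the NodeMap entry, so NodeList keeps a stale
// slot that iteration skips, which is what makes removal O(1).
//
//   PhiNodeSet Set;
//   Set.insert(PN1);
//   Set.insert(PN2);
//   Set.erase(PN1);         // constant time; PN1 stays in NodeList
//   for (PHINode *P : Set)  // visits PN2 only, in insertion order
//     P->dump();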
3314 
3315 PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3316  : Set(Set), CurrentIndex(Start) {}
3317 
3318 PHINode * PhiNodeSetIterator::operator*() const {
3319  assert(CurrentIndex < Set->NodeList.size() &&
3320  "PhiNodeSet access out of range");
3321  return Set->NodeList[CurrentIndex];
3322 }
3323 
3324 PhiNodeSetIterator& PhiNodeSetIterator::operator++() {
3325  assert(CurrentIndex < Set->NodeList.size() &&
3326  "PhiNodeSet access out of range");
3327  ++CurrentIndex;
3328  Set->SkipRemovedElements(CurrentIndex);
3329  return *this;
3330 }
3331 
3332 bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3333  return CurrentIndex == RHS.CurrentIndex;
3334 }
3335 
3336 bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3337  return !((*this) == RHS);
3338 }
3339 
3340 /// Keep track of simplification of Phi nodes.
3341  /// Accept the set of all phi nodes and erase a phi node from this set
3342 /// if it is simplified.
3343 class SimplificationTracker {
3344  DenseMap<Value *, Value *> Storage;
3345  const SimplifyQuery &SQ;
3346  // Tracks newly created Phi nodes. The elements are iterated by insertion
3347  // order.
3348  PhiNodeSet AllPhiNodes;
3349  // Tracks newly created Select nodes.
3350  SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3351 
3352 public:
3353  SimplificationTracker(const SimplifyQuery &sq)
3354  : SQ(sq) {}
3355 
3356  Value *Get(Value *V) {
3357  do {
3358  auto SV = Storage.find(V);
3359  if (SV == Storage.end())
3360  return V;
3361  V = SV->second;
3362  } while (true);
3363  }
3364 
3365  Value *Simplify(Value *Val) {
3366  SmallVector<Value *, 32> WorkList;
3367  SmallPtrSet<Value *, 32> Visited;
3368  WorkList.push_back(Val);
3369  while (!WorkList.empty()) {
3370  auto *P = WorkList.pop_back_val();
3371  if (!Visited.insert(P).second)
3372  continue;
3373  if (auto *PI = dyn_cast<Instruction>(P))
3374  if (Value *V = SimplifyInstruction(cast<Instruction>(PI), SQ)) {
3375  for (auto *U : PI->users())
3376  WorkList.push_back(cast<Value>(U));
3377  Put(PI, V);
3378  PI->replaceAllUsesWith(V);
3379  if (auto *PHI = dyn_cast<PHINode>(PI))
3380  AllPhiNodes.erase(PHI);
3381  if (auto *Select = dyn_cast<SelectInst>(PI))
3382  AllSelectNodes.erase(Select);
3383  PI->eraseFromParent();
3384  }
3385  }
3386  return Get(Val);
3387  }
3388 
3389  void Put(Value *From, Value *To) {
3390  Storage.insert({ From, To });
3391  }
3392 
3393  void ReplacePhi(PHINode *From, PHINode *To) {
3394  Value* OldReplacement = Get(From);
3395  while (OldReplacement != From) {
3396  From = To;
3397  To = dyn_cast<PHINode>(OldReplacement);
3398  OldReplacement = Get(From);
3399  }
3400  assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3401  Put(From, To);
3402  From->replaceAllUsesWith(To);
3403  AllPhiNodes.erase(From);
3404  From->eraseFromParent();
3405  }
3406 
3407  PhiNodeSet& newPhiNodes() { return AllPhiNodes; }
3408 
3409  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3410 
3411  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3412 
3413  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3414 
3415  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3416 
3417  void destroyNewNodes(Type *CommonType) {
3418  // For safe erasing, replace the uses with dummy value first.
3419  auto *Dummy = UndefValue::get(CommonType);
3420  for (auto *I : AllPhiNodes) {
3421  I->replaceAllUsesWith(Dummy);
3422  I->eraseFromParent();
3423  }
3424  AllPhiNodes.clear();
3425  for (auto *I : AllSelectNodes) {
3426  I->replaceAllUsesWith(Dummy);
3427  I->eraseFromParent();
3428  }
3429  AllSelectNodes.clear();
3430  }
3431 };
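
// Note that Get() chases Storage transitively: after Put(A, B) and Put(B, C)
// (A, B and C being placeholder values), Get(A) yields C. ReplacePhi() relies
// on this when the replacement PHI has itself already been replaced.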
3432 
3433 /// A helper class for combining addressing modes.
3434 class AddressingModeCombiner {
3435  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3436  typedef std::pair<PHINode *, PHINode *> PHIPair;
3437 
3438 private:
3439  /// The addressing modes we've collected.
3440  SmallVector<ExtAddrMode, 16> AddrModes;
3441 
3442  /// The field in which the AddrModes differ, when we have more than one.
3443  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3444 
3445  /// Are the AddrModes that we have all just equal to their original values?
3446  bool AllAddrModesTrivial = true;
3447 
3448  /// Common Type for all different fields in addressing modes.
3449  Type *CommonType;
3450 
3451  /// SimplifyQuery for simplifyInstruction utility.
3452  const SimplifyQuery &SQ;
3453 
3454  /// Original Address.
3455  Value *Original;
3456 
3457 public:
3458  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3459  : CommonType(nullptr), SQ(_SQ), Original(OriginalValue) {}
3460 
3461  /// Get the combined AddrMode
3462  const ExtAddrMode &getAddrMode() const {
3463  return AddrModes[0];
3464  }
3465 
3466  /// Add a new AddrMode if it's compatible with the AddrModes we already
3467  /// have.
3468  /// \return True iff we succeeded in doing so.
3469  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3470  // Take note of whether we have any non-trivial AddrModes, as we need to detect
3471  // when all AddrModes are trivial as then we would introduce a phi or select
3472  // which just duplicates what's already there.
3473  AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3474 
3475  // If this is the first addrmode then everything is fine.
3476  if (AddrModes.empty()) {
3477  AddrModes.emplace_back(NewAddrMode);
3478  return true;
3479  }
3480 
3481  // Figure out how different this is from the other address modes, which we
3482  // can do just by comparing against the first one given that we only care
3483  // about the cumulative difference.
3484  ExtAddrMode::FieldName ThisDifferentField =
3485  AddrModes[0].compare(NewAddrMode);
3486  if (DifferentField == ExtAddrMode::NoField)
3487  DifferentField = ThisDifferentField;
3488  else if (DifferentField != ThisDifferentField)
3489  DifferentField = ExtAddrMode::MultipleFields;
3490 
3491  // If NewAddrMode differs in more than one dimension we cannot handle it.
3492  bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3493 
3494  // If Scale Field is different then we reject.
3495  CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3496 
3497  // We also must reject the case when the base offset is different and the
3498  // scale reg is not null: we cannot handle this case because the merged
3499  // offset would have to be used as the ScaledReg.
3500  CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3501  !NewAddrMode.ScaledReg);
3502 
3503  // We also must reject the case when the GV is different and a BaseReg is
3504  // installed, because we want to use the base reg as the merge of the GV values.
3505  CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3506  !NewAddrMode.HasBaseReg);
3507 
3508  // Even if NewAddrMode is the same we still need to collect it, because the
3509  // original value is different. Later we will need all the original values
3510  // as anchors when finding the common Phi node.
3511  if (CanHandle)
3512  AddrModes.emplace_back(NewAddrMode);
3513  else
3514  AddrModes.clear();
3515 
3516  return CanHandle;
3517  }
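
  // For illustration (the values are hypothetical): two modes that differ only
  // in BaseReg, e.g. {BaseReg=%p, BaseOffs=40} and {BaseReg=%q, BaseOffs=40},
  // can still be collected and later merged with a Phi/Select over %p and %q;
  // modes that differ in Scale, or in BaseOffs while a ScaledReg is present,
  // are rejected above.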
3518 
3519  /// Combine the addressing modes we've collected into a single
3520  /// addressing mode.
3521  /// \return True iff we successfully combined them or we only had one so
3522  /// didn't need to combine them anyway.
3523  bool combineAddrModes() {
3524  // If we have no AddrModes then they can't be combined.
3525  if (AddrModes.size() == 0)
3526  return false;
3527 
3528  // A single AddrMode can trivially be combined.
3529  if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3530  return true;
3531 
3532  // If the AddrModes we collected are all just equal to the value they are
3533  // derived from then combining them wouldn't do anything useful.
3534  if (AllAddrModesTrivial)
3535  return false;
3536 
3537  if (!addrModeCombiningAllowed())
3538  return false;
3539 
3540  // Build a map from <original value, basic block where we saw it> to the
3541  // value of the base register.
3542  // Bail out if there is no common type.
3543  FoldAddrToValueMapping Map;
3544  if (!initializeMap(Map))
3545  return false;
3546 
3547  Value *CommonValue = findCommon(Map);
3548  if (CommonValue)
3549  AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3550  return CommonValue != nullptr;
3551  }
3552 
3553 private:
3554  /// Initialize Map with anchor values. For each address seen
3555  /// we set the value of the differing field seen in this address.
3556  /// At the same time we find a common type for different field we will
3557  /// use to create new Phi/Select nodes. Keep it in CommonType field.
3558  /// Return false if there is no common type found.
3559  bool initializeMap(FoldAddrToValueMapping &Map) {
3560  // Keep track of keys where the value is null. We will need to replace it
3561  // with constant null when we know the common type.
3562  SmallVector<Value *, 2> NullValue;
3563  Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3564  for (auto &AM : AddrModes) {
3565  Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3566  if (DV) {
3567  auto *Type = DV->getType();
3568  if (CommonType && CommonType != Type)
3569  return false;
3570  CommonType = Type;
3571  Map[AM.OriginalValue] = DV;
3572  } else {
3573  NullValue.push_back(AM.OriginalValue);
3574  }
3575  }
3576  assert(CommonType && "At least one non-null value must be!");
3577  for (auto *V : NullValue)
3578  Map[V] = Constant::getNullValue(CommonType);
3579  return true;
3580  }
3581 
3582  /// We have a mapping between value A and another value B, where B was a field
3583  /// in the addressing mode represented by A. We also have an original value C
3584  /// representing the address we start with. Traversing from C through phis and
3585  /// selects we ended up with the A's in the map. This utility function tries to
3586  /// find a value V which is a field in addressing mode C such that, traversing
3587  /// through phi nodes and selects, we end up in the corresponding values B in
3588  /// the map. The utility will create new Phi/Select nodes if needed.
3589  // The simple example looks as follows:
3590  // BB1:
3591  // p1 = b1 + 40
3592  // br cond BB2, BB3
3593  // BB2:
3594  // p2 = b2 + 40
3595  // br BB3
3596  // BB3:
3597  // p = phi [p1, BB1], [p2, BB2]
3598  // v = load p
3599  // Map is
3600  // p1 -> b1
3601  // p2 -> b2
3602  // Request is
3603  // p -> ?
3604  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3605  Value *findCommon(FoldAddrToValueMapping &Map) {
3606  // Tracks the simplification of newly created phi nodes. The reason we use
3607  // this mapping is that we will add newly created Phi nodes to AddrToBase.
3608  // Simplification of Phi nodes is recursive, so some Phi node may
3609  // be simplified after we added it to AddrToBase. In reality this
3610  // simplification is possible only if the original phis/selects were not
3611  // simplified yet.
3612  // Using this mapping we can find the current value in AddrToBase.
3613  SimplificationTracker ST(SQ);
3614 
3615  // First step, DFS to create PHI nodes for all intermediate blocks.
3616  // Also fill traverse order for the second step.
3617  SmallVector<Value *, 32> TraverseOrder;
3618  InsertPlaceholders(Map, TraverseOrder, ST);
3619 
3620  // Second Step, fill new nodes by merged values and simplify if possible.
3621  FillPlaceholders(Map, TraverseOrder, ST);
3622 
3623  if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3624  ST.destroyNewNodes(CommonType);
3625  return nullptr;
3626  }
3627 
3628  // Now we'd like to match the new Phi nodes to the existing ones.
3629  unsigned PhiNotMatchedCount = 0;
3630  if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3631  ST.destroyNewNodes(CommonType);
3632  return nullptr;
3633  }
3634 
3635  auto *Result = ST.Get(Map.find(Original)->second);
3636  if (Result) {
3637  NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3638  NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3639  }
3640  return Result;
3641  }
3642 
3643  /// Try to match PHI node to Candidate.
3644  /// Matcher tracks the matched Phi nodes.
3645  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3646  SmallSetVector<PHIPair, 8> &Matcher,
3647  PhiNodeSet &PhiNodesToMatch) {
3648  SmallVector<PHIPair, 8> WorkList;
3649  Matcher.insert({ PHI, Candidate });
3650  SmallSet<PHINode *, 8> MatchedPHIs;
3651  MatchedPHIs.insert(PHI);
3652  WorkList.push_back({ PHI, Candidate });
3653  SmallSet<PHIPair, 8> Visited;
3654  while (!WorkList.empty()) {
3655  auto Item = WorkList.pop_back_val();
3656  if (!Visited.insert(Item).second)
3657  continue;
3658  // We iterate over all incoming values of the Phi to compare them.
3659  // If the values are different, both of them are Phis, the first one is a
3660  // Phi we added (subject to match), and both of them are in the same basic
3661  // block, then we can match our pair if the values match. So we state that
3662  // these values match and add them to the work list to verify that.
3663  for (auto B : Item.first->blocks()) {
3664  Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3665  Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3666  if (FirstValue == SecondValue)
3667  continue;
3668 
3669  PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3670  PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3671 
3672  // If one of them is not a Phi, or
3673  // the first one is not a Phi node from the set we'd like to match, or
3674  // the Phi nodes are from different basic blocks, then
3675  // we will not be able to match.
3676  if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3677  FirstPhi->getParent() != SecondPhi->getParent())
3678  return false;
3679 
3680  // If we already matched them then continue.
3681  if (Matcher.count({ FirstPhi, SecondPhi }))
3682  continue;
3683  // So the values are different and do not match. So we need them to
3684  // match. (But we register no more than one match per PHI node, so that
3685  // we won't later try to replace them twice.)
3686  if (MatchedPHIs.insert(FirstPhi).second)
3687  Matcher.insert({ FirstPhi, SecondPhi });
3688  // But we must check it.
3689  WorkList.push_back({ FirstPhi, SecondPhi });
3690  }
3691  }
3692  return true;
3693  }
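
  // Hypothetical example of a successful match: with an existing
  //   %a    = phi [%x, BB1], [%y, BB2]
  // and a newly created
  //   %sunk = phi [%x, BB1], [%y, BB2]
  // in the same block, the worklist pairs them, every incoming value matches,
  // and %sunk can later be folded into %a via ReplacePhi().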
3694 
3695  /// For the given set of PHI nodes (in the SimplificationTracker) try
3696  /// to find their equivalents.
3697  /// Returns false if this matching fails and creation of new Phi is disabled.
3698  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3699  unsigned &PhiNotMatchedCount) {
3700  // Matched and PhiNodesToMatch iterate their elements in a deterministic
3701  // order, so the replacements (ReplacePhi) are also done in a deterministic
3702  // order.
3703  SmallSetVector<PHIPair, 8> Matched;
3704  SmallPtrSet<PHINode *, 8> WillNotMatch;
3705  PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3706  while (PhiNodesToMatch.size()) {
3707  PHINode *PHI = *PhiNodesToMatch.begin();
3708 
3709  // Add ourselves; if no Phi node in the basic block matches, these will not match.
3710  WillNotMatch.clear();
3711  WillNotMatch.insert(PHI);
3712 
3713  // Traverse all Phis until we find an equivalent one or fail to do so.
3714  bool IsMatched = false;
3715  for (auto &P : PHI->getParent()->phis()) {
3716  // Skip new Phi nodes.
3717  if (PhiNodesToMatch.count(&P))
3718  continue;
3719  if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
3720  break;
3721  // If it does not match, collect all Phi nodes from the matcher.
3722  // If we end up with no match, then all these Phi nodes will not match
3723  // later.
3724  for (auto M : Matched)
3725  WillNotMatch.insert(M.first);
3726  Matched.clear();
3727  }
3728  if (IsMatched) {
3729  // Replace all matched values and erase them.
3730  for (auto MV : Matched)
3731  ST.ReplacePhi(MV.first, MV.second);
3732  Matched.clear();
3733  continue;
3734  }
3735  // If we are not allowed to create new nodes then bail out.
3736  if (!AllowNewPhiNodes)
3737  return false;
3738  // Just remove all seen values in matcher. They will not match anything.
3739  PhiNotMatchedCount += WillNotMatch.size();
3740  for (auto *P : WillNotMatch)
3741  PhiNodesToMatch.erase(P);
3742  }
3743  return true;
3744  }
3745  /// Fill the placeholders with values from predecessors and simplify them.
3746  void FillPlaceholders(FoldAddrToValueMapping &Map,
3747  SmallVectorImpl<Value *> &TraverseOrder,
3748  SimplificationTracker &ST) {
3749  while (!TraverseOrder.empty()) {
3750  Value *Current = TraverseOrder.pop_back_val();
3751  assert(Map.find(Current) != Map.end() && "No node to fill!!!");
3752  Value *V = Map[Current];
3753 
3754  if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
3755  // CurrentValue also must be Select.
3756  auto *CurrentSelect = cast<SelectInst>(Current);
3757  auto *TrueValue = CurrentSelect->getTrueValue();
3758  assert(Map.find(TrueValue) != Map.end() && "No True Value!");
3759  Select->setTrueValue(ST.Get(Map[TrueValue]));
3760  auto *FalseValue = CurrentSelect->getFalseValue();
3761  assert(Map.find(FalseValue) != Map.end() && "No False Value!");
3762  Select->setFalseValue(ST.Get(Map[FalseValue]));
3763  } else {
3764  // Must be a Phi node then.
3765  auto *PHI = cast<PHINode>(V);
3766  // Fill the Phi node with values from predecessors.
3767  for (auto *B : predecessors(PHI->getParent())) {
3768  Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
3769  assert(Map.find(PV) != Map.end() && "No predecessor Value!");
3770  PHI->addIncoming(ST.Get(Map[PV]), B);
3771  }
3772  }
3773  Map[Current] = ST.Simplify(V);
3774  }
3775  }
3776 
3777  /// Starting from the original value, recursively iterates over the def-use
3778  /// chain up to known ending values represented in a map. For each traversed
3779  /// phi/select it inserts a placeholder Phi or Select.
3780  /// Reports all newly created Phi/Select nodes by adding them to the set.
3781  /// Also reports the order in which the values have been traversed.
3782  void InsertPlaceholders(FoldAddrToValueMapping &Map,
3783  SmallVectorImpl<Value *> &TraverseOrder,
3784  SimplificationTracker &ST) {
3785  SmallVector<Value *, 32> Worklist;
3786  assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
3787  "Address must be a Phi or Select node");
3788  auto *Dummy = UndefValue::get(CommonType);
3789  Worklist.push_back(Original);
3790  while (!Worklist.empty()) {
3791  Value *Current = Worklist.pop_back_val();
3792  // if it is already visited or it is an ending value then skip it.
3793  if (Map.find(Current) != Map.end())
3794  continue;
3795  TraverseOrder.push_back(Current);
3796 
3797  // CurrentValue must be a Phi node or select. All others must be covered
3798  // by anchors.
3799  if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
3800  // Is it OK to get metadata from OrigSelect?!
3801  // Create a Select placeholder with dummy value.
3802  SelectInst *Select = SelectInst::Create(
3803  CurrentSelect->getCondition(), Dummy, Dummy,
3804  CurrentSelect->getName(), CurrentSelect, CurrentSelect);
3805  Map[Current] = Select;
3806  ST.insertNewSelect(Select);
3807  // We are interested in True and False values.
3808  Worklist.push_back(CurrentSelect->getTrueValue());
3809  Worklist.push_back(CurrentSelect->getFalseValue());
3810  } else {
3811  // It must be a Phi node then.
3812  PHINode *CurrentPhi = cast<PHINode>(Current);
3813  unsigned PredCount = CurrentPhi->getNumIncomingValues();
3814  PHINode *PHI =
3815  PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
3816  Map[Current] = PHI;
3817  ST.insertNewPhi(PHI);
3818  append_range(Worklist, CurrentPhi->incoming_values());
3819  }
3820  }
3821  }
3822 
3823  bool addrModeCombiningAllowed() {
3824  if (DisableComplexAddrModes)
3825  return false;
3826  switch (DifferentField) {
3827  default:
3828  return false;
3829  case ExtAddrMode::BaseRegField:
3830  return AddrSinkCombineBaseReg;
3831  case ExtAddrMode::BaseGVField:
3832  return AddrSinkCombineBaseGV;
3833  case ExtAddrMode::BaseOffsField:
3834  return AddrSinkCombineBaseOffs;
3835  case ExtAddrMode::ScaledRegField:
3836  return AddrSinkCombineScaledReg;
3837  }
3838  }
3839 };
3840 } // end anonymous namespace
3841 
3842 /// Try adding ScaleReg*Scale to the current addressing mode.
3843 /// Return true and update AddrMode if this addr mode is legal for the target,
3844 /// false if not.
3845 bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
3846  unsigned Depth) {
3847  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
3848  // mode. Just process that directly.
3849  if (Scale == 1)
3850  return matchAddr(ScaleReg, Depth);
3851 
3852  // If the scale is 0, it takes nothing to add this.
3853  if (Scale == 0)
3854  return true;
3855 
3856  // If we already have a scale of this value, we can add to it, otherwise, we
3857  // need an available scale field.
3858  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
3859  return false;
3860 
3861  ExtAddrMode TestAddrMode = AddrMode;
3862 
3863  // Add scale to turn X*4+X*3 -> X*7. This could also do things like
3864  // [A+B + A*7] -> [B+A*8].
3865  TestAddrMode.Scale += Scale;
3866  TestAddrMode.ScaledReg = ScaleReg;
3867 
3868  // If the new address isn't legal, bail out.
3869  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
3870  return false;
3871 
3872  // It was legal, so commit it.
3873  AddrMode = TestAddrMode;
3874 
3875  // Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
3876  // to see if ScaleReg is actually X+C. If so, we can turn this into adding
3877  // X*Scale + C*Scale to addr mode. If we found an available IV increment, do not
3878  // go any further: we can reuse it and cannot eliminate it.
3879  ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
3880  if (isa<Instruction>(ScaleReg) && // not a constant expr.
3881  match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
3882  !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
3883  TestAddrMode.InBounds = false;
3884  TestAddrMode.ScaledReg = AddLHS;
3885  TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
3886 
3887  // If this addressing mode is legal, commit it and remember that we folded
3888  // this instruction.
3889  if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
3890  AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
3891  AddrMode = TestAddrMode;
3892  return true;
3893  }
3894  // Restore status quo.
3895  TestAddrMode = AddrMode;
3896  }
3897 
3898  // If this is an add recurrence with a constant step, return the increment
3899  // instruction and the canonicalized step.
3900  auto GetConstantStep = [this](const Value * V)
3901  ->Optional<std::pair<Instruction *, APInt> > {
3902  auto *PN = dyn_cast<PHINode>(V);
3903  if (!PN)
3904  return None;
3905  auto IVInc = getIVIncrement(PN, &LI);
3906  if (!IVInc)
3907  return None;
3908  // TODO: The result of the intrinsics above is two's complement. However, when
3909  // the IV increment is expressed as add or sub, iv.next is potentially a poison
3910  // value. If it has nuw or nsw flags, we need to make sure that these flags are
3911  // inferrable at the point of the memory instruction. Otherwise we are replacing
3912  // a well-defined two's complement computation with poison. Currently, to avoid
3913  // the potentially complex analysis needed to prove this, we reject such cases.
3914  if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
3915  if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
3916  return None;
3917  if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
3918  return std::make_pair(IVInc->first, ConstantStep->getValue());
3919  return None;
3920  };
3921 
3922  // Try to account for the following special case:
3923  // 1. ScaleReg is an inductive variable;
3924  // 2. We use it with non-zero offset;
3925  // 3. IV's increment is available at the point of memory instruction.
3926  //
3927  // In this case, we may reuse the IV increment instead of the IV Phi to
3928  // achieve the following advantages:
3929  // 1. If IV step matches the offset, we will have no need in the offset;
3930  // 2. Even if they don't match, we will reduce the overlap of living IV
3931  // and IV increment, that will potentially lead to better register
3932  // assignment.
3933  if (AddrMode.BaseOffs) {
3934  if (auto IVStep = GetConstantStep(ScaleReg)) {
3935  Instruction *IVInc = IVStep->first;
3936  // The following assert is important to ensure a lack of infinite loops.
3937  // This transform is (intentionally) the inverse of the one just above.
3938  // If they don't agree on the definition of an increment, we'd alternate
3939  // back and forth indefinitely.
3940  assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
3941  APInt Step = IVStep->second;
3942  APInt Offset = Step * AddrMode.Scale;
3943  if (Offset.isSignedIntN(64)) {
3944  TestAddrMode.InBounds = false;
3945  TestAddrMode.ScaledReg = IVInc;
3946  TestAddrMode.BaseOffs -= Offset.getLimitedValue();
3947  // If this addressing mode is legal, commit it.
3948  // (Note that we defer the (expensive) domtree base legality check
3949  // to the very last possible point.)
3950  if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
3951  getDTFn().dominates(IVInc, MemoryInst)) {
3952  AddrModeInsts.push_back(cast<Instruction>(IVInc));
3953  AddrMode = TestAddrMode;
3954  return true;
3955  }
3956  // Restore status quo.
3957  TestAddrMode = AddrMode;
3958  }
3959  }
3960  }
3961 
3962  // Otherwise, just return what we have.
3963  return true;
3964 }
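
// A small illustrative case for the IV-increment reuse above (the IR names
// are hypothetical): given "%iv.next = add i64 %iv, 1" and a candidate mode
// [8*%iv + 8], the matcher can select [8*%iv.next + 0] instead, shortening
// the live range of %iv, provided %iv.next dominates the memory instruction
// and the resulting mode is still legal for the target.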
3965 
3966 /// This is a little filter, which returns true if an addressing computation
3967 /// involving I might be folded into a load/store accessing it.
3968 /// This doesn't need to be perfect, but needs to accept at least
3969 /// the set of instructions that MatchOperationAddr can.
3970 static bool MightBeFoldableInst(Instruction *I) {
3971  switch (I->getOpcode()) {
3972  case Instruction::BitCast:
3973  case Instruction::AddrSpaceCast:
3974  // Don't touch identity bitcasts.
3975  if (I->getType() == I->getOperand(0)->getType())
3976  return false;
3977  return I->getType()->isIntOrPtrTy();
3978  case Instruction::PtrToInt:
3979  // PtrToInt is always a noop, as we know that the int type is pointer sized.
3980  return true;
3981  case Instruction::IntToPtr:
3982  // We know the input is intptr_t, so this is foldable.
3983  return true;
3984  case Instruction::Add:
3985  return true;
3986  case Instruction::Mul:
3987  case Instruction::Shl:
3988  // Can only handle X*C and X << C.
3989  return isa<ConstantInt>(I->getOperand(1));
3990  case Instruction::GetElementPtr:
3991  return true;
3992  default:
3993  return false;
3994  }
3995 }
3996 
3997 /// Check whether or not \p Val is a legal instruction for \p TLI.
3998 /// \note \p Val is assumed to be the product of some type promotion.
3999 /// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4000 /// to be legal, as the non-promoted value would have had the same state.
4001 static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4002  const DataLayout &DL, Value *Val) {
4003  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4004  if (!PromotedInst)
4005  return false;
4006  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4007  // If the ISDOpcode is undefined, it was undefined before the promotion.
4008  if (!ISDOpcode)
4009  return true;
4010  // Otherwise, check if the promoted instruction is legal or not.
4011  return TLI.isOperationLegalOrCustom(
4012  ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4013 }
4014 
4015 namespace {
4016 
4017 /// Helper class to perform type promotion.
4018 class TypePromotionHelper {
4019  /// Utility function to add a promoted instruction \p ExtOpnd to
4020  /// \p PromotedInsts and record the type of extension we have seen.
4021  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4022  Instruction *ExtOpnd,
4023  bool IsSExt) {
4024  ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4025  InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4026  if (It != PromotedInsts.end()) {
4027  // If the new extension is same as original, the information in
4028  // PromotedInsts[ExtOpnd] is still correct.
4029  if (It->second.getInt() == ExtTy)
4030  return;
4031 
4032  // Now the new extension is different from old extension, we make
4033  // the type information invalid by setting extension type to
4034  // BothExtension.
4035  ExtTy = BothExtension;
4036  }
4037  PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4038  }
4039 
4040  /// Utility function to query the original type of instruction \p Opnd
4041  /// with a matched extension type. If the extension doesn't match, we
4042  /// cannot use the information we had on the original type.
4043  /// BothExtension doesn't match any extension type.
4044  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4045  Instruction *Opnd,
4046  bool IsSExt) {
4047  ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4048  InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4049  if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4050  return It->second.getPointer();
4051  return nullptr;
4052  }
4053 
4054  /// Utility function to check whether or not a sign or zero extension
4055  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4056  /// either using the operands of \p Inst or promoting \p Inst.
4057  /// The type of the extension is defined by \p IsSExt.
4058  /// In other words, check if:
4059  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4060  /// #1 Promotion applies:
4061  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4062  /// #2 Operand reuses:
4063  /// ext opnd1 to ConsideredExtType.
4064  /// \p PromotedInsts maps the instructions to their type before promotion.
4065  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4066  const InstrToOrigTy &PromotedInsts, bool IsSExt);
4067 
4068  /// Utility function to determine if \p OpIdx should be promoted when
4069  /// promoting \p Inst.
4070  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
4071  return !(isa<SelectInst>(Inst) && OpIdx == 0);
4072  }
4073 
4074  /// Utility function to promote the operand of \p Ext when this
4075  /// operand is a promotable trunc or sext or zext.
4076  /// \p PromotedInsts maps the instructions to their type before promotion.
4077  /// \p CreatedInstsCost[out] contains the cost of all instructions
4078  /// created to promote the operand of Ext.
4079  /// Newly added extensions are inserted in \p Exts.
4080  /// Newly added truncates are inserted in \p Truncs.
4081  /// Should never be called directly.
4082  /// \return The promoted value which is used instead of Ext.
4083  static Value *promoteOperandForTruncAndAnyExt(
4084  Instruction *Ext, TypePromotionTransaction &TPT,
4085  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4086  SmallVectorImpl<Instruction *> *Exts,
4087  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4088 
4089  /// Utility function to promote the operand of \p Ext when this
4090  /// operand is promotable and is not a supported trunc or sext.
4091  /// \p PromotedInsts maps the instructions to their type before promotion.
4092  /// \p CreatedInstsCost[out] contains the cost of all the instructions
4093  /// created to promote the operand of Ext.
4094  /// Newly added extensions are inserted in \p Exts.
4095  /// Newly added truncates are inserted in \p Truncs.
4096  /// Should never be called directly.
4097  /// \return The promoted value which is used instead of Ext.
4098  static Value *promoteOperandForOther(Instruction *Ext,
4099  TypePromotionTransaction &TPT,
4100  InstrToOrigTy &PromotedInsts,
4101  unsigned &CreatedInstsCost,
4102  SmallVectorImpl<Instruction *> *Exts,
4103  SmallVectorImpl<Instruction *> *Truncs,
4104  const TargetLowering &TLI, bool IsSExt);
4105 
4106  /// \see promoteOperandForOther.
4107  static Value *signExtendOperandForOther(
4108  Instruction *Ext, TypePromotionTransaction &TPT,
4109  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4110  SmallVectorImpl<Instruction *> *Exts,
4111  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4112  return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4113  Exts, Truncs, TLI, true);
4114  }
4115 
4116  /// \see promoteOperandForOther.
4117  static Value *zeroExtendOperandForOther(
4118  Instruction *Ext, TypePromotionTransaction &TPT,
4119  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4120  SmallVectorImpl<Instruction *> *Exts,
4121  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4122  return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4123  Exts, Truncs, TLI, false);
4124  }
4125 
4126 public:
4127  /// Type for the utility function that promotes the operand of Ext.
4128  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4129  InstrToOrigTy &PromotedInsts,
4130  unsigned &CreatedInstsCost,
4131  SmallVectorImpl<Instruction *> *Exts,
4132  SmallVectorImpl<Instruction *> *Truncs,
4133  const TargetLowering &TLI);
4134 
4135  /// Given a sign/zero extend instruction \p Ext, return the appropriate
4136  /// action to promote the operand of \p Ext instead of using Ext.
4137  /// \return NULL if no promotable action is possible with the current
4138  /// sign extension.
4139  /// \p InsertedInsts keeps track of all the instructions inserted by the
4140  /// other CodeGenPrepare optimizations. This information is important
4141  /// because we do not want to promote these instructions as CodeGenPrepare
4142  /// will reinsert them later. Thus creating an infinite loop: create/remove.
4143  /// \p PromotedInsts maps the instructions to their type before promotion.
4144  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4145  const TargetLowering &TLI,
4146  const InstrToOrigTy &PromotedInsts);
4147 };
4148 
4149 } // end anonymous namespace
4150 
4151 bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4152  Type *ConsideredExtType,
4153  const InstrToOrigTy &PromotedInsts,
4154  bool IsSExt) {
4155  // The promotion helper does not know how to deal with vector types yet.
4156  // To be able to fix that, we would need to fix the places where we
4157  // statically extend, e.g., constants and such.
4158  if (Inst->getType()->isVectorTy())
4159  return false;
4160 
4161  // We can always get through zext.
4162  if (isa<ZExtInst>(Inst))
4163  return true;
4164 
4165  // sext(sext) is ok too.
4166  if (IsSExt && isa<SExtInst>(Inst))
4167  return true;
4168 
4169  // We can get through binary operator, if it is legal. In other words, the
4170  // binary operator must have a nuw or nsw flag.
4171  const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst);
4172  if (isa_and_nonnull<OverflowingBinaryOperator>(BinOp) &&
4173  ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4174  (IsSExt && BinOp->hasNoSignedWrap())))
4175  return true;
4176 
4177  // ext(and(opnd, cst)) --> and(ext(opnd), ext(cst))
4178  if ((Inst->getOpcode() == Instruction::And ||
4179  Inst->getOpcode() == Instruction::Or))
4180  return true;
4181 
4182  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4183  if (Inst->getOpcode() == Instruction::Xor) {
4184  const ConstantInt *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1));
4185  // Make sure it is not a NOT.
4186  if (Cst && !Cst->getValue().isAllOnes())
4187  return true;
4188  }
4189 
4190  // zext(shrl(opnd, cst)) --> shrl(zext(opnd), zext(cst))
4191  // It may change a poisoned value into a regular value, like
4192  // zext i32 (shrl i8 %val, 12) --> shrl i32 (zext i8 %val), 12
4193  // poisoned value regular value
4194  // It should be OK since undef covers valid value.
4195  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4196  return true;
4197 
4198  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
4199  // It may change a poisoned value into a regular value, like
4200  // zext i32 (shl i8 %val, 12) --> shl i32 (zext i8 %val), 12
4201  // poisoned value regular value
4202  // It should be OK since undef covers valid value.
4203  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4204  const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4205  if (ExtInst->hasOneUse()) {
4206  const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4207  if (AndInst && AndInst->getOpcode() == Instruction::And) {
4208  const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4209  if (Cst &&
4210  Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4211  return true;
4212  }
4213  }
4214  }
4215 
4216  // Check if we can do the following simplification.
4217  // ext(trunc(opnd)) --> ext(opnd)
4218  if (!isa<TruncInst>(Inst))
4219  return false;
4220 
4221  Value *OpndVal = Inst->getOperand(0);
4222  // Check if we can use this operand in the extension.
4223  // If the type is larger than the result type of the extension, we cannot.
4224  if (!OpndVal->getType()->isIntegerTy() ||
4225  OpndVal->getType()->getIntegerBitWidth() >
4226  ConsideredExtType->getIntegerBitWidth())
4227  return false;
4228 
4229  // If the operand of the truncate is not an instruction, we will not have
4230  // any information on the dropped bits.
4231  // (Actually we could for constant but it is not worth the extra logic).
4232  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4233  if (!Opnd)
4234  return false;
4235 
4236  // Check if the source of the type is narrow enough.
4237  // I.e., check that trunc just drops extended bits of the same kind as
4238  // the extension.
4239  // #1 get the type of the operand and check the kind of the extended bits.
4240  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
4241  if (OpndType)
4242  ;
4243  else if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
4244  OpndType = Opnd->getOperand(0)->getType();
4245  else
4246  return false;
4247 
4248  // #2 check that the truncate just drops extended bits.
4249  return Inst->getType()->getIntegerBitWidth() >=
4250  OpndType->getIntegerBitWidth();
4251 }
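
// Illustrative IR for the trunc case handled above (hypothetical values):
//   %t = trunc i64 %x to i32
//   %e = sext i32 %t to i64
// is only considered promotable when %x is known, via PromotedInsts or an
// explicit sext, to carry sign-extended bits, so that dropping the trunc
// cannot change the value.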
4252 
4253 TypePromotionHelper::Action TypePromotionHelper::getAction(
4254  Instruction *Ext, const SetOfInstrs &InsertedInsts,
4255  const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4256  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4257  "Unexpected instruction type");
4258  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4259  Type *ExtTy = Ext->getType();
4260  bool IsSExt = isa<SExtInst>(Ext);
4261  // If the operand of the extension is not an instruction, we cannot
4262  // get through.
4263  // If it is an instruction, check that we can get through it.
4264  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4265  return nullptr;
4266 
4267  // Do not promote if the operand has been added by codegenprepare.
4268  // Otherwise, it means we are undoing an optimization that is likely to be
4269  // redone, thus causing a potential infinite loop.
4270  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4271  return nullptr;
4272 
4273  // SExt or Trunc instructions.
4274  // Return the related handler.
4275  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4276  isa<ZExtInst>(ExtOpnd))
4277  return promoteOperandForTruncAndAnyExt;
4278 
4279  // Regular instruction.
4280  // Abort early if we will have to insert non-free instructions.
4281  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4282  return nullptr;
4283  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4284 }
4285 
4286 Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4287  Instruction *SExt, TypePromotionTransaction &TPT,
4288  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4289  SmallVectorImpl<Instruction *> *Exts,
4290  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4291  // By construction, the operand of SExt is an instruction. Otherwise we cannot
4292  // get through it and this method should not be called.
4293  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4294  Value *ExtVal = SExt;
4295  bool HasMergedNonFreeExt = false;
4296  if (isa<ZExtInst>(SExtOpnd)) {
4297  // Replace s|zext(zext(opnd))
4298  // => zext(opnd).
4299  HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4300  Value *ZExt =
4301  TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4302  TPT.replaceAllUsesWith(SExt, ZExt);
4303  TPT.eraseInstruction(SExt);
4304  ExtVal = ZExt;
4305  } else {
4306  // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4307  // => z|sext(opnd).
4308  TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4309  }
4310  CreatedInstsCost = 0;
4311 
4312  // Remove dead code.
4313  if (SExtOpnd->use_empty())
4314  TPT.eraseInstruction(SExtOpnd);
4315 
4316  // Check if the extension is still needed.
4317  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4318  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4319  if (ExtInst) {
4320  if (Exts)
4321  Exts->push_back(ExtInst);
4322  CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4323  }
4324  return ExtVal;
4325  }
4326 
4327  // At this point we have: ext ty opnd to ty.
4328  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4329  Value *NextVal = ExtInst->getOperand(0);
4330  TPT.eraseInstruction(ExtInst, NextVal);
4331  return NextVal;
4332 }
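
// For illustration (hypothetical IR), the zext-of-zext case above rewrites
//   %z = zext i8 %v to i16
//   %e = sext i16 %z to i32
// into a single "zext i8 %v to i32", while the trunc and sext cases simply
// feed the inner operand straight into the outer extension.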
4333 
4334 Value *TypePromotionHelper::promoteOperandForOther(
4335  Instruction *Ext, TypePromotionTransaction &TPT,
4336  InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4337  SmallVectorImpl<Instruction *> *Exts,
4338  SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4339  bool IsSExt) {
4340  // By construction, the operand of Ext is an instruction. Otherwise we cannot
4341  // get through it and this method should not be called.
4342  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4343  CreatedInstsCost = 0;
4344  if (!ExtOpnd->hasOneUse()) {
4345  // ExtOpnd will be promoted.
4346  // All its uses, except Ext, will need to use a truncated value of the
4347  // promoted version.
4348  // Create the truncate now.
4349  Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4350  if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4351  // Insert it just after the definition.
4352  ITrunc->moveAfter(ExtOpnd);
4353  if (Truncs)
4354  Truncs->push_back(ITrunc);
4355  }
4356 
4357  TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4358  // Restore the operand of Ext (which has been replaced by the previous call
4359  // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4360  TPT.setOperand(Ext, 0, ExtOpnd);
4361  }
4362 
4363  // Get through the Instruction:
4364  // 1. Update its type.
4365  // 2. Replace the uses of Ext by Inst.
4366  // 3. Extend each operand that needs to be extended.
4367 
4368  // Remember the original type of the instruction before promotion.
4369  // This is useful to know that the high bits are sign extended bits.
4370  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4371  // Step #1.
4372  TPT.mutateType(ExtOpnd, Ext->getType());
4373  // Step #2.
4374  TPT.replaceAllUsesWith(Ext, ExtOpnd);
4375  // Step #3.
4376  Instruction *ExtForOpnd = Ext;
4377 
4378  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4379  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4380  ++OpIdx) {
4381  LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4382  if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4383  !shouldExtOperand(ExtOpnd, OpIdx)) {
4384  LLVM_DEBUG(dbgs() << "No need to propagate\n");
4385  continue;
4386  }
4387  // Check if we can statically extend the operand.
4388  Value *Opnd = ExtOpnd->getOperand(OpIdx);
4389  if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4390  LLVM_DEBUG(dbgs() << "Statically extend\n");
4391  unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4392  APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4393  : Cst->getValue().zext(BitWidth);
4394  TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4395  continue;
4396  }
4397  // UndefValues are typed, so we have to statically sign extend them.
4398  if (isa<UndefValue>(Opnd)) {
4399  LLVM_DEBUG(dbgs() << "Statically extend\n");
4400  TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4401  continue;
4402  }
4403 
4404  // Otherwise we have to explicitly sign extend the operand.
4405  // Check if Ext was reused to extend an operand.
4406  if (!ExtForOpnd) {
4407  // If yes, create a new one.
4408  LLVM_DEBUG(dbgs() << "More operands to ext\n");
4409  Value *ValForExtOpnd = IsSExt ? TPT.createSExt(Ext, Opnd, Ext->getType())
4410  : TPT.createZExt(Ext, Opnd, Ext->getType());
4411  if (!isa<Instruction>(ValForExtOpnd)) {
4412  TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4413  continue;
4414  }
4415  ExtForOpnd = cast<Instruction>(ValForExtOpnd);
4416  }
4417  if (Exts)
4418  Exts->push_back(ExtForOpnd);
4419  TPT.setOperand(ExtForOpnd, 0, Opnd);
4420 
4421  // Move the sign extension before the insertion point.
4422  TPT.moveBefore(ExtForOpnd, ExtOpnd);
4423  TPT.setOperand(ExtOpnd, OpIdx, ExtForOpnd);
4424  CreatedInstsCost += !TLI.isExtFree(ExtForOpnd);
4425  // If more sext are required, new instructions will have to be created.
4426  ExtForOpnd = nullptr;
4427  }
4428  if (ExtForOpnd == Ext) {
4429  LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4430  TPT.eraseInstruction(Ext);
4431  }
4432  return ExtOpnd;
4433 }
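
// A sketch of the overall effect of promoteOperandForOther (hypothetical IR):
//   %add = add nuw i32 %a, %b
//   %res = zext i32 %add to i64
// becomes an i64 add whose operands are zero-extended (statically for
// constants and undef, with new zext instructions otherwise), %res's uses are
// redirected to it, and any other user of the original i32 value is given a
// trunc of the promoted value.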
4434 
4435 /// Check whether or not promoting an instruction to a wider type is profitable.
4436 /// \p NewCost gives the cost of extension instructions created by the
4437 /// promotion.
4438 /// \p OldCost gives the cost of extension instructions before the promotion
4439 /// plus the number of instructions that have been
4440  /// matched in the addressing mode by the promotion.
4441 /// \p PromotedOperand is the value that has been promoted.
4442 /// \return True if the promotion is profitable, false otherwise.
4443 bool AddressingModeMatcher::isPromotionProfitable(
4444  unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4445  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4446  << '\n');
4447  // The cost of the new extensions is greater than the cost of the
4448  // old extension plus what we folded.
4449  // This is not profitable.
4450  if (NewCost > OldCost)
4451  return false;
4452  if (NewCost < OldCost)
4453  return true;
4454  // The promotion is neutral but it may help folding the sign extension in
4455  // loads for instance.
4456  // Check that we did not create an illegal instruction.
4457  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4458 }
4459 
4460 /// Given an instruction or constant expr, see if we can fold the operation
4461 /// into the addressing mode. If so, update the addressing mode and return
4462 /// true, otherwise return false without modifying AddrMode.
4463 /// If \p MovedAway is not NULL, it contains the information of whether or
4464 /// not AddrInst has to be folded into the addressing mode on success.
4465 /// If \p MovedAway == true, \p AddrInst will not be part of the addressing
4466  /// mode because it has been moved away.
4467  /// Thus AddrInst must not be added to the matched instructions.
4468 /// This state can happen when AddrInst is a sext, since it may be moved away.
4469 /// Therefore, AddrInst may not be valid when MovedAway is true and it must
4470 /// not be referenced anymore.
4471 bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4472  unsigned Depth,
4473  bool *MovedAway) {
4474  // Avoid exponential behavior on extremely deep expression trees.
4475  if (Depth >= 5) return false;
4476 
4477  // By default, all matched instructions stay in place.
4478  if (MovedAway)
4479  *MovedAway = false;
4480 
4481  switch (Opcode) {
4482  case Instruction::PtrToInt:
4483  // PtrToInt is always a noop, as we know that the int type is pointer sized.
4484  return matchAddr(AddrInst->getOperand(0), Depth);
4485  case Instruction::IntToPtr: {
4486  auto AS = AddrInst->getType()->getPointerAddressSpace();
4487  auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4488  // This inttoptr is a no-op if the integer type is pointer sized.
4489  if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4490  return matchAddr(AddrInst->getOperand(0), Depth);
4491  return false;
4492  }
4493  case Instruction::BitCast:
4494  // BitCast is always a noop, and we can handle it as long as it is
4495  // int->int or pointer->pointer (we don't want int<->fp or something).
4496  if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4497  // Don't touch identity bitcasts. These were probably put here by LSR,
4498  // and we don't want to mess around with them. Assume it knows what it
4499  // is doing.
4500  AddrInst->getOperand(0)->getType() != AddrInst->getType())
4501  return matchAddr(AddrInst->getOperand(0), Depth);
4502  return false;
4503  case Instruction::AddrSpaceCast: {
4504  unsigned SrcAS
4505  = AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4506  unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4507  if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4508  return matchAddr(AddrInst->getOperand(0), Depth);
4509  return false;
4510  }
4511  case Instruction::Add: {
4512  // Check to see if we can merge in the RHS then the LHS. If so, we win.
4513  ExtAddrMode BackupAddrMode = AddrMode;
4514  unsigned OldSize = AddrModeInsts.size();
4515  // Start a transaction at this point.
4516  // The LHS may match but not the RHS.
4517  // Therefore, we need a higher level restoration point to undo a partially
4518  // matched operation.
4519  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4520  TPT.getRestorationPoint();
4521 
4522  AddrMode.InBounds = false;
4523  if (matchAddr(AddrInst->getOperand(1), Depth+1) &&
4524  matchAddr(AddrInst->getOperand(0), Depth+1))
4525  return true;
4526 
4527  // Restore the old addr mode info.
4528  AddrMode = BackupAddrMode;
4529  AddrModeInsts.resize(OldSize);
4530  TPT.rollback(LastKnownGood);
4531 
4532  // Otherwise this was over-aggressive. Try merging in the LHS then the RHS.
4533  if (matchAddr(AddrInst->getOperand(0), Depth+1) &&
4534  matchAddr(AddrInst->getOperand(1), Depth+1))
4535  return true;
4536 
4537  // Otherwise we definitely can't merge the ADD in.
4538  AddrMode = BackupAddrMode;
4539  AddrModeInsts.resize(OldSize);
4540  TPT.rollback(LastKnownGood);
4541  break;
4542  }
4543  //case Instruction::Or:
4544  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4545  //break;
4546  case Instruction::Mul:
4547  case Instruction::Shl: {
4548  // Can only handle X*C and X << C.
4549  AddrMode.InBounds = false;
4550  ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4551  if (!RHS || RHS->getBitWidth() > 64)
4552  return false;
4553  int64_t Scale = RHS->getSExtValue();
4554  if (Opcode == Instruction::Shl)
4555  Scale = 1LL << Scale;
4556 
4557  return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4558  }
4559  case Instruction::GetElementPtr: {
4560  // Scan the GEP. We check whether it contains constant offsets and at most
4561  // one variable offset.
4562  int VariableOperand = -1;
4563  unsigned VariableScale = 0;
4564 
4565  int64_t ConstantOffset = 0;
4566  gep_type_iterator GTI = gep_type_begin(AddrInst);
4567  for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4568  if (StructType *STy = GTI.getStructTypeOrNull()) {
4569  const StructLayout *SL = DL.getStructLayout(STy);
4570  unsigned Idx =
4571  cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4572  ConstantOffset += SL->getElementOffset(Idx);
4573  } else {
4574  TypeSize TS = DL.getTypeAllocSize(GTI.getIndexedType());
4575  if (TS.isNonZero()) {
4576  // The optimisations below currently only work for fixed offsets.
4577  if (TS.isScalable())
4578  return false;
4579  int64_t TypeSize = TS.getFixedSize();
4580  if (ConstantInt *CI =
4581  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4582  const APInt &CVal = CI->getValue();
4583  if (CVal.getMinSignedBits() <= 64) {
4584  ConstantOffset += CVal.getSExtValue() * TypeSize;
4585  continue;
4586  }
4587  }
4588  // We only allow one variable index at the moment.
4589  if (VariableOperand != -1)
4590  return false;
4591 
4592  // Remember the variable index.
4593  VariableOperand = i;
4594  VariableScale = TypeSize;
4595  }
4596  }
4597  }
4598 
4599  // A common case is for the GEP to only do a constant offset. In this case,
4600  // just add it to the disp field and check validity.
4601  if (VariableOperand == -1) {
4602  AddrMode.BaseOffs += ConstantOffset;
4603  if (ConstantOffset == 0 ||
4604  TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace)) {
4605  // Check to see if we can fold the base pointer in too.
4606  if (matchAddr(AddrInst->getOperand(0), Depth+1)) {
4607  if (!cast<GEPOperator>(AddrInst)->isInBounds())
4608  AddrMode.InBounds = false;
4609  return true;
4610  }
4611  } else if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
4612  TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
4613  ConstantOffset > 0) {
4614  // Record GEPs with non-zero offsets as candidates for splitting in the
4615  // event that the offset cannot fit into the r+i addressing mode.
4616  // Simple and common case that only one GEP is used in calculating the
4617  // address for the memory access.
4618  Value *Base = AddrInst->getOperand(0);
4619  auto *BaseI = dyn_cast<Instruction>(Base);
4620  auto *GEP = cast<GetElementPtrInst>(AddrInst);
4621  if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
4622  (BaseI && !isa<CastInst>(BaseI) &&
4623  !isa<GetElementPtrInst>(BaseI))) {
4624  // Make sure the parent block allows inserting non-PHI instructions
4625  // before the terminator.
4626  BasicBlock *Parent =
4627  BaseI ? BaseI->getParent() : &GEP->getFunction()->getEntryBlock();
4628  if (!Parent->getTerminator()->isEHPad())
4629  LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
4630  }
4631  }
4632  AddrMode.BaseOffs -= ConstantOffset;
4633  return false;
4634  }
4635 
4636  // Save the valid addressing mode in case we can't match.
4637  ExtAddrMode BackupAddrMode = AddrMode;
4638  unsigned OldSize = AddrModeInsts.size();
4639 
4640  // See if the scale and offset amount is valid for this target.
4641  AddrMode.BaseOffs += ConstantOffset;
4642  if (!cast<GEPOperator>(AddrInst)->isInBounds())
4643  AddrMode.InBounds = false;
4644 
4645  // Match the base operand of the GEP.
4646  if (!matchAddr(AddrInst->getOperand(0), Depth+1)) {
4647  // If it couldn't be matched, just stuff the value in a register.
4648  if (AddrMode.HasBaseReg) {
4649  AddrMode = BackupAddrMode;
4650  AddrModeInsts.resize(OldSize);
4651  return false;
4652  }
4653  AddrMode.HasBaseReg = true;
4654  AddrMode.BaseReg = AddrInst->getOperand(0);
4655  }
4656 
4657  // Match the remaining variable portion of the GEP.
4658  if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4659  Depth)) {
4660  // If it couldn't be matched, try stuffing the base into a register
4661  // instead of matching it, and retrying the match of the scale.
4662  AddrMode = BackupAddrMode;
4663  AddrModeInsts.resize(OldSize);
4664  if (AddrMode.HasBaseReg)
4665  return false;
4666  AddrMode.HasBaseReg = true;
4667  AddrMode.BaseReg = AddrInst->getOperand(0);
4668  AddrMode.BaseOffs += ConstantOffset;
4669  if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4670  VariableScale, Depth)) {
4671  // If even that didn't work, bail.
4672  AddrMode = BackupAddrMode;
4673  AddrModeInsts.resize(OldSize);
4674  return false;
4675  }
4676  }
4677 
4678  return true;
4679  }
4680  case Instruction::SExt:
4681  case Instruction::ZExt: {
4682  Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4683  if (!Ext)
4684  return false;
4685 
4686  // Try to move this ext out of the way of the addressing mode.
4687  // Ask for a method for doing so.
4688  TypePromotionHelper::Action TPH =
4689  TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4690  if (!TPH)
4691  return false;
4692 
4693  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4694  TPT.getRestorationPoint();
4695  unsigned CreatedInstsCost = 0;
4696  unsigned ExtCost = !TLI.isExtFree(Ext);
4697  Value *PromotedOperand =
4698  TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
4699  // SExt has been moved away.
4700  // Thus either it will be rematched later in the recursive calls or it is
4701  // gone. Anyway, we must not fold it into the addressing mode at this point.
4702  // E.g.,
4703  // op = add opnd, 1
4704  // idx = ext op
4705  // addr = gep base, idx
4706  // is now:
4707  // promotedOpnd = ext opnd <- no match here
4708  // op = promoted_add promotedOpnd, 1 <- match (later in recursive calls)
4709  // addr = gep base, op <- match
4710  if (MovedAway)
4711  *MovedAway = true;
4712 
4713  assert(PromotedOperand &&
4714  "TypePromotionHelper should have filtered out those cases");
4715 
4716  ExtAddrMode BackupAddrMode = AddrMode;
4717  unsigned OldSize = AddrModeInsts.size();
4718 
4719  if (!matchAddr(PromotedOperand, Depth) ||
4720  // The total of the new cost is equal to the cost of the created
4721  // instructions.
4722  // The total of the old cost is equal to the cost of the extension plus
4723  // what we have saved in the addressing mode.
4724  !isPromotionProfitable(CreatedInstsCost,
4725  ExtCost + (AddrModeInsts.size() - OldSize),
4726  PromotedOperand)) {
4727  AddrMode = BackupAddrMode;
4728  AddrModeInsts.resize(OldSize);
4729  LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4730  TPT.rollback(LastKnownGood);
4731  return false;
4732  }
4733  return true;
4734  }
4735  }
4736  return false;
4737 }
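// Editor's note: an illustrative example for the GetElementPtr case above,
// using hypothetical IR. On a target that supports base + scale*index + imm,
//   %p = getelementptr inbounds [16 x i32], [16 x i32]* %a, i64 0, i64 %i
//   %v = load i32, i32* %p
// decomposes into an ExtAddrMode with BaseReg = %a, ScaledReg = %i, Scale = 4
// and BaseOffs = 0: the i32 element size is 4 bytes and the leading index
// contributes a constant offset of 0. If TLI.isLegalAddressingMode rejects
// the combination, the matcher restores the backed-up AddrMode and fails.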
4738 
4739 /// If we can, try to add the value of 'Addr' into the current addressing mode.
4740 /// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4741 /// unmodified. This assumes that Addr is either a pointer type or intptr_t
4742 /// for the target.
4743 ///
4744 bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4745  // Start a transaction at this point that we will rollback if the matching
4746  // fails.
4747  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4748  TPT.getRestorationPoint();
4749  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
4750  if (CI->getValue().isSignedIntN(64)) {
4751  // Fold in immediates if legal for the target.
4752  AddrMode.BaseOffs += CI->getSExtValue();
4753  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4754  return true;
4755  AddrMode.BaseOffs -= CI->getSExtValue();
4756  }
4757  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
4758  // If this is a global variable, try to fold it into the addressing mode.
4759  if (!AddrMode.BaseGV) {
4760  AddrMode.BaseGV = GV;
4761  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4762  return true;
4763  AddrMode.BaseGV = nullptr;
4764  }
4765  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
4766  ExtAddrMode BackupAddrMode = AddrMode;
4767  unsigned OldSize = AddrModeInsts.size();
4768 
4769  // Check to see if it is possible to fold this operation.
4770  bool MovedAway = false;
4771  if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
4772  // This instruction may have been moved away. If so, there is nothing
4773  // to check here.
4774  if (MovedAway)
4775  return true;
4776  // Okay, it's possible to fold this. Check to see if it is actually
4777  // *profitable* to do so. We use a simple cost model to avoid increasing
4778  // register pressure too much.
4779  if (I->hasOneUse() ||
4780  isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
4781  AddrModeInsts.push_back(I);
4782  return true;
4783  }
4784 
4785  // It isn't profitable to do this, roll back.
4786  //cerr << "NOT FOLDING: " << *I;
4787  AddrMode = BackupAddrMode;
4788  AddrModeInsts.resize(OldSize);
4789  TPT.rollback(LastKnownGood);
4790  }
4791  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
4792  if (matchOperationAddr(CE, CE->getOpcode(), Depth))
4793  return true;
4794  TPT.rollback(LastKnownGood);
4795  } else if (isa<ConstantPointerNull>(Addr)) {
4796  // Null pointer gets folded without affecting the addressing mode.
4797  return true;
4798  }
4799 
4800  // Worst case, the target should support [reg] addressing modes. :)
4801  if (!AddrMode.HasBaseReg) {
4802  AddrMode.HasBaseReg = true;
4803  AddrMode.BaseReg = Addr;
4804  // Still check for legality in case the target supports [imm] but not [i+r].
4805  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4806  return true;
4807  AddrMode.HasBaseReg = false;
4808  AddrMode.BaseReg = nullptr;
4809  }
4810 
4811  // If the base register is already taken, see if we can do [r+r].
4812  if (AddrMode.Scale == 0) {
4813  AddrMode.Scale = 1;
4814  AddrMode.ScaledReg = Addr;
4815  if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
4816  return true;
4817  AddrMode.Scale = 0;
4818  AddrMode.ScaledReg = nullptr;
4819  }
4820  // Couldn't match.
4821  TPT.rollback(LastKnownGood);
4822  return false;
4823 }
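// Editor's note: summarizing the fallback order above, matchAddr tries the
// cheapest representations first: fold a 64-bit-signed constant into
// BaseOffs, fold a GlobalValue into BaseGV, recurse into instructions and
// constant expressions via matchOperationAddr, and only then fall back to
// using Addr as a plain base register ([reg]) or, if the base register is
// already taken, as a scaled register with Scale = 1 ([r + r]). Every attempt
// is checked with TLI.isLegalAddressingMode and rolled back if illegal.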
4824 
4825 /// Check to see if all uses of OpVal by the specified inline asm call are due
4826 /// to memory operands. If so, return true, otherwise return false.
4827 static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
4828  const TargetLowering &TLI,
4829  const TargetRegisterInfo &TRI) {
4830  const Function *F = CI->getFunction();
4831  TargetLowering::AsmOperandInfoVector TargetConstraints =
4832  TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
4833 
4834  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
4835  // Compute the constraint code and ConstraintType to use.
4836  TLI.ComputeConstraintToUse(OpInfo, SDValue());
4837 
4838  // If this asm operand is our Value*, and if it isn't an indirect memory
4839  // operand, we can't fold it!
4840  if (OpInfo.CallOperandVal == OpVal &&
4841  (OpInfo.ConstraintType != TargetLowering::C_Memory ||
4842  !OpInfo.isIndirect))
4843  return false;
4844  }
4845 
4846  return true;
4847 }
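// Editor's note: an illustrative case (hypothetical value names). If %addr is
// only referenced by inline-asm operands with indirect memory constraints
// (e.g. "*m"), every check above passes and the address computation feeding
// %addr may still be folded. If %addr is also bound to a register constraint
// such as "r" (ConstraintType != C_Memory, or a direct operand), the function
// returns false and the caller treats it as a non-foldable use.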
4848 
4849 // Max number of memory uses to look at before aborting the search to conserve
4850 // compile time.
4851 static constexpr int MaxMemoryUsesToScan = 20;
4852 
4853 /// Recursively walk all the uses of I until we find a memory use.
4854 /// If we find an obviously non-foldable instruction, return true.
4855 /// Add accessed addresses and types to MemoryUses.
4856 static bool FindAllMemoryUses(
4857  Instruction *I, SmallVectorImpl<std::pair<Value *, Type *>> &MemoryUses,
4858  SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
4859  const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
4860  BlockFrequencyInfo *BFI, int SeenInsts = 0) {
4861  // If we already considered this instruction, we're done.
4862  if (!ConsideredInsts.insert(I).second)
4863  return false;
4864 
4865  // If this is an obviously unfoldable instruction, bail out.
4866  if (!MightBeFoldableInst(I))
4867  return true;
4868 
4869  // Loop over all the uses, recursively processing them.
4870  for (Use &U : I->uses()) {
4871  // Conservatively return true if we're seeing a large number or a deep chain
4872  // of users. This avoids excessive compilation times in pathological cases.
4873  if (SeenInsts++ >= MaxMemoryUsesToScan)
4874  return true;
4875 
4876  Instruction *UserI = cast<Instruction>(U.getUser());
4877  if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
4878  MemoryUses.push_back({U.get(), LI->getType()});
4879  continue;
4880  }
4881 
4882  if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
4883  if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
4884  return true; // Storing addr, not into addr.
4885  MemoryUses.push_back({U.get(), SI->getValueOperand()->getType()});
4886  continue;
4887  }
4888 
4889  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
4890  if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
4891  return true; // Storing addr, not into addr.
4892  MemoryUses.push_back({U.get(), RMW->getValOperand()->getType()});
4893  continue;
4894  }
4895 
4896  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
4897  if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
4898  return true; // Storing addr, not into addr.
4899  MemoryUses.push_back({U.get(), CmpX->getCompareOperand()->getType()});
4900  continue;
4901  }
4902 
4903  if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
4904  if (CI->hasFnAttr(Attribute::Cold)) {
4905  // If this is a cold call, we can sink the addressing calculation into
4906  // the cold path. See optimizeCallInst
4907  bool OptForSize = OptSize ||
4908  llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
4909  if (!OptForSize)
4910  continue;
4911  }
4912 
4913  InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
4914  if (!IA) return true;
4915 
4916  // If this is a memory operand, we're cool, otherwise bail out.
4917  if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
4918  return true;
4919  continue;
4920  }
4921 
4922  if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
4923  PSI, BFI, SeenInsts))
4924  return true;
4925  }
4926 
4927  return false;
4928 }
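// Editor's note: summarizing the walk above, a use is acceptable when the
// address reaches the pointer operand of a load, store, atomicrmw or cmpxchg
// (the accessed type is recorded in MemoryUses), a call that is marked cold,
// or an inline-asm memory operand; other potentially foldable users are
// walked recursively. Storing the address itself, a non-memory asm operand,
// or scanning more than MaxMemoryUsesToScan (20) uses ends the search with
// "has a non-foldable use".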
4929 
4930 /// Return true if Val is already known to be live at the use site that we're
4931 /// folding it into. If so, there is no cost to include it in the addressing
4932 /// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
4933 /// instruction already.
4934 bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
4935  Value *KnownLive2) {
4936  // If Val is either of the known-live values, we know it is live!
4937  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
4938  return true;
4939 
4940  // All values other than instructions and arguments (e.g. constants) are live.
4941  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;
4942 
4943  // If Val is a constant sized alloca in the entry block, it is live; this is
4944  // true because it is just a reference to the stack/frame pointer, which is
4945  // live for the whole function.
4946  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
4947  if (AI->isStaticAlloca())
4948  return true;
4949 
4950  // Check to see if this value is already used in the memory instruction's
4951  // block. If so, it's already live into the block at the very least, so we
4952  // can reasonably fold it.
4953  return Val->isUsedInBasicBlock(MemoryInst->getParent());
4954 }
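// Editor's note: concretely, constants and static (entry-block) allocas are
// always considered live and therefore free to fold, while an instruction or
// function argument only counts as free if it is one of the known-live
// registers or is already used somewhere in MemoryInst's block. Anything else
// would have its live range extended by folding, which the caller charges
// against profitability.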
4955 
4956 /// It is possible for the addressing mode of the machine to fold the specified
4957 /// instruction into a load or store that ultimately uses it.
4958 /// However, the specified instruction has multiple uses.
4959 /// Given this, it may actually increase register pressure to fold it
4960 /// into the load. For example, consider this code:
4961 ///
4962 /// X = ...
4963 /// Y = X+1
4964 /// use(Y) -> nonload/store
4965 /// Z = Y+1
4966 /// load Z
4967 ///
4968 /// In this case, Y has multiple uses, and can be folded into the load of Z
4969 /// (yielding load [X+2]). However, doing this will cause both "X" and "X+1" to
4970 /// be live at the use(Y) line. If we don't fold Y into load Z, we use one
4971 /// fewer register. Since Y can't be folded into "use(Y)" we don't increase the
4972 /// number of computations either.
4973 ///
4974 /// Note that this (like most of CodeGenPrepare) is just a rough heuristic. If
4975 /// X was live across 'load Z' for other reasons, we actually *would* want to
4976 /// fold the addressing mode in the Z case. This would make Y die earlier.
4977 bool AddressingModeMatcher::
4978 isProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
4979  ExtAddrMode &AMAfter) {
4980  if (IgnoreProfitability) return true;
4981 
4982  // AMBefore is the addressing mode before this instruction was folded into it,
4983  // and AMAfter is the addressing mode after the instruction was folded. Get
4984  // the set of registers referenced by AMAfter and subtract out those
4985  // referenced by AMBefore: this is the set of values which folding in this
4986  // address extends the lifetime of.
4987  //
4988  // Note that there are only two potential values being referenced here,
4989  // BaseReg and ScaleReg (global addresses are always available, as are any
4990  // folded immediates).
4991  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
4992 
4993  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
4994  // lifetime wasn't extended by adding this instruction.
4995  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4996  BaseReg = nullptr;
4997  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
4998  ScaledReg = nullptr;
4999 
5000  // If folding this instruction (and its subexprs) didn't extend any live
5001  // ranges, we're ok with it.
5002  if (!BaseReg && !ScaledReg)
5003  return true;
5004 
5005  // If all uses of this instruction can have the address mode sunk into them,
5006  // we can remove the addressing mode and effectively trade one live register
5007  // for another (at worst.) In this context, folding an addressing mode into
5008  // the use is just a particularly nice way of sinking it.
5009  SmallVector<std::pair<Value *, Type *>, 16> MemoryUses;
5010  SmallPtrSet<Instruction*, 16> ConsideredInsts;
5011  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5012  PSI, BFI))
5013  return false; // Has a non-memory, non-foldable use!
5014 
5015  // Now that we know that all uses of this instruction are part of a chain of
5016  // computation involving only operations that could theoretically be folded
5017  // into a memory use, loop over each of these memory operation uses and see
5018  // if they could *actually* fold the instruction. The assumption is that
5019  // addressing modes are cheap and that duplicating the computation involved
5020  // many times is worthwhile, even on a fastpath. For sinking candidates
5021  // (i.e. cold call sites), this serves as a way to prevent excessive code
5022  // growth since most architectures have some reasonably small and fast way to
5023  // compute an effective address (e.g., LEA on x86).
5024  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
5025  for (const std::pair<Value *, Type *> &Pair : MemoryUses) {
5026  Value *Address = Pair.first;
5027  Type *AddressAccessTy = Pair.second;
5028  unsigned AS = Address->getType()->getPointerAddressSpace();
5029 
5030  // Do a match against the root of this address, ignoring profitability. This
5031  // will tell us if the addressing mode for the memory operation will
5032  // *actually* cover the shared instruction.
5033  ExtAddrMode Result;
5034  std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5035  0);
5036  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5037  TPT.getRestorationPoint();
5038  AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5039  AddressAccessTy, AS, MemoryInst, Result,
5040  InsertedInsts, PromotedInsts, TPT,
5041  LargeOffsetGEP, OptSize, PSI, BFI);
5042  Matcher.IgnoreProfitability = true;
5043  bool Success = Matcher.matchAddr(Address, 0);
5044  (void)Success; assert(Success && "Couldn't select *anything*?");
5045 
5046  // The match was to check the profitability, the changes made are not
5047  // part of the original matcher. Therefore, they should be dropped
5048  // otherwise the original matcher will not present the right state.
5049  TPT.rollback(LastKnownGood);
5050 
5051  // If the match didn't cover I, then it won't be shared by it.
5052  if (!is_contained(MatchedAddrModeInsts, I))
5053  return false;
5054 
5055  MatchedAddrModeInsts.clear();
5056  }
5057 
5058  return true;
5059 }
5060 
5061 /// Return true if the specified value is defined in a
5062 /// different basic block than BB.
5063 static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5064  if (Instruction *I = dyn_cast<Instruction>(V))
5065  return I->getParent() != BB;
5066  return false;
5067 }
5068 
5069 /// Sink addressing mode computation immediately before MemoryInst if doing so
5070 /// can be done without increasing register pressure. The need for the
5071 /// register pressure constraint means this can end up being an all or nothing
5072 /// decision for all uses of the same addressing computation.
5073 ///
5074 /// Load and Store Instructions often have addressing modes that can do
5075 /// significant amounts of computation. As such, instruction selection will try
5076 /// to get the load or store to do as much computation as possible for the
5077 /// program. The problem is that isel can only see within a single block. As
5078 /// such, we sink as much legal addressing mode work into the block as possible.
5079 ///
5080 /// This method is used to optimize both load/store and inline asms with memory
5081 /// operands. It's also used to sink addressing computations feeding into cold
5082 /// call sites into their (cold) basic block.
5083 ///
5084 /// The motivation for handling sinking into cold blocks is that doing so can
5085 /// both enable other address mode sinking (by satisfying the register pressure
5086 /// constraint above) and reduce register pressure globally (by removing the
5087 /// addressing mode computation from the fast path entirely).
5088 bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5089  Type *AccessTy, unsigned AddrSpace) {
5090  Value *Repl = Addr;
5091 
5092  // Try to collapse single-value PHI nodes. This is necessary to undo
5093  // unprofitable PRE transformations.
5094  SmallVector<Value*, 8> worklist;
5095  SmallPtrSet<Value*, 16> Visited;
5096  worklist.push_back(Addr);
5097 
5098  // Use a worklist to iteratively look through PHI and select nodes, and
5099  // ensure that the addressing modes obtained from the non-PHI/select roots of
5100  // the graph are compatible.
5101  bool PhiOrSelectSeen = false;
5102  SmallVector<Instruction*, 16> AddrModeInsts;
5103  const SimplifyQuery SQ(*DL, TLInfo);
5104  AddressingModeCombiner AddrModes(SQ, Addr);
5105  TypePromotionTransaction TPT(RemovedInsts);
5106  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5107  TPT.getRestorationPoint();
5108  while (!worklist.empty()) {
5109  Value *V = worklist.pop_back_val();
5110 
5111  // We allow traversing cyclic Phi nodes.
5112  // In case of success after this loop, we ensure that traversing through
5113  // Phi nodes ends up with all cases computing an address of the form
5114  // BaseGV + Base + Scale * Index + Offset
5115  // where Scale and Offset are constants and BaseGV, Base and Index
5116  // are exactly the same Values in all cases.
5117  // It means that BaseGV, Scale and Offset dominate our memory instruction
5118  // and have the same value as they had in the address computation represented
5119  // by the Phi, so we can safely sink the address computation to the memory instruction.
5120  if (!Visited.insert(V).second)
5121  continue;
5122 
5123  // For a PHI node, push all of its incoming values.
5124  if (PHINode *P = dyn_cast<PHINode>(V)) {
5125  append_range(worklist, P->incoming_values());
5126  PhiOrSelectSeen = true;
5127  continue;
5128  }
5129  // Similar for select.
5130  if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5131  worklist.push_back(SI->getFalseValue());
5132  worklist.push_back(SI->getTrueValue());
5133  PhiOrSelectSeen = true;
5134  continue;
5135  }
5136 
5137  // For non-PHIs, determine the addressing mode being computed. Note that
5138  // the result may differ depending on what other uses our candidate
5139  // addressing instructions might have.
5140  AddrModeInsts.clear();
5141  std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5142  0);
5143  // Defer the query (and possible computation) of the dom tree to the point of
5144  // actual use. It's expected that most address matches don't actually need
5145  // the domtree.
5146  auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5147  Function *F = MemoryInst->getParent()->getParent();
5148  return this->getDT(*F);
5149  };
5150  ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5151  V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5152  *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5153  BFI.get());
5154 
5155  GetElementPtrInst *GEP = LargeOffsetGEP.first;
5156  if (GEP && !NewGEPBases.count(GEP)) {
5157  // If splitting the underlying data structure can reduce the offset of a
5158  // GEP, collect the GEP. Skip the GEPs that are the new bases of
5159  // previously split data structures.
5160  LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5161  if (LargeOffsetGEPID.find(GEP) == LargeOffsetGEPID.end())
5162  LargeOffsetGEPID[GEP] = LargeOffsetGEPID.size();
5163  }
5164 
5165  NewAddrMode.OriginalValue = V;
5166  if (!AddrModes.addNewAddrMode(NewAddrMode))
5167  break;
5168  }
5169 
5170  // Try to combine the AddrModes we've collected. If we couldn't collect any,
5171  // or we have multiple but either couldn't combine them or combining them
5172  // wouldn't do anything useful, bail out now.
5173  if (!AddrModes.combineAddrModes()) {
5174  TPT.rollback(LastKnownGood);
5175  return false;
5176  }
5177  bool Modified = TPT.commit();
5178 
5179  // Get the combined AddrMode (or the only AddrMode, if we only had one).
5180  ExtAddrMode AddrMode = AddrModes.getAddrMode();
5181 
5182  // If all the instructions matched are already in this BB, don't do anything.
5183  // If we saw a Phi node then it is definitely not local, and if we saw a select
5184  // then we want to push the address calculation past it even if it's already
5185  // in this BB.
5186  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5187  return IsNonLocalValue(V, MemoryInst->getParent());
5188  })) {
5189  LLVM_DEBUG(dbgs() << "CGP: Found local addrmode: " << AddrMode
5190  << "\n");
5191  return Modified;
5192  }
5193 
5194  // Insert this computation right after this user. Since our caller is
5195  // scanning from the top of the BB to the bottom, reuses of the expr are
5196  // guaranteed to happen later.
5197  IRBuilder<> Builder(MemoryInst);
5198 
5199  // Now that we determined the addressing expression we want to use and know
5200  // that we have to sink it into this block, check to see if we have already
5201  // done this for some other load/store instr in this block. If so, reuse
5202  // the computation. Before attempting reuse, check if the address is valid
5203  // as it may have been erased.
5204 
5205  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5206 
5207  Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5208  if (SunkAddr) {
5209  LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5210  << " for " << *MemoryInst << "\n");
5211  if (SunkAddr->getType() != Addr->getType())
5212  SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5213  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5214  SubtargetInfo->addrSinkUsingGEPs())) {
5215  // By default, we use the GEP-based method when AA is used later. This
5216  // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5217  LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5218  << " for " << *MemoryInst << "\n");
5219  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5220  Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5221 
5222  // First, find the pointer.
5223  if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5224  ResultPtr = AddrMode.BaseReg;
5225  AddrMode.BaseReg = nullptr;
5226  }
5227 
5228  if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5229  // We can't add more than one pointer together, nor can we scale a
5230  // pointer (both of which seem meaningless).
5231  if (ResultPtr || AddrMode.Scale != 1)
5232  return Modified;
5233 
5234  ResultPtr = AddrMode.ScaledReg;
5235  AddrMode.Scale = 0;
5236  }
5237 
5238  // It is only safe to sign extend the BaseReg if we know that the math
5239  // required to create it did not overflow before we extend it. Since
5240  // the original IR value was tossed in favor of a constant back when
5241  // the AddrMode was created we need to bail out gracefully if widths
5242  // do not match instead of extending it.
5243  //
5244  // (See below for code to add the scale.)
5245  if (AddrMode.Scale) {
5246  Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5247  if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5248  cast<IntegerType>(ScaledRegTy)->getBitWidth())
5249  return Modified;
5250  }
5251 
5252  if (AddrMode.BaseGV) {
5253  if (ResultPtr)
5254  return Modified;
5255 
5256  ResultPtr = AddrMode.BaseGV;
5257  }
5258 
5259  // If the real base value actually came from an inttoptr, then the matcher
5260  // will look through it and provide only the integer value. In that case,
5261  // use it here.
5262  if (!DL->isNonIntegralPointerType(Addr->getType())) {
5263  if (!ResultPtr && AddrMode.BaseReg) {
5264  ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5265  "sunkaddr");
5266  AddrMode.BaseReg = nullptr;
5267  } else if (!ResultPtr && AddrMode.Scale == 1) {
5268  ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5269  "sunkaddr");
5270  AddrMode.Scale = 0;
5271  }
5272  }
5273 
5274  if (!ResultPtr &&
5275  !AddrMode.BaseReg && !AddrMode.Scale && !AddrMode.BaseOffs) {
5276  SunkAddr = Constant::getNullValue(Addr->getType());
5277  } else if (!ResultPtr) {
5278  return Modified;
5279  } else {
5280  Type *I8PtrTy =
5281  Builder.getInt8PtrTy(Addr->getType()->getPointerAddressSpace());
5282  Type *I8Ty = Builder.getInt8Ty();
5283 
5284  // Start with the base register. Do this first so that subsequent address
5285  // matching finds it last, which will prevent it from trying to match it
5286  // as the scaled value in case it happens to be a mul. That would be
5287  // problematic if we've sunk a different mul for the scale, because then
5288  // we'd end up sinking both muls.
5289  if (AddrMode.BaseReg) {
5290  Value *V = AddrMode.BaseReg;
5291  if (V->getType() != IntPtrTy)
5292  V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5293 
5294  ResultIndex = V;
5295  }
5296 
5297  // Add the scale value.
5298  if (AddrMode.Scale) {
5299  Value *V = AddrMode.ScaledReg;
5300  if (V->getType() == IntPtrTy) {
5301  // done.
5302  } else {
5303  assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5304  cast<IntegerType>(V->getType())->getBitWidth() &&
5305  "We can't transform if ScaledReg is too narrow");
5306  V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5307  }
5308 
5309  if (AddrMode.Scale != 1)
5310  V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5311  "sunkaddr");
5312  if (ResultIndex)
5313  ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5314  else
5315  ResultIndex = V;
5316  }
5317 
5318  // Add in the Base Offset if present.
5319  if (AddrMode.BaseOffs) {
5320  Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5321  if (ResultIndex) {
5322  // We need to add this separately from the scale above to help with
5323  // SDAG consecutive load/store merging.
5324  if (ResultPtr->getType() != I8PtrTy)
5325  ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5326  ResultPtr =
5327  AddrMode.InBounds
5328  ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
5329  "sunkaddr")
5330  : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
5331  }
5332 
5333  ResultIndex = V;
5334  }
5335 
5336  if (!ResultIndex) {
5337  SunkAddr = ResultPtr;
5338  } else {
5339  if (ResultPtr->getType() != I8PtrTy)
5340  ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5341  SunkAddr =
5342  AddrMode.InBounds
5343  ? Builder.CreateInBoundsGEP(I8Ty, ResultPtr, ResultIndex,
5344  "sunkaddr")
5345  : Builder.CreateGEP(I8Ty, ResultPtr, ResultIndex, "sunkaddr");
5346  }
5347 
5348  if (SunkAddr->getType() != Addr->getType())
5349  SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5350  }
5351  } else {
5352  // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5353  // non-integral pointers, so in that case bail out now.
5354  Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5355  Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5356  PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5357  PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5358  if (DL->isNonIntegralPointerType(Addr->getType()) ||
5359  (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5360  (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5361  (AddrMode.BaseGV &&
5362  DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5363  return Modified;
5364 
5365  LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5366  << " for " << *MemoryInst << "\n");
5367  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5368  Value *Result = nullptr;
5369 
5370  // Start with the base register. Do this first so that subsequent address
5371  // matching finds it last, which will prevent it from trying to match it
5372  // as the scaled value in case it happens to be a mul. That would be
5373  // problematic if we've sunk a different mul for the scale, because then
5374  // we'd end up sinking both muls.
5375  if (AddrMode.BaseReg) {
5376  Value *V = AddrMode.BaseReg;
5377  if (V->getType()->isPointerTy())
5378  V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5379  if (V->getType() != IntPtrTy)
5380  V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5381  Result = V;
5382  }
5383 
5384  // Add the scale value.
5385  if (AddrMode.Scale) {
5386  Value *V = AddrMode.ScaledReg;
5387  if (V->getType() == IntPtrTy) {
5388  // done.
5389  } else if (V->getType()->isPointerTy()) {
5390  V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5391  } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5392  cast<IntegerType>(V->getType())->getBitWidth()) {
5393  V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5394  } else {
5395  // It is only safe to sign extend the BaseReg if we know that the math
5396  // required to create it did not overflow before we extend it. Since
5397  // the original IR value was tossed in favor of a constant back when
5398  // the AddrMode was created we need to bail out gracefully if widths
5399  // do not match instead of extending it.
5400  Instruction *I = dyn_cast_or_null<Instruction>(Result);
5401  if (I && (Result != AddrMode.BaseReg))
5402  I->eraseFromParent();
5403  return Modified;
5404  }
5405  if (AddrMode.Scale != 1)
5406  V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5407  "sunkaddr");
5408  if (Result)
5409  Result = Builder.CreateAdd(Result, V, "sunkaddr");
5410  else
5411  Result = V;
5412  }
5413 
5414  // Add in the BaseGV if present.
5415  if (AddrMode.BaseGV) {
5416  Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
5417  if (Result)
5418  Result = Builder.CreateAdd(Result, V, "sunkaddr");